From: xli@apache.org
To: commits@harmony.apache.org
Reply-To: dev@harmony.apache.org
Subject: svn commit: r570028 [3/4] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ finalizer_weakref/ gen/ los/ mark_compact/ mark_sweep/ tests/ thread/ trace_forward/ utils/ verify/
Date: Mon, 27 Aug 2007 08:12:01 -0000

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h?rev=570028&r1=570027&r2=570028&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.h Mon Aug 27 01:11:57 2007
@@ -36,17 +36,19 @@
 typedef struct Chunk_Header_Basic {
   Chunk_Header_Basic *next;
+  Chunk_Header_Basic *prev;
   Chunk_Status_t status;
-  Chunk_Header_Basic *adj_prev; // adjacent previous chunk, for merging continuous free chunks
   Chunk_Header_Basic *adj_next; // adjacent next chunk
+  Chunk_Header_Basic *adj_prev; // adjacent previous chunk, for merging contiguous free chunks
 } Chunk_Header_Basic;

 typedef struct Chunk_Header {
   /* Beginning of Chunk_Header_Basic */
   Chunk_Header *next; /* pointing to the next pfc in the pfc pool */
+  Chunk_Header *prev; /* pointing to the prev pfc in the pfc pool */
   Chunk_Status_t status;
-  Chunk_Header_Basic *adj_prev; // adjacent previous chunk, for merging continuous free chunks
   Chunk_Header_Basic *adj_next; // adjacent next chunk
+  Chunk_Header_Basic *adj_prev; // adjacent previous chunk, for merging contiguous free chunks
   /* End of Chunk_Header_Basic */
   void *base;
   unsigned int slot_size;
@@ -65,7 +67,7 @@
 #define ABNORMAL_CHUNK_HEADER(addr) ((Chunk_Header*)((POINTER_SIZE_INT)addr & CHUNK_GRANULARITY_HIGH_MASK))

 #define MAX_SLOT_INDEX 0xFFffFFff
-#define COLOR_BITS_PER_OBJ 2 // should be powers of 2
+#define COLOR_BITS_PER_OBJ 4 // should be a power of 2
 #define SLOT_NUM_PER_WORD_IN_TABLE (BITS_PER_WORD /COLOR_BITS_PER_OBJ)

 /* Two equations:
@@ -101,11 +103,11 @@
 typedef struct Free_Chunk {
   /* Beginning of Chunk_Header_Basic */
   Free_Chunk *next; /* pointing to the next free Free_Chunk */
+  Free_Chunk *prev; /* pointing to the prev free Free_Chunk */
   Chunk_Status_t status;
-  Chunk_Header_Basic *adj_prev; // adjacent previous chunk, for merging continuous free chunks
   Chunk_Header_Basic *adj_next; // adjacent next chunk
+  Chunk_Header_Basic *adj_prev; // adjacent previous chunk, for merging contiguous free chunks
   /* End of Chunk_Header_Basic */
-  Free_Chunk *prev; /* pointing to the prev free Free_Chunk */
 } Free_Chunk;

 typedef struct Free_Chunk_List {
@@ -139,6 +141,31 @@
   assert(list->lock == FREE_LOCK);
 }

+inline void free_list_detach_chunk(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+  if(chunk->prev)
+    chunk->prev->next = chunk->next;
+  else // chunk is the head
+    list->head = chunk->next;
+  if(chunk->next)
+    chunk->next->prev = chunk->prev;
+}
+
+inline void move_free_chunks_between_lists(Free_Chunk_List *to_list, Free_Chunk_List *from_list)
+{
+  if(to_list->tail){
+    to_list->head->prev = from_list->tail;
+  } else {
+    to_list->tail = from_list->tail;
+  }
+  if(from_list->head){
+    from_list->tail->next = to_list->head;
+    to_list->head = from_list->head;
+  }
+  from_list->head = NULL;
+  from_list->tail = NULL;
+}
+
 /* Padding the last index word in table to facilitate allocation */
 inline void chunk_pad_last_index_word(Chunk_Header *chunk, POINTER_SIZE_INT alloc_mask)
 {
@@ -167,7 +194,7 @@
 inline void normal_chunk_init(Chunk_Header *chunk, unsigned int slot_size)
 {
   assert(chunk->status == CHUNK_FREE);
-  assert((POINTER_SIZE_INT)chunk->adj_next == (POINTER_SIZE_INT)chunk + NORMAL_CHUNK_SIZE_BYTES);
+  assert(CHUNK_SIZE(chunk) == NORMAL_CHUNK_SIZE_BYTES);

   chunk->next = NULL;
   chunk->status = CHUNK_FRESH | CHUNK_NORMAL | CHUNK_NEED_ZEROING;
@@ -184,7 +211,7 @@
 inline void abnormal_chunk_init(Chunk_Header *chunk, unsigned int chunk_size, unsigned int obj_size)
 {
   assert(chunk->status == CHUNK_FREE);
-  assert((POINTER_SIZE_INT)chunk->adj_next == (POINTER_SIZE_INT)chunk + chunk_size);
+  assert(CHUNK_SIZE(chunk) == chunk_size);

   chunk->next = NULL;
   chunk->status = CHUNK_ABNORMAL;
@@ -231,7 +258,7 @@
 #define ALIGNED_CHUNK_INDEX_TO_SIZE(index) (((index) + 1) << NORMAL_CHUNK_SHIFT_COUNT)
 #define UNALIGNED_CHUNK_INDEX_TO_SIZE(index) (((index) + 1) << CHUNK_GRANULARITY_BITS)

-#define SUPER_OBJ_MASK ((Obj_Info_Type)0x1) /* the lowest bit in obj info */
+#define SUPER_OBJ_MASK ((Obj_Info_Type)0x20) /* bit 0x20 in obj info */

 #define PFC_STEAL_NUM 3
 #define PFC_STEAL_THRESHOLD 3
@@ -274,6 +301,7 @@
 inline void sspace_put_pfc(Sspace *sspace, Chunk_Header *chunk)
 {
   unsigned int size = chunk->slot_size;
+  assert(chunk->base && chunk->alloc_num);
   assert(chunk && (size <= SUPER_OBJ_THRESHOLD));
   assert(chunk->slot_index < chunk->slot_num);
@@ -309,5 +337,9 @@

 extern void zeroing_free_chunk(Free_Chunk *chunk);

+extern void allocator_clear_local_chunks(Allocator *allocator, Boolean reuse_pfc);
+extern void gc_clear_collector_local_chunks(GC *gc);
+
+extern void sspace_collect_free_chunks_to_list(Sspace *sspace, Free_Chunk_List *list);

 #endif //#ifndef _SSPACE_CHUNK_H_
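The prev field added above is what makes list removal O(1): free_list_detach_chunk can unlink a chunk without walking from the head to find its predecessor. The same idea in isolation, as a small compilable C sketch with hypothetical Node/List types standing in for Free_Chunk/Free_Chunk_List (unlike the committed free_list_detach_chunk, the sketch also maintains the tail pointer, which the GC code leaves to its callers):

#include <assert.h>
#include <stddef.h>

typedef struct Node { struct Node *next, *prev; } Node;
typedef struct { Node *head, *tail; } List;

/* O(1) unlink: fix up the two neighbors (or the head/tail) and we are done. */
static void list_detach(List *list, Node *node)
{
  if(node->prev) node->prev->next = node->next;
  else           list->head = node->next;   /* node was the head */
  if(node->next) node->next->prev = node->prev;
  else           list->tail = node->prev;   /* node was the tail */
}

int main(void)
{
  Node a = {NULL, NULL}, b = {NULL, NULL}, c = {NULL, NULL};
  List l = { &a, &c };
  a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
  list_detach(&l, &b);                      /* no list traversal needed */
  assert(l.head == &a && a.next == &c && c.prev == &a);
  return 0;
}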
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp?rev=570028&r1=570027&r2=570028&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp Mon Aug 27 01:11:57 2007
@@ -19,18 +19,20 @@
 #include "sspace_alloc.h"
 #include "sspace_mark_sweep.h"
 #include "sspace_verify.h"
-#include "../common/fix_repointed_refs.h"

 #define PFC_SORT_NUM 8

-static Chunk_Header_Basic *volatile next_chunk_for_fixing;
-
 void sspace_decide_compaction_need(Sspace *sspace)
 {
   POINTER_SIZE_INT free_mem_size = free_mem_in_sspace(sspace, FALSE);
   float free_mem_ratio = (float)free_mem_size / sspace->committed_heap_size;

+#ifdef USE_MARK_SWEEP_GC
   if((free_mem_ratio > SSPACE_COMPACT_RATIO) && (sspace->gc->cause != GC_CAUSE_RUNTIME_FORCE_GC)){
+#else
+  if(gc_match_kind(sspace->gc, MAJOR_COLLECTION)){
+#endif
     sspace->need_compact = sspace->move_object = TRUE;
   } else {
     sspace->need_compact = sspace->move_object = FALSE;
@@ -39,7 +41,7 @@

 static inline void sorted_chunk_bucket_add_entry(Chunk_Header **head, Chunk_Header **tail, Chunk_Header *chunk)
 {
-  chunk->adj_prev = NULL; /* Field adj_prev is used as prev */
+  chunk->prev = NULL; /* Chunks in the bucket are linked through field prev */

   if(!*head){
     assert(!*tail);
@@ -50,7 +52,7 @@
   assert(*tail);

   chunk->next = *head;
-  (*head)->adj_prev = (Chunk_Header_Basic*)chunk;
+  (*head)->prev = chunk;
   *head = chunk;
 }
@@ -112,7 +114,7 @@
       tail = bucket_tail[i];
     } else {
       tail->next = bucket_head[i];
-      bucket_head[i]->adj_prev = (Chunk_Header_Basic*)tail;
+      bucket_head[i]->prev = tail;
       tail = bucket_tail[i];
     }
   }
@@ -133,7 +135,7 @@
   Chunk_Header *result = *least_free_chunk;
   *least_free_chunk = (*least_free_chunk)->next;
   if(*least_free_chunk)
-    (*least_free_chunk)->adj_prev = NULL;
+    (*least_free_chunk)->prev = NULL;
   else
     *most_free_chunk = NULL;
   return result;
@@ -145,11 +147,12 @@
     return NULL;
   }
   Chunk_Header *result = *most_free_chunk;
-  *most_free_chunk = (Chunk_Header*)(*most_free_chunk)->adj_prev;
+  *most_free_chunk = (*most_free_chunk)->prev;
   if(*most_free_chunk)
     (*most_free_chunk)->next = NULL;
   else
     *least_free_chunk = NULL;
+  assert(!result->next);
   return result;
 }
@@ -175,7 +178,6 @@
   }
   /* dest might be set to NULL, so we use *dest_ptr here */
-  (*dest_ptr)->alloc_num += src->alloc_num - alloc_num;
   assert((*dest_ptr)->alloc_num <= (*dest_ptr)->slot_num);
   src->alloc_num = alloc_num;
   if(!dest){
@@ -185,7 +187,7 @@
   }
 }

-static void sspace_move_objects(Collector *collector, Sspace *sspace)
+void sspace_compact(Collector *collector, Sspace *sspace)
 {
   Chunk_Header *least_free_chunk, *most_free_chunk;
   Pool *pfc_pool = sspace_grab_next_pfc_pool(sspace);
@@ -225,111 +227,4 @@
   }
 }

-static void sspace_init_chunk_for_ref_fixing(Sspace *sspace)
-{
-  next_chunk_for_fixing = (Chunk_Header_Basic*)space_heap_start((Space*)sspace);
-  next_chunk_for_fixing->adj_prev = NULL;
-}
-
-static void normal_chunk_fix_repointed_refs(Chunk_Header *chunk)
-{
-  /* Init field slot_index and depad the last index word in table for fixing */
-  chunk->slot_index = 0;
-  chunk_depad_last_index_word(chunk);
-
-  unsigned int alloc_num = chunk->alloc_num;
-  assert(alloc_num);
-
-  /* After compaction, many chunks are filled with objects.
-   * For these chunks, we needn't find the allocated slot one by one by calling next_alloc_slot_in_chunk.
-   * That is a little time consuming.
-   * We'd like to fix those objects by incrementing their addr to find the next.
-   */
-  if(alloc_num == chunk->slot_num){ /* Filled with objects */
-    unsigned int slot_size = chunk->slot_size;
-    Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)slot_index_to_addr(chunk, 0);
-    for(unsigned int i = alloc_num; i--;){
-      object_fix_ref_slots(p_obj);
-#ifdef SSPACE_VERIFY
-      sspace_verify_fix_in_compact();
-#endif
-      p_obj = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_obj + slot_size);
-    }
-  } else { /* Chunk is not full */
-    while(alloc_num){
-      Partial_Reveal_Object *p_obj = next_alloc_slot_in_chunk(chunk);
-      assert(p_obj);
-      object_fix_ref_slots(p_obj);
-#ifdef SSPACE_VERIFY
-      sspace_verify_fix_in_compact();
-#endif
-      --alloc_num;
-    }
-  }
-
-  if(chunk->alloc_num != chunk->slot_num){
-    chunk_pad_last_index_word(chunk, cur_alloc_mask);
-    pfc_reset_slot_index(chunk);
-  }
-}
-
-static void abnormal_chunk_fix_repointed_refs(Chunk_Header *chunk)
-{
-  object_fix_ref_slots((Partial_Reveal_Object*)chunk->base);
-#ifdef SSPACE_VERIFY
-  sspace_verify_fix_in_compact();
-#endif
-}
-
-static void sspace_fix_repointed_refs(Collector *collector, Sspace *sspace)
-{
-  Chunk_Header_Basic *chunk = sspace_grab_next_chunk(sspace, &next_chunk_for_fixing, TRUE);
-
-  while(chunk){
-    if(chunk->status & CHUNK_NORMAL)
-      normal_chunk_fix_repointed_refs((Chunk_Header*)chunk);
-    else if(chunk->status & CHUNK_ABNORMAL)
-      abnormal_chunk_fix_repointed_refs((Chunk_Header*)chunk);
-
-    chunk = sspace_grab_next_chunk(sspace, &next_chunk_for_fixing, TRUE);
-  }
-}
-
-static volatile unsigned int num_moving_collectors = 0;
-static volatile unsigned int num_fixing_collectors = 0;
-
-void compact_sspace(Collector *collector, Sspace *sspace)
-{
-  GC *gc = collector->gc;
-
-  unsigned int num_active_collectors = gc->num_active_collectors;
-
-  /* Pass 1: **************************************************
-     move live objects between pfcs with the same size *****************/
-  atomic_cas32(&num_moving_collectors, 0, num_active_collectors+1);
-
-  sspace_move_objects(collector, sspace);
-
-  unsigned int old_num = atomic_inc32(&num_moving_collectors);
-  if( ++old_num == num_active_collectors ){
-    /* last collector's world here */
-#ifdef SSPACE_TIME
-    sspace_compact_time(FALSE);
-#endif
-    sspace_init_chunk_for_ref_fixing(sspace);
-    /* let other collectors go */
-    num_moving_collectors++;
-  }
-  while(num_moving_collectors != num_active_collectors + 1);
-
-  /* Pass 2: **************************************************
-     sweep dead objects ***************************************/
-  atomic_cas32( &num_fixing_collectors, 0, num_active_collectors);
-
-  sspace_fix_repointed_refs(collector, sspace);
-
-  atomic_inc32(&num_fixing_collectors);
-  while(num_fixing_collectors != num_active_collectors);
-
-}
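For reference, sspace_compact (renamed above from sspace_move_objects) drains the emptiest pfcs into the fullest ones: get_least_free_chunk takes targets from one end of the sorted bucket list and get_most_free_chunk takes sources from the other. A toy model of that pairing, assuming chunks sorted by allocated slots in descending order and counting slots instead of moving real objects (the real code works on linked buckets and copies objects slot by slot):

#include <stdio.h>

typedef struct { int alloc_num, slot_num; } Chunk;

/* lo walks the fullest (dest) end, hi the emptiest (src) end. */
static void compact(Chunk *sorted, int n)  /* sorted by alloc_num, descending */
{
  int lo = 0, hi = n - 1;
  while(lo < hi){
    Chunk *dest = &sorted[lo], *src = &sorted[hi];
    int room  = dest->slot_num - dest->alloc_num;
    int moved = src->alloc_num < room ? src->alloc_num : room;
    dest->alloc_num += moved;                     /* objects slide into the fuller chunk */
    src->alloc_num  -= moved;
    if(dest->alloc_num == dest->slot_num) lo++;   /* dest full: retire it              */
    if(src->alloc_num == 0)               hi--;   /* src empty: it becomes a free chunk */
  }
}

int main(void)
{
  Chunk c[4] = {{7,8},{5,8},{2,8},{1,8}};
  compact(c, 4);
  for(int i = 0; i < 4; i++)
    printf("chunk %d: %d/%d\n", i, c[i].alloc_num, c[i].slot_num);
  return 0;
}

After the loop the live objects occupy the fewest chunks possible (8/8 and 7/8 here), and the emptied source chunks can be returned to the free chunk lists.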
Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_fallback_mark.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_fallback_mark.cpp?rev=570028&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_fallback_mark.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_fallback_mark.cpp Mon Aug 27 01:11:57 2007
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sspace_mark_sweep.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+static Sspace *sspace_in_fallback_marking;
+static FORCE_INLINE Boolean obj_mark(Partial_Reveal_Object *obj)
+{
+  if(obj_belongs_to_space(obj, (Space*)sspace_in_fallback_marking))
+    return obj_mark_black_in_table(obj);
+  else
+    return obj_mark_in_vt(obj);
+}
+
+static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
+{
+  if( read_slot(p_ref) == NULL) return;
+
+  collector_tracestack_push(collector, p_ref);
+}
+
+static FORCE_INLINE void scan_object(Collector *collector, REF *p_ref)
+{
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  assert(p_obj);
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
+
+  if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
+    assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj)));
+    p_obj = obj_get_fw_in_oi(p_obj);
+    assert(p_obj);
+    write_slot(p_ref, p_obj);
+  }
+
+  if(!obj_mark(p_obj))
+    return;
+
+  if(!object_has_ref_field(p_obj)) return;
+
+  if(object_is_array(p_obj)){ /* scan array object */
+    Partial_Reveal_Array *array = (Partial_Reveal_Array*)p_obj;
+    unsigned int array_length = array->array_len;
+
+    REF *p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+    for (unsigned int i = 0; i < array_length; i++)
+      scan_slot(collector, p_ref+i);
+
+    return;
+  }
+
+  /* scan non-array object */
+  unsigned int num_refs = object_ref_field_num(p_obj);
+  int *ref_iterator = object_ref_iterator_init(p_obj);
+
+  for(unsigned int i=0; i<num_refs; i++){
+    REF *p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
+    scan_slot(collector, p_ref);
+  }
+}
+
+static void trace_object(Collector *collector, REF *p_ref)
+{
+  scan_object(collector, p_ref);
+
+  Vector_Block *trace_stack = collector->trace_stack;
+  while(!vector_stack_is_empty(trace_stack)){
+    p_ref = (REF*)vector_stack_pop(trace_stack);
+    scan_object(collector, p_ref);
+    trace_stack = collector->trace_stack;
+  }
+}
+
+/* NOTE:: This is another marking version: marking in the color bitmap table.
+   Originally, we had to mark an object before putting it into the markstack, to
+   guarantee there is only one occurrence of an object in the markstack. This in turn
+   guarantees there is only one occurrence of a repointed ref slot in the repset (slots
+   are put into the set when the object is scanned). If the same object were put into
+   the markstack twice, it would be scanned twice and its ref slots would be recorded twice.
+   A problem occurs when a ref slot has already been updated the first time with the new
+   position: the second time, the value in the ref slot is not the old position as expected.
+   The code needs to read the original obj header for the forwarding pointer; with the new
+   value, it would read nonsense, since the obj has not been moved yet.
+   This could be worked around if we wanted, but it would require an atomic instruction
+   for marking, which is undesirable. So we abandoned this design. We no longer use the
+   repset to remember repointed slots.
+*/
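The note above hinges on one invariant: test-and-set the mark before pushing, so an object referenced from many slots still enters the mark stack at most once. Stripped of the Vector_Block/task-pool machinery, and with a plain boolean standing in for the atomic color-table CAS (obj_mark_gray_in_table), the pattern is just this sketch:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct Obj { bool marked; struct Obj *refs[2]; } Obj;

static Obj *stack[64];
static int top = 0;

/* Mark BEFORE pushing: this keeps every object on the mark stack at most
   once, even when many slots reference it, so each object is scanned once. */
static void mark_and_push(Obj *o)
{
  if(o && !o->marked){ o->marked = true; stack[top++] = o; }
}

static void trace(Obj *root)
{
  mark_and_push(root);
  while(top > 0){
    Obj *o = stack[--top];
    for(int i = 0; i < 2; i++)
      mark_and_push(o->refs[i]);
  }
}

int main(void)
{
  Obj shared = { false, { NULL, NULL } };
  Obj a = { false, { &shared, NULL } };
  Obj b = { false, { &shared, &a } };   /* shared is reachable through two slots */
  Obj root = { false, { &a, &b } };
  trace(&root);
  assert(shared.marked);                /* marked once, scanned once */
  return 0;
}

In the real collector the test-and-mark must be a word-level CAS on the color table, because several collectors race on the same table word.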
+
+/* for marking phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+void sspace_fallback_mark_scan(Collector *collector, Sspace *sspace)
+{
+  GC *gc = collector->gc;
+  GC_Metadata *metadata = gc->metadata;
+  sspace_in_fallback_marking = sspace;
+
+  /* reset num_finished_collectors to 0 by one collector. This is necessary for the barrier later. */
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  atomic_cas32(&num_finished_collectors, 0, num_active_collectors);
+
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+  /* first step: copy all root objects to mark tasks.
+     FIXME:: this can be done sequentially before coming here, to eliminate atomic ops */
+  while(root_set){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      REF *p_ref = (REF*)*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+
+      /* a root ref can't be NULL (the remset may have NULL ref entries, but this function is only for MAJOR_COLLECTION) */
+      assert(read_slot(p_ref) != NULL);
+      /* we have to mark an object before putting it into the marktask, because
+         two slots may contain the same object; it would be scanned twice and its
+         ref slots would be recorded twice. A problem occurs after a ref slot is
+         updated the first time with the new position: the second time, the value
+         in the ref slot is not the old position as expected.
+         This could be worked around if we wanted.
+      */
+      collector_tracestack_push(collector, p_ref);
+    }
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */
+  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+
+  /* second step: iterate over the mark tasks and scan objects */
+  /* get a task buf for the mark stack */
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+retry:
+  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
+
+  while(mark_task){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
+    while(!vector_block_iterator_end(mark_task, iter)){
+      REF *p_ref = (REF*)*iter;
+      iter = vector_block_iterator_advance(mark_task, iter);
+
+      /* FIXME:: we should not let mark_task become empty while working; others may
+         want to steal it. Degenerate my stack into mark_task, and grab another mark_task */
+      trace_object(collector, p_ref);
+    }
+    /* this task has run out: put it back to the pool and grab another one */
+    vector_stack_clear(mark_task);
+    pool_put_entry(metadata->free_task_pool, mark_task);
+    mark_task = pool_get_entry(metadata->mark_task_pool);
+  }
+
+  /* termination detection. This is also a barrier.
+     NOTE:: We can simply spin waiting on num_finished_collectors, because each
+     newly generated task will surely be processed by its generating collector eventually.
+     So the code below is only a load balance optimization.
*/ + atomic_inc32(&num_finished_collectors); + while(num_finished_collectors != num_active_collectors){ + if(!pool_is_empty(metadata->mark_task_pool)){ + atomic_dec32(&num_finished_collectors); + goto retry; + } + } + + /* put back the last mark stack to the free pool */ + mark_task = (Vector_Block*)collector->trace_stack; + vector_stack_clear(mark_task); + pool_put_entry(metadata->free_task_pool, mark_task); + collector->trace_stack = NULL; + + return; +} + +void trace_obj_in_ms_fallback_marking(Collector *collector, void *p_ref) +{ + trace_object(collector, (REF*)p_ref); +} Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_fallback_mark.cpp ------------------------------------------------------------------------------ svn:eol-style = native Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark.cpp Mon Aug 27 01:11:57 2007 @@ -18,6 +18,25 @@ #include "sspace_mark_sweep.h" #include "../finalizer_weakref/finalizer_weakref.h" +static Sspace *sspace_in_marking; +static FORCE_INLINE Boolean obj_mark_gray(Partial_Reveal_Object *obj) +{ + if(obj_belongs_to_space(obj, (Space*)sspace_in_marking)) + return obj_mark_gray_in_table(obj); + else + return obj_mark_in_vt(obj); +} + +static FORCE_INLINE Boolean obj_mark_black(Partial_Reveal_Object *obj) +{ + if(obj_belongs_to_space(obj, (Space*)sspace_in_marking)) + return obj_mark_black_in_table(obj); + else + return obj_mark_in_vt(obj); +} + + +/* The caller must be in places where alloc color and mark color haven't been flipped */ Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj) { unsigned int index_in_word; @@ -25,7 +44,7 @@ assert(p_color_word); POINTER_SIZE_INT color_word = *p_color_word; - POINTER_SIZE_INT mark_color = cur_mark_color << index_in_word; + POINTER_SIZE_INT mark_color = cur_mark_gray_color << index_in_word; return (Boolean)(color_word & mark_color); } @@ -36,7 +55,7 @@ if( p_obj == NULL) return; assert(address_belongs_to_gc_heap(p_obj, collector->gc)); - if(obj_mark_in_table(p_obj)){ + if(obj_mark_gray(p_obj)){ assert(p_obj); collector_tracestack_push(collector, p_obj); } @@ -50,7 +69,7 @@ if(VTABLE_TRACING) if(vtable->vtmark == VT_UNMARKED) { vtable->vtmark = VT_MARKED; - if(obj_mark_in_table(vtable->jlC)) + if(obj_mark_black(vtable->jlC)) collector_tracestack_push(collector, vtable->jlC); } @@ -87,11 +106,13 @@ static void trace_object(Collector *collector, Partial_Reveal_Object *p_obj) { scan_object(collector, p_obj); + obj_mark_black(p_obj); Vector_Block *trace_stack = collector->trace_stack; while(!vector_stack_is_empty(trace_stack)){ p_obj = (Partial_Reveal_Object*)vector_stack_pop(trace_stack); scan_object(collector, p_obj); + obj_mark_black(p_obj); trace_stack = collector->trace_stack; } } @@ -114,10 +135,11 @@ /* for marking phase termination detection */ static volatile unsigned int num_finished_collectors = 0; -void sspace_mark_scan(Collector *collector) +void sspace_mark_scan(Collector *collector, Sspace *sspace) { GC *gc = collector->gc; GC_Metadata *metadata = gc->metadata; + sspace_in_marking = sspace; /* reset the num_finished_collectors to be 0 by one collector. 
This is necessary for the barrier later. */
   unsigned int num_active_collectors = gc->num_active_collectors;
@@ -132,12 +154,12 @@
   while(root_set){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
     while(!vector_block_iterator_end(root_set,iter)){
-      REF *p_ref = (REF *)*iter;
+      REF *p_ref = (REF*)*iter;
       iter = vector_block_iterator_advance(root_set,iter);

       Partial_Reveal_Object *p_obj = read_slot(p_ref);
       /* a root ref can't be NULL (the remset may have NULL ref entries, but this function is only for MAJOR_COLLECTION) */
-      assert(p_obj!=NULL);
+      assert(p_obj != NULL);
       /* we have to mark an object before putting it into the marktask, because
          two slots may contain the same object; it would be scanned twice and
          its ref slots would be recorded twice. A problem
@@ -146,7 +168,7 @@
         This could be worked around if we wanted.
       */
       assert(address_belongs_to_gc_heap(p_obj, gc));
-      if(obj_mark_in_table(p_obj))
+      if(obj_mark_gray(p_obj))
         collector_tracestack_push(collector, p_obj);
     }
     root_set = pool_iterator_next(metadata->gc_rootset_pool);
@@ -163,9 +185,9 @@

   while(mark_task){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
-    while(!vector_block_iterator_end(mark_task,iter)){
+    while(!vector_block_iterator_end(mark_task, iter)){
       Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
-      iter = vector_block_iterator_advance(mark_task,iter);
+      iter = vector_block_iterator_advance(mark_task, iter);

       /* FIXME:: we should not let mark_task become empty while working; others may
          want to steal it. Degenerate my stack into mark_task, and grab another mark_task */
@@ -200,6 +222,6 @@

 void trace_obj_in_ms_marking(Collector *collector, void *p_obj)
 {
-  obj_mark_in_table((Partial_Reveal_Object*)p_obj);
-  trace_object(collector, (Partial_Reveal_Object *)p_obj);
+  obj_mark_gray((Partial_Reveal_Object*)p_obj);
+  trace_object(collector, (Partial_Reveal_Object*)p_obj);
 }

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_concurrent.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_concurrent.cpp?rev=570028&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_concurrent.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_concurrent.cpp Mon Aug 27 01:11:57 2007
@@ -0,0 +1,235 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sspace_mark_sweep.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+#include "../thread/marker.h"
+
+Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj);
+
+static FORCE_INLINE void scan_slot(Collector* marker, REF *p_ref)
+{
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  if( p_obj == NULL) return;
+
+  assert(address_belongs_to_gc_heap(p_obj, marker->gc));
+  if(obj_mark_gray_in_table(p_obj)){
+    assert(p_obj);
+    collector_tracestack_push((Collector*)marker, p_obj);
+  }
+}
+
+static FORCE_INLINE void scan_object(Marker* marker, Partial_Reveal_Object *p_obj)
+{
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
+
+  if(obj_is_dirty_in_table(p_obj)){
+    assert(obj_is_mark_black_in_table(p_obj));
+    return;
+  }
+
+  if(!object_has_ref_field(p_obj)) return;
+
+  REF *p_ref;
+
+  if(object_is_array(p_obj)){ /* scan array object */
+    Partial_Reveal_Array *array = (Partial_Reveal_Array*)p_obj;
+    unsigned int array_length = array->array_len;
+
+    p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+    for (unsigned int i = 0; i < array_length; i++)
+      scan_slot((Collector*)marker, p_ref+i);
+
+    return;
+  }
+
+  /* scan non-array object */
+  unsigned int num_refs = object_ref_field_num(p_obj);
+  int *ref_iterator = object_ref_iterator_init(p_obj);
+
+  for(unsigned int i=0; i<num_refs; i++){
+    p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
+    scan_slot((Collector*)marker, p_ref);
+  }
+}
+
+static void trace_object(Marker* marker, Partial_Reveal_Object *p_obj)
+{
+  scan_object(marker, p_obj);
+  obj_mark_black_in_table(p_obj);
+
+  Vector_Block *trace_stack = marker->trace_stack;
+  while(!vector_stack_is_empty(trace_stack)){
+    p_obj = (Partial_Reveal_Object*)vector_stack_pop(trace_stack);
+    scan_object(marker, p_obj);
+    obj_mark_black_in_table(p_obj);
+    trace_stack = marker->trace_stack;
+  }
+}
+
+static Boolean concurrent_mark_need_terminating(GC* gc)
+{
+  GC_Metadata *metadata = gc->metadata;
+  return gc_local_snapshot_is_empty(gc) && pool_is_empty(metadata->dirty_obj_snaptshot_pool);
+}
+
+/* for marking phase termination detection */
+static volatile unsigned int num_active_markers = 0;
+
+void sspace_mark_scan_concurrent(Marker* marker)
+{
+  int64 time_mark_start = time_now();
+  GC *gc = marker->gc;
+  GC_Metadata *metadata = gc->metadata;
+
+  /* register this marker as active; the counter drives the termination detection later. */
+  atomic_inc32(&num_active_markers);
+
+  marker->trace_stack = free_task_pool_get_entry(metadata);
+
+  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+  /* first step: copy all root objects to mark tasks.
+     FIXME:: this can be done sequentially before coming here, to eliminate atomic ops */
+  while(root_set){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      REF *p_ref = (REF *)*iter;
+      iter = vector_block_iterator_advance(root_set,iter);

+      Partial_Reveal_Object *p_obj = read_slot(p_ref);
+      /* a root ref can't be NULL (the remset may have NULL ref entries, but this function is only for MAJOR_COLLECTION) */
+      assert(p_obj!=NULL);
+      /* we have to mark an object before putting it into the marktask, because
+         two slots may contain the same object; it would be scanned twice and its
+         ref slots would be recorded twice. A problem occurs after a ref slot is
+         updated the first time with the new position: the second time, the value
+         in the ref slot is not the old position as expected.
+         This could be worked around if we wanted.
+      */
+      assert(address_belongs_to_gc_heap(p_obj, gc));
+      if(obj_mark_gray_in_table(p_obj))
+        collector_tracestack_push((Collector*)marker, p_obj);
+    }
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */
+  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
+
+  marker_notify_mark_root_done(marker);
+
+  /* second step: mark the dirty object snapshot pool */
+  marker->trace_stack = free_task_pool_get_entry(metadata);
+
+retry:
+
+  Vector_Block* snapshot_set = pool_get_entry(metadata->dirty_obj_snaptshot_pool);
+
+  while(snapshot_set){
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(snapshot_set);
+    while(!vector_block_iterator_end(snapshot_set,iter)){
+      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
+      iter = vector_block_iterator_advance(snapshot_set,iter);
+
+      assert(p_obj!=NULL); //ynhe, restrict?
+      if(obj_mark_gray_in_table(p_obj))
+        collector_tracestack_push((Collector*)marker, p_obj);
+    }
+    vector_block_clear(snapshot_set);
+    pool_put_entry(metadata->free_set_pool, snapshot_set);
+    snapshot_set = pool_get_entry(metadata->dirty_obj_snaptshot_pool);
+  }
+
+  /* put back the last trace_stack task */
+  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
+
+  /* third step: iterate over the mark tasks and scan objects */
+  /* get a task buf for the mark stack */
+  marker->trace_stack = free_task_pool_get_entry(metadata);
+
+  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
+
+  while(mark_task){
+    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
+    while(!vector_block_iterator_end(mark_task,iter)){
+      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
+      iter = vector_block_iterator_advance(mark_task,iter);
+      trace_object(marker, p_obj);
+    }
+    /* this task has run out: put it back to the pool and grab another one */
+    vector_stack_clear(mark_task);
+    pool_put_entry(metadata->free_task_pool, mark_task);
+    mark_task = pool_get_entry(metadata->mark_task_pool);
+  }
+
+  /* termination condition:
+     1. all threads have finished their current jobs;
+     2. all local snapshot vectors are empty;
+     3. the global snapshot pool is empty.
+  */
+  atomic_dec32(&num_active_markers);
+  while(num_active_markers != 0 || !concurrent_mark_need_terminating(gc)){
+    if(!pool_is_empty(metadata->mark_task_pool) || !pool_is_empty(metadata->dirty_obj_snaptshot_pool)){
+      atomic_inc32(&num_active_markers);
+      goto retry;
+    }else{
+      /* grab a block from a mutator and begin tracing */
+      POINTER_SIZE_INT thread_num = (POINTER_SIZE_INT)marker->thread_handle;
+      Vector_Block* local_snapshot_set = gc_get_local_snapshot(gc, (unsigned int)(thread_num + 1));
+      /* 1. If local_snapshot_set has its full bit set, the block is full and will no longer
+            be put into the global snapshot pool, so it must be checked again for unscanned
+            entries remaining in it. In that case the shared bit in local_snapshot_set must
+            not be cleared, because the block is rescanned exclusively.
+         2. If local_snapshot_set does not have its full bit set, the block is still used by
+            the mutator and may yet be put into the global snapshot pool. In that case we
+            simply clear the shared bit in local_snapshot_set.
+ */ + if(local_snapshot_set != NULL){ + atomic_inc32(&num_active_markers); + while(!vector_block_is_empty(local_snapshot_set) || !vector_block_not_full_set_unshared(local_snapshot_set)){ + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*) vector_block_get_entry(local_snapshot_set); + if(obj_mark_gray_in_table(p_obj)) + collector_tracestack_push((Collector*)marker, p_obj); + } + goto retry; + } + } + } + + /* put back the last mark stack to the free pool */ + mark_task = (Vector_Block*)marker->trace_stack; + vector_stack_clear(mark_task); + pool_put_entry(metadata->free_task_pool, mark_task); + marker->trace_stack = NULL; + assert(pool_is_empty(metadata->dirty_obj_snaptshot_pool)); + + int64 time_mark = time_now() - time_mark_start; + marker->time_mark = time_mark; + + return; +} + +void trace_obj_in_ms_concurrent_mark(Marker *marker, void *p_obj) +{ + obj_mark_gray_in_table((Partial_Reveal_Object*)p_obj); + trace_object(marker, (Partial_Reveal_Object *)p_obj); +} + Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_concurrent.cpp ------------------------------------------------------------------------------ svn:eol-style = native Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp Mon Aug 27 01:11:57 2007 @@ -15,26 +15,31 @@ * limitations under the License. */ +#include "sspace_alloc.h" #include "sspace_mark_sweep.h" #include "sspace_verify.h" #include "gc_ms.h" #include "../gen/gen.h" #include "../thread/collector.h" #include "../finalizer_weakref/finalizer_weakref.h" +#include "../common/fix_repointed_refs.h" +#include "../common/gc_concurrent.h" - +POINTER_SIZE_INT cur_alloc_mask = (~MARK_MASK_IN_TABLE) & FLIP_COLOR_MASK_IN_TABLE; +POINTER_SIZE_INT cur_mark_mask = MARK_MASK_IN_TABLE; POINTER_SIZE_INT cur_alloc_color = OBJ_COLOR_WHITE; -POINTER_SIZE_INT cur_mark_color = OBJ_COLOR_BLACK; -POINTER_SIZE_INT cur_alloc_mask = ~BLACK_MASK_IN_TABLE; -POINTER_SIZE_INT cur_mark_mask = BLACK_MASK_IN_TABLE; +POINTER_SIZE_INT cur_mark_gray_color = OBJ_COLOR_GRAY; +POINTER_SIZE_INT cur_mark_black_color = OBJ_COLOR_BLACK; + +static Chunk_Header_Basic *volatile next_chunk_for_fixing; static void ops_color_flip(void) { POINTER_SIZE_INT temp = cur_alloc_color; - cur_alloc_color = cur_mark_color; - cur_mark_color = temp; - cur_alloc_mask = ~cur_alloc_mask; - cur_mark_mask = ~cur_mark_mask; + cur_alloc_color = cur_mark_black_color; + cur_mark_black_color = temp; + cur_alloc_mask = (~cur_alloc_mask) & FLIP_COLOR_MASK_IN_TABLE; + cur_mark_mask = (~cur_mark_mask) & FLIP_COLOR_MASK_IN_TABLE; } void collector_init_free_chunk_list(Collector *collector) @@ -66,21 +71,134 @@ } +static void sspace_init_chunk_for_ref_fixing(Sspace *sspace) +{ + next_chunk_for_fixing = (Chunk_Header_Basic*)space_heap_start((Space*)sspace); + next_chunk_for_fixing->adj_prev = NULL; +} + +static void nos_init_block_for_forwarding(GC_Gen *gc_gen) +{ blocked_space_block_iterator_init((Blocked_Space*)gc_get_nos(gc_gen)); } + +static inline void block_forward_live_objects(Collector *collector, Sspace *sspace, Block_Header *cur_block) +{ + void *start_pos; + Partial_Reveal_Object 
*p_obj = block_get_first_marked_object(cur_block, &start_pos);
+
+  while(p_obj){
+    assert(obj_is_marked_in_vt(p_obj));
+    obj_clear_dual_bits_in_vt(p_obj);
+    Partial_Reveal_Object *p_target_obj = collector_forward_object(collector, p_obj); /* Could be implemented with an optimized function */
+    if(!p_target_obj){
+      assert(collector->gc->collect_result == FALSE);
+      printf("Out of mem in forwarding nos!\n");
+      exit(0);
+    }
+    p_obj = block_get_next_marked_object(cur_block, &start_pos);
+  }
+}
+
+static void collector_forward_nos_to_sspace(Collector *collector, Sspace *sspace)
+{
+  Blocked_Space *nos = (Blocked_Space*)gc_get_nos((GC_Gen*)collector->gc);
+  Block_Header *cur_block = blocked_space_block_iterator_next(nos);
+
+  /* We must iterate over all nos blocks to forward the live objects in them */
+  while(cur_block){
+    block_forward_live_objects(collector, sspace, cur_block);
+    cur_block = blocked_space_block_iterator_next(nos);
+  }
+}
+
+static void normal_chunk_fix_repointed_refs(Chunk_Header *chunk)
+{
+  /* Init field slot_index and depad the last index word in table for fixing */
+  chunk->slot_index = 0;
+  chunk_depad_last_index_word(chunk);
+
+  unsigned int alloc_num = chunk->alloc_num;
+  assert(alloc_num);
+
+  /* After compaction, many chunks are filled with objects.
+   * For these chunks, we needn't find the allocated slots one by one by calling
+   * next_alloc_slot_in_chunk, which is a little time consuming.
+   * Instead, we fix the objects one after another by incrementing the addr to find the next.
+   */
+  if(alloc_num == chunk->slot_num){ /* Filled with objects */
+    unsigned int slot_size = chunk->slot_size;
+    Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)slot_index_to_addr(chunk, 0);
+    for(unsigned int i = alloc_num; i--;){
+      object_fix_ref_slots(p_obj);
+#ifdef SSPACE_VERIFY
+      sspace_verify_fix_in_compact();
+#endif
+      p_obj = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_obj + slot_size);
+    }
+  } else { /* Chunk is not full */
+    while(alloc_num){
+      Partial_Reveal_Object *p_obj = next_alloc_slot_in_chunk(chunk);
+      assert(p_obj);
+      object_fix_ref_slots(p_obj);
+#ifdef SSPACE_VERIFY
+      sspace_verify_fix_in_compact();
+#endif
+      --alloc_num;
+    }
+  }
+
+  if(chunk->alloc_num != chunk->slot_num){
+    chunk_pad_last_index_word(chunk, cur_alloc_mask);
+    pfc_reset_slot_index(chunk);
+  }
+}
+
+static void abnormal_chunk_fix_repointed_refs(Chunk_Header *chunk)
+{
+  object_fix_ref_slots((Partial_Reveal_Object*)chunk->base);
+#ifdef SSPACE_VERIFY
+  sspace_verify_fix_in_compact();
+#endif
+}
+
+static void sspace_fix_repointed_refs(Collector *collector, Sspace *sspace)
+{
+  Chunk_Header_Basic *chunk = sspace_grab_next_chunk(sspace, &next_chunk_for_fixing, TRUE);
+
+  while(chunk){
+    if(chunk->status & CHUNK_NORMAL)
+      normal_chunk_fix_repointed_refs((Chunk_Header*)chunk);
+    else if(chunk->status & CHUNK_ABNORMAL)
+      abnormal_chunk_fix_repointed_refs((Chunk_Header*)chunk);
+
+    chunk = sspace_grab_next_chunk(sspace, &next_chunk_for_fixing, TRUE);
+  }
+}
+
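mark_sweep_sspace below sequences its passes with a small "last collector" barrier: the counter is re-armed by CAS, every collector increments it when its share of the pass is done, and the last one performs the single-threaded transition work before releasing the rest by bumping the counter to num_active_collectors+1. A self-contained sketch of that pattern, using GCC __sync builtins in place of the VM's atomic_cas32/atomic_inc32 (the builtin mapping is an assumption, not taken from the VM headers):

#include <stdio.h>

static volatile unsigned int num_done = 0;

/* One pass of the barrier for num_active collector threads. */
static void pass_barrier(unsigned int num_active, void (*transition)(void))
{
  /* Re-arm: if the counter still holds num_active+1 from the previous
     cycle, one thread's CAS resets it to 0; for the others it fails benignly. */
  __sync_val_compare_and_swap(&num_done, num_active + 1, 0);

  /* ... each collector does its share of the pass work here ... */

  unsigned int old_num = __sync_fetch_and_add(&num_done, 1);  /* returns old value */
  if(old_num + 1 == num_active){
    /* last collector's world: single-threaded phase-transition work */
    if(transition) transition();
    num_done++;                        /* counter hits num_active+1: release everyone */
  }
  while(num_done != num_active + 1);   /* spin until the last collector releases us */
}

int main(void)
{
  pass_barrier(1, NULL);               /* trivially exercises the single-thread case */
  printf("passed: num_done = %u\n", num_done);
  return 0;
}

The passes that need no transition work (forwarding, ref fixing) use the simpler form visible below: CAS to 0, increment, then spin until the counter equals num_active_collectors.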
 static volatile unsigned int num_marking_collectors = 0;
 static volatile unsigned int num_sweeping_collectors = 0;
+static volatile unsigned int num_compacting_collectors = 0;
+static volatile unsigned int num_forwarding_collectors = 0;
+static volatile unsigned int num_fixing_collectors = 0;

 void mark_sweep_sspace(Collector *collector)
 {
   GC *gc = collector->gc;
   Sspace *sspace = gc_get_sspace(gc);
+  Space *nos = NULL;
+  if(gc_match_kind(gc, MAJOR_COLLECTION))
+    nos = gc_get_nos((GC_Gen*)gc);

   unsigned int num_active_collectors = gc->num_active_collectors;

   /* Pass 1: **************************************************
-     mark all live objects in heap ****************************/
+     Mark all live objects in heap ****************************/
   atomic_cas32(&num_marking_collectors, 0, num_active_collectors+1);

-  sspace_mark_scan(collector);
+  if(gc_match_kind(gc, FALLBACK_COLLECTION))
+    sspace_fallback_mark_scan(collector, sspace);
+  else
+    sspace_mark_scan(collector, sspace);

   unsigned int old_num = atomic_inc32(&num_marking_collectors);
   if( ++old_num == num_active_collectors ){
@@ -104,7 +222,7 @@
   while(num_marking_collectors != num_active_collectors + 1);

   /* Pass 2: **************************************************
-     sweep dead objects ***************************************/
+     Sweep dead objects ***************************************/
   atomic_cas32( &num_sweeping_collectors, 0, num_active_collectors+1);

   sspace_sweep(collector, sspace);
@@ -121,36 +239,79 @@
#ifdef SSPACE_VERIFY
     sspace_verify_after_sweep(gc);
#endif
-    if(sspace->need_compact){
+
+    sspace_merge_free_chunks(gc, sspace);
+
+    if(gc_match_kind(gc, MAJOR_COLLECTION))
+      nos_init_block_for_forwarding((GC_Gen*)gc);
+    if(sspace->need_compact)
       sspace_init_pfc_pool_iterator(sspace);
-    }
+    if(sspace->need_fix)
+      sspace_init_chunk_for_ref_fixing(sspace);
     /* let other collectors go */
     num_sweeping_collectors++;
   }
   while(num_sweeping_collectors != num_active_collectors + 1);

-  if(sspace->need_compact)
-    compact_sspace(collector, sspace);
+  /* Optional Pass: *******************************************
+     Forward live obj in nos to mos (sspace) ******************/
+  if(gc_match_kind(gc, MAJOR_COLLECTION)){
+    atomic_cas32( &num_forwarding_collectors, 0, num_active_collectors);
+
+    collector_forward_nos_to_sspace(collector, sspace);
+
+    atomic_inc32(&num_forwarding_collectors);
+    while(num_forwarding_collectors != num_active_collectors);
+  }
+
+  /* Optional Pass: *******************************************
+     Compact pfcs with the same size **************************/
+  if(sspace->need_compact){
+    atomic_cas32(&num_compacting_collectors, 0, num_active_collectors+1);
+
+    sspace_compact(collector, sspace);
+
+    /* If we need to forward nos to mos, i.e. in a major collection, an extra fixing phase after compaction is needed. */
+    old_num = atomic_inc32(&num_compacting_collectors);
+    if( ++old_num == num_active_collectors ){
+      sspace_remerge_free_chunks(gc, sspace);
+      /* let other collectors go */
+      num_compacting_collectors++;
+    }
+    while(num_compacting_collectors != num_active_collectors + 1);
+  }
+
+  /* Optional Pass: *******************************************
+     Fix repointed refs ***************************************/
+  if(sspace->need_fix){
+    atomic_cas32( &num_fixing_collectors, 0, num_active_collectors);
+
+    sspace_fix_repointed_refs(collector, sspace);
+
+    atomic_inc32(&num_fixing_collectors);
+    while(num_fixing_collectors != num_active_collectors);
+  }

   if( collector->thread_handle != 0 )
     return;

-  if(sspace->need_compact){
+  /* Leftover: *************************************************/
+
+  if(sspace->need_fix){
     gc_fix_rootset(collector);
#ifdef SSPACE_TIME
     sspace_fix_time(FALSE);
#endif
   }

-  gc_collect_free_chunks(gc, sspace);
-#ifdef SSPACE_TIME
-  sspace_merge_time(FALSE);
-#endif
-
-  /* Leftover: ************************************************ */
-
-  gc->root_set = NULL;  // FIXME:: should be placed to a more appopriate place
+  //gc->root_set = NULL;  // FIXME:: should be placed to a more appropriate place
   gc_set_pool_clear(gc->metadata->gc_rootset_pool);

+#ifdef USE_MARK_SWEEP_GC
+  sspace_set_space_statistic(sspace);
+#endif
+
+  if(gc_match_kind(gc, MAJOR_COLLECTION))
+    gc_clear_collector_local_chunks(gc);

#ifdef SSPACE_VERIFY
   sspace_verify_after_collection(gc);
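The diff of sspace_mark_sweep.h below re-encodes the side table: each object now gets four color bits instead of two (alloc/white 0x1, black 0x2, gray 0x4, dirty 0x8, per the OBJ_*_BIT_IN_TABLE defines), and a color flip only toggles the low two bits of each nibble, which is what FLIP_COLOR_MASK_IN_TABLE (0x3333...) is for. A worked example on a 32-bit table word, i.e. eight 4-bit slots per word; the bit values come from the defines below, the rest is illustrative:

#include <stdio.h>

int main(void)
{
  /* One 32-bit table word covers 8 slots, 4 color bits each:
     0x1 = alloc(white), 0x2 = black, 0x4 = gray, 0x8 = dirty. */
  unsigned int word = 0;
  unsigned int slot = 5;
  unsigned int index_in_word = 4 * slot;   /* COLOR_BITS_PER_OBJ * slot = 20 */

  word |= 0x1u << index_in_word;           /* slot 5 allocated (white) */
  word |= 0x4u << index_in_word;           /* slot 5 marked gray       */
  word |= 0x2u << index_in_word;           /* slot 5 promoted to black */
  printf("word = 0x%08x\n", word);         /* prints 0x00700000        */

  /* ops_color_flip swaps the alloc and black roles; masking with
     FLIP_COLOR_MASK_IN_TABLE keeps gray/dirty bits out of the masks. */
  unsigned int alloc_mask = (~0x11111111u) & 0x33333333u;  /* -> 0x22222222 */
  unsigned int mark_mask  = (~0x22222222u) & 0x33333333u;  /* -> 0x11111111 */
  printf("alloc_mask = 0x%08x, mark_mask = 0x%08x\n", alloc_mask, mark_mask);
  return 0;
}

Keeping gray and dirty in separate, never-flipped bit positions is what lets the concurrent marker test and set them with plain OR/CAS while the alloc/black meanings rotate between collections.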
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h?rev=570028&r1=570027&r2=570028&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h Mon Aug 27 01:11:57 2007
@@ -27,22 +27,34 @@
 inline Boolean chunk_is_reusable(Chunk_Header *chunk)
 { return (float)(chunk->slot_num-chunk->alloc_num)/chunk->slot_num > PFC_REUSABLE_RATIO; }

+#define OBJ_ALLOC_BIT_IN_TABLE 0x01
+#define OBJ_BLACK_BIT_IN_TABLE 0x02
+#define OBJ_GRAY_BIT_IN_TABLE 0x04
+#define OBJ_COLOR_BIT_IN_TABLE 0x06
+#define OBJ_DIRTY_BIT_IN_TABLE 0x08
+
 enum Obj_Color {
-  OBJ_COLOR_BLUE = 0x0,
-  OBJ_COLOR_WHITE = 0x1,
-  OBJ_COLOR_BLACK = 0x2,
-  OBJ_COLOR_GRAY = 0x3,
-  OBJ_COLOR_MASK = 0x3
+  OBJ_COLOR_BLUE = 0x0,
+  OBJ_COLOR_WHITE = OBJ_ALLOC_BIT_IN_TABLE,
+  OBJ_COLOR_GRAY = OBJ_GRAY_BIT_IN_TABLE,
+  OBJ_COLOR_BLACK = OBJ_BLACK_BIT_IN_TABLE,
+  OBJ_COLOR_MASK = OBJ_COLOR_BIT_IN_TABLE
 };

 #ifdef POINTER64
-  #define BLACK_MASK_IN_TABLE ((POINTER_SIZE_INT)0xAAAAAAAAAAAAAAAA)
+  //#define BLACK_MASK_IN_TABLE ((POINTER_SIZE_INT)0xAAAAAAAAAAAAAAAA)
+  #define MARK_MASK_IN_TABLE ((POINTER_SIZE_INT)0x2222222222222222)
+  #define FLIP_COLOR_MASK_IN_TABLE ((POINTER_SIZE_INT)0x3333333333333333)
+  //#define DIRTY_MASK_IN_TABLE ((POINTER_SIZE_INT)0x4444444444444444)
 #else
-  #define BLACK_MASK_IN_TABLE ((POINTER_SIZE_INT)0xAAAAAAAA)
+  #define MARK_MASK_IN_TABLE ((POINTER_SIZE_INT)0x22222222)
+  #define FLIP_COLOR_MASK_IN_TABLE ((POINTER_SIZE_INT)0x33333333)
+  //#define DIRTY_MASK_IN_TABLE ((POINTER_SIZE_INT)0x44444444)
 #endif

 extern POINTER_SIZE_INT cur_alloc_color;
-extern POINTER_SIZE_INT cur_mark_color;
+extern POINTER_SIZE_INT cur_mark_gray_color;
+extern POINTER_SIZE_INT cur_mark_black_color;
 extern POINTER_SIZE_INT cur_alloc_mask;
 extern POINTER_SIZE_INT
cur_mark_mask; @@ -73,7 +85,25 @@ return &chunk->table[word_index]; } +inline POINTER_SIZE_INT *get_color_word_in_table(Partial_Reveal_Object *obj, unsigned int &index_in_word, unsigned int size) +{ + Chunk_Header *chunk; + unsigned int index; + + if(size > SUPER_OBJ_THRESHOLD){ + chunk = ABNORMAL_CHUNK_HEADER(obj); + index = 0; + } else { + chunk = NORMAL_CHUNK_HEADER(obj); + index = slot_addr_to_index(chunk, obj); + } + unsigned int word_index = index >> 3; + index_in_word = COLOR_BITS_PER_OBJ * (index & (((unsigned int)(SLOT_NUM_PER_WORD_IN_TABLE-1)))); + + return &chunk->table[word_index]; +} +#if 0 /* Accurate marking: TRUE stands for being marked by this collector, and FALSE for another collector */ inline Boolean obj_mark_in_table(Partial_Reveal_Object *obj) { @@ -106,6 +136,207 @@ return FALSE; } +#endif + +inline Boolean obj_is_mark_gray_in_table(Partial_Reveal_Object *obj) +{ + POINTER_SIZE_INT *p_color_word; + unsigned int index_in_word; + p_color_word = get_color_word_in_table(obj, index_in_word); + POINTER_SIZE_INT current_word = *p_color_word; + POINTER_SIZE_INT mark_gray_color = cur_mark_gray_color << index_in_word; + POINTER_SIZE_INT mark_black_color = cur_mark_black_color<< index_in_word; + + + if(current_word & mark_gray_color && !(current_word & mark_black_color)) + return TRUE; + else + return FALSE; +} + +inline Boolean obj_is_mark_black_in_table(Partial_Reveal_Object *obj) +{ + POINTER_SIZE_INT *p_color_word; + unsigned int index_in_word; + p_color_word = get_color_word_in_table(obj, index_in_word); + POINTER_SIZE_INT current_word = *p_color_word; + POINTER_SIZE_INT mark_black_color = cur_mark_black_color << index_in_word; + + if(current_word & mark_black_color) + return TRUE; + else + return FALSE; + +} + +inline Boolean obj_is_mark_black_in_table(Partial_Reveal_Object *obj, unsigned int size) +{ + POINTER_SIZE_INT *p_color_word; + unsigned int index_in_word; + p_color_word = get_color_word_in_table(obj, index_in_word, size); + POINTER_SIZE_INT current_word = *p_color_word; + POINTER_SIZE_INT mark_black_color = cur_mark_black_color << index_in_word; + + if(current_word & mark_black_color) + return TRUE; + else + return FALSE; + +} + + +inline Boolean obj_mark_gray_in_table(Partial_Reveal_Object *obj) +{ + volatile POINTER_SIZE_INT *p_color_word; + unsigned int index_in_word; + p_color_word = get_color_word_in_table(obj, index_in_word); + assert(p_color_word); + + //POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word); + POINTER_SIZE_INT mark_color = cur_mark_gray_color << index_in_word; + + POINTER_SIZE_INT old_word = *p_color_word; + if(old_word & mark_color) return FALSE; /*already marked gray or black.*/ + + //POINTER_SIZE_INT new_word = (old_word & color_bits_mask) | mark_color; + POINTER_SIZE_INT new_word = old_word | mark_color; + while(new_word != old_word) { + POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word); + if(temp == old_word){ + return TRUE; /*returning true does not mean it's marked by this thread. 
*/ + } + old_word = *p_color_word; + if(old_word & mark_color) return FALSE; /*already marked gray or black.*/ + + //new_word = (old_word & color_bits_mask) | mark_color; + new_word = old_word | mark_color; + } + + return FALSE; +} + +inline Boolean obj_mark_black_in_table(Partial_Reveal_Object *obj, unsigned int size) +{ + //assert(obj_is_mark_in_table(obj)); + volatile POINTER_SIZE_INT *p_color_word; + unsigned int index_in_word; + p_color_word = get_color_word_in_table(obj, index_in_word, size); + assert(p_color_word); + + //POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word); + POINTER_SIZE_INT mark_black_color = cur_mark_black_color << index_in_word; + + POINTER_SIZE_INT old_word = *p_color_word; + if(old_word & mark_black_color) return FALSE; /*already marked black*/ + + POINTER_SIZE_INT new_word = old_word | mark_black_color; + while(new_word != old_word) { + POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word); + if(temp == old_word){ + return TRUE; /*returning true does not mean it's marked by this thread. */ + } + old_word = *p_color_word; + if(old_word & mark_black_color) return FALSE; /*already marked black*/ + + new_word = old_word | mark_black_color; + } + + return FALSE; + +} + +inline Boolean obj_mark_black_in_table(Partial_Reveal_Object *obj) +{ + // assert(obj_is_mark_in_table(obj)); + volatile POINTER_SIZE_INT *p_color_word; + unsigned int index_in_word; + p_color_word = get_color_word_in_table(obj, index_in_word); + assert(p_color_word); + + POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word); + POINTER_SIZE_INT mark_black_color = cur_mark_black_color << index_in_word; + + POINTER_SIZE_INT old_word = *p_color_word; + if(obj_is_mark_black_in_table(obj)) return FALSE; /*already marked black*/ + + POINTER_SIZE_INT new_word = old_word | mark_black_color; + while(new_word != old_word) { + POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word); + if(temp == old_word){ + return TRUE; /*returning true does not mean it's marked by this thread. */ + } + old_word = *p_color_word; + if(obj_is_mark_black_in_table(obj)) return FALSE; /*already marked black*/ + + new_word = old_word | mark_black_color; + } + + return FALSE; +} + +inline Boolean obj_dirty_in_table(Partial_Reveal_Object *obj) +{ + volatile POINTER_SIZE_INT *p_color_word; + unsigned int index_in_word; + p_color_word = get_color_word_in_table(obj, index_in_word); + assert(p_color_word); + + POINTER_SIZE_INT obj_dirty_bit_in_word = OBJ_DIRTY_BIT_IN_TABLE<< index_in_word; + + POINTER_SIZE_INT old_word = *p_color_word; + if(old_word & obj_dirty_bit_in_word) return FALSE; + + POINTER_SIZE_INT new_word = old_word | obj_dirty_bit_in_word; + while(new_word != old_word) { + POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word); + if(temp == old_word){ + return TRUE; /*returning true does not mean it's marked by this thread. 
*/ + } + old_word = *p_color_word; + if(old_word & obj_dirty_bit_in_word) return FALSE; + + new_word = old_word | obj_dirty_bit_in_word; + } + + return FALSE; +} + +inline Boolean obj_is_dirty_in_table(Partial_Reveal_Object *obj) +{ + POINTER_SIZE_INT *p_color_word; + unsigned int index_in_word; + p_color_word = get_color_word_in_table(obj, index_in_word); + POINTER_SIZE_INT current_word = *p_color_word; + POINTER_SIZE_INT obj_dirty_bit_in_word = OBJ_DIRTY_BIT_IN_TABLE<< index_in_word; + + + if(current_word & obj_dirty_bit_in_word) + return TRUE; + else + return FALSE; +} + +inline Boolean obj_is_alloc_color_in_table(Partial_Reveal_Object *obj) +{ + POINTER_SIZE_INT *p_color_word; + unsigned int index_in_word; + p_color_word = get_color_word_in_table(obj, index_in_word); + POINTER_SIZE_INT current_word = *p_color_word; + POINTER_SIZE_INT obj_alloc_color_bit_in_word = cur_alloc_color<< index_in_word; + + + if(current_word & obj_alloc_color_bit_in_word) + return TRUE; + else + return FALSE; + +} + +inline Boolean obj_need_take_snaptshot(Partial_Reveal_Object *obj) +{ + return !obj_is_mark_black_in_table(obj) && !obj_is_dirty_in_table(obj); +} + inline void collector_add_free_chunk(Collector *collector, Free_Chunk *chunk) { Free_Chunk_List *list = collector->free_chunk_list; @@ -121,11 +352,13 @@ } -extern void sspace_mark_scan(Collector *collector); +extern void sspace_mark_scan(Collector *collector, Sspace *sspace); +extern void sspace_fallback_mark_scan(Collector *collector, Sspace *sspace); extern void gc_init_chunk_for_sweep(GC *gc, Sspace *sspace); extern void sspace_sweep(Collector *collector, Sspace *sspace); -extern void compact_sspace(Collector *collector, Sspace *sspace); -extern void gc_collect_free_chunks(GC *gc, Sspace *sspace); +extern void sspace_compact(Collector *collector, Sspace *sspace); +extern void sspace_merge_free_chunks(GC *gc, Sspace *sspace); +extern void sspace_remerge_free_chunks(GC *gc, Sspace *sspace); extern Chunk_Header_Basic *sspace_grab_next_chunk(Sspace *sspace, Chunk_Header_Basic *volatile *shared_next_chunk, Boolean need_construct); extern void pfc_set_slot_index(Chunk_Header *chunk, unsigned int first_free_word_index, POINTER_SIZE_INT alloc_color); Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp Mon Aug 27 01:11:57 2007 @@ -76,7 +76,7 @@ assert(live_num >= slot_index); live_num -= slot_index; POINTER_SIZE_INT index_word = table[word_index]; - POINTER_SIZE_INT mark_color = cur_mark_color << (COLOR_BITS_PER_OBJ * (slot_index % SLOT_NUM_PER_WORD_IN_TABLE)); + POINTER_SIZE_INT mark_color = cur_mark_black_color << (COLOR_BITS_PER_OBJ * (slot_index % SLOT_NUM_PER_WORD_IN_TABLE)); for(; slot_index < slot_num; ++slot_index){ assert(!(index_word & ~cur_mark_mask)); if(index_word & mark_color){ @@ -100,7 +100,7 @@ } mark_color <<= COLOR_BITS_PER_OBJ; if(!mark_color){ - mark_color = cur_mark_color; + mark_color = cur_mark_black_color; ++word_index; index_word = table[word_index]; while(index_word == cur_mark_mask && cur_free_slot_num == 0 && slot_index < slot_num){ @@ -137,14 +137,14 @@ live_num += live_num_in_word; if((first_free_word_index == 
MAX_SLOT_INDEX) && (live_num_in_word < SLOT_NUM_PER_WORD_IN_TABLE)){
       first_free_word_index = i;
-      pfc_set_slot_index((Chunk_Header*)chunk, first_free_word_index, cur_mark_color);
+      pfc_set_slot_index((Chunk_Header*)chunk, first_free_word_index, cur_mark_black_color);
     }
   }
   assert(live_num <= slot_num);
   chunk->alloc_num = live_num;
-#ifdef SSPACE_VERIFY
+  collector->live_obj_size += live_num * chunk->slot_size;
   collector->live_obj_num += live_num;
-#endif
+
   if(!live_num){ /* all objects in this chunk are dead */
     collector_add_free_chunk(collector, (Free_Chunk*)chunk);
   } else if(chunk_is_reusable(chunk)){ /* most objects in this chunk are swept, add chunk to pfc list */
@@ -163,19 +163,17 @@
   if(!table[0]){
     collector_add_free_chunk(collector, (Free_Chunk*)chunk);
   }
-#ifdef SSPACE_VERIFY
   else {
+    collector->live_obj_size += CHUNK_SIZE(chunk);
     collector->live_obj_num++;
   }
-#endif
 }

 void sspace_sweep(Collector *collector, Sspace *sspace)
 {
   Chunk_Header_Basic *chunk;
-#ifdef SSPACE_VERIFY
+  collector->live_obj_size = 0;
   collector->live_obj_num = 0;
-#endif

   chunk = sspace_grab_next_chunk(sspace, &next_chunk_for_sweep, TRUE);
   while(chunk){
@@ -192,70 +190,77 @@
   }
 }

-static void free_list_detach_chunk(Free_Chunk_List *list, Free_Chunk *chunk)
-{
-  if(chunk->prev)
-    chunk->prev->next = chunk->next;
-  else // chunk is the head
-    list->head = chunk->next;
-  if(chunk->next)
-    chunk->next->prev = chunk->prev;
-}
+/************ For merging free chunks in sspace ************/

-void gc_collect_free_chunks(GC *gc, Sspace *sspace)
+static void merge_free_chunks_in_list(Sspace *sspace, Free_Chunk_List *list)
 {
   Free_Chunk *sspace_ceiling = (Free_Chunk*)space_heap_end((Space*)sspace);
+  Free_Chunk *chunk = list->head;

-  Free_Chunk_List free_chunk_list;
-  free_chunk_list.head = NULL;
-  free_chunk_list.tail = NULL;
-
-  /* Collect free chunks from collectors to one list */
-  for(unsigned int i=0; i<gc->num_collectors; ++i){
-    Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
-    if(free_chunk_list.tail){
-      free_chunk_list.head->prev = list->tail;
-    } else {
-      free_chunk_list.tail = list->tail;
-    }
-    if(list->head){
-      list->tail->next = free_chunk_list.head;
-      free_chunk_list.head = list->head;
-    }
-    list->head = NULL;
-    list->tail = NULL;
-  }
-
-  Free_Chunk *chunk = free_chunk_list.head;
   while(chunk){
     assert(chunk->status == (CHUNK_FREE | CHUNK_TO_MERGE));
     /* Remove current chunk from the chunk list */
-    free_chunk_list.head = chunk->next;
-    if(free_chunk_list.head)
-      free_chunk_list.head->prev = NULL;
-    /* Check if the back adjcent chunks are free */
-    Free_Chunk *back_chunk = (Free_Chunk*)chunk->adj_next;
-    while(back_chunk < sspace_ceiling && back_chunk->status == (CHUNK_FREE | CHUNK_TO_MERGE)){
-      /* Remove back_chunk from list */
-      free_list_detach_chunk(&free_chunk_list, back_chunk);
-      chunk->adj_next = back_chunk->adj_next;
-      back_chunk = (Free_Chunk*)chunk->adj_next;
-    }
+    list->head = chunk->next;
+    if(list->head)
+      list->head->prev = NULL;
     /* Check if the prev adjacent chunks are free */
     Free_Chunk *prev_chunk = (Free_Chunk*)chunk->adj_prev;
     while(prev_chunk && prev_chunk->status == (CHUNK_FREE | CHUNK_TO_MERGE)){
+      assert(prev_chunk < chunk);
       /* Remove prev_chunk from list */
-      free_list_detach_chunk(&free_chunk_list, prev_chunk);
+      free_list_detach_chunk(list, prev_chunk);
       prev_chunk->adj_next = chunk->adj_next;
       chunk = prev_chunk;
       prev_chunk = (Free_Chunk*)chunk->adj_prev;
     }
-
-    //zeroing_free_chunk(chunk);
+    /* Check if the back adjacent chunks are free */
+    Free_Chunk *back_chunk = (Free_Chunk*)chunk->adj_next;
+    while(back_chunk < sspace_ceiling && back_chunk->status == (CHUNK_FREE | CHUNK_TO_MERGE)){
+      assert(chunk < back_chunk);
+      /* Remove back_chunk from list */
+      free_list_detach_chunk(list, back_chunk);
+      back_chunk = (Free_Chunk*)back_chunk->adj_next;
+      chunk->adj_next = (Chunk_Header_Basic*)back_chunk;
+    }
+    if(back_chunk < sspace_ceiling)
+      back_chunk->adj_prev = (Chunk_Header_Basic*)chunk;

     /* put the free chunk to the corresponding free chunk list */
     sspace_put_free_chunk(sspace, chunk);

-    chunk = free_chunk_list.head;
+    chunk = list->head;
+  }
+}
+
+void sspace_merge_free_chunks(GC *gc, Sspace *sspace)
+{
+  Free_Chunk_List free_chunk_list;
+  free_chunk_list.head = NULL;
+  free_chunk_list.tail = NULL;
+
+  /* Collect free chunks from collectors to one list */
+  for(unsigned int i=0; i<gc->num_collectors; ++i){
+    Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
+    move_free_chunks_between_lists(&free_chunk_list, list);
+  }
+
+  merge_free_chunks_in_list(sspace, &free_chunk_list);
+}
+
+void sspace_remerge_free_chunks(GC *gc, Sspace *sspace)
+{
+  Free_Chunk_List free_chunk_list;
+  free_chunk_list.head = NULL;
+  free_chunk_list.tail = NULL;
+
+  /* Collect free chunks from sspace free chunk lists to one list */
+  sspace_collect_free_chunks_to_list(sspace, &free_chunk_list);
+
+  /* Collect free chunks from collectors to one list */
+  for(unsigned int i=0; i<gc->num_collectors; ++i){
+    Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
+    move_free_chunks_between_lists(&free_chunk_list, list);
   }
+
+  merge_free_chunks_in_list(sspace, &free_chunk_list);
 }
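merge_free_chunks_in_list above coalesces physically adjacent free chunks: from each free chunk it walks adj_prev and adj_next and absorbs any neighbor that is also free-and-to-merge, so contiguous free chunks become one larger chunk before being refiled by size. The same coalescing idea reduced to a toy heap of fixed blocks (a sketch, not the real chunk headers; a consumed flag stands in for detaching the absorbed chunk from the list):

#include <stdbool.h>
#include <stdio.h>

/* Toy heap: blocks laid out back to back; merging a block with its free
   right-hand neighbor just grows it and marks the neighbor consumed. */
typedef struct { int size; bool free; bool consumed; } Block;

static void coalesce(Block *b, int n)
{
  for(int i = 0; i < n; i++){
    if(!b[i].free || b[i].consumed) continue;
    int j = i + 1;                   /* plays the role of adj_next */
    while(j < n && b[j].free){
      b[i].size += b[j].size;        /* absorb the adjacent free block */
      b[j].consumed = true;
      j++;
    }
  }
}

int main(void)
{
  Block heap[5] = {{4,true,false},{8,true,false},{2,false,false},{6,true,false},{6,true,false}};
  coalesce(heap, 5);
  for(int i = 0; i < 5; i++)
    if(!heap[i].consumed)
      printf("block %d: size %d %s\n", i, heap[i].size, heap[i].free ? "free" : "used");
  return 0;
}

The GC version additionally patches adj_prev of the first chunk past the merged run, which is why the committed code ends the back-merge loop with the back_chunk->adj_prev fixup.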
@@ -192,70 +190,77 @@
   }
 }
 
-static void free_list_detach_chunk(Free_Chunk_List *list, Free_Chunk *chunk)
-{
-  if(chunk->prev)
-    chunk->prev->next = chunk->next;
-  else  // chunk is the head
-    list->head = chunk->next;
-  if(chunk->next)
-    chunk->next->prev = chunk->prev;
-}
+/************ For merging free chunks in sspace ************/
 
-void gc_collect_free_chunks(GC *gc, Sspace *sspace)
+static void merge_free_chunks_in_list(Sspace *sspace, Free_Chunk_List *list)
 {
   Free_Chunk *sspace_ceiling = (Free_Chunk*)space_heap_end((Space*)sspace);
+  Free_Chunk *chunk = list->head;
 
-  Free_Chunk_List free_chunk_list;
-  free_chunk_list.head = NULL;
-  free_chunk_list.tail = NULL;
-
-  /* Collect free chunks from collectors to one list */
-  for(unsigned int i=0; i<gc->num_collectors; ++i){
-    Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
-    if(free_chunk_list.tail){
-      free_chunk_list.head->prev = list->tail;
-    } else {
-      free_chunk_list.tail = list->tail;
-    }
-    if(list->head){
-      list->tail->next = free_chunk_list.head;
-      free_chunk_list.head = list->head;
-    }
-    list->head = NULL;
-    list->tail = NULL;
-  }
-
-  Free_Chunk *chunk = free_chunk_list.head;
   while(chunk){
     assert(chunk->status == (CHUNK_FREE | CHUNK_TO_MERGE));
     /* Remove current chunk from the chunk list */
-    free_chunk_list.head = chunk->next;
-    if(free_chunk_list.head)
-      free_chunk_list.head->prev = NULL;
-    /* Check if the back adjcent chunks are free */
-    Free_Chunk *back_chunk = (Free_Chunk*)chunk->adj_next;
-    while(back_chunk < sspace_ceiling && back_chunk->status == (CHUNK_FREE | CHUNK_TO_MERGE)){
-      /* Remove back_chunk from list */
-      free_list_detach_chunk(&free_chunk_list, back_chunk);
-      chunk->adj_next = back_chunk->adj_next;
-      back_chunk = (Free_Chunk*)chunk->adj_next;
-    }
+    list->head = chunk->next;
+    if(list->head)
+      list->head->prev = NULL;
     /* Check if the prev adjacent chunks are free */
     Free_Chunk *prev_chunk = (Free_Chunk*)chunk->adj_prev;
     while(prev_chunk && prev_chunk->status == (CHUNK_FREE | CHUNK_TO_MERGE)){
+      assert(prev_chunk < chunk);
       /* Remove prev_chunk from list */
-      free_list_detach_chunk(&free_chunk_list, prev_chunk);
+      free_list_detach_chunk(list, prev_chunk);
       prev_chunk->adj_next = chunk->adj_next;
      chunk = prev_chunk;
       prev_chunk = (Free_Chunk*)chunk->adj_prev;
     }
-
-    //zeroing_free_chunk(chunk);
+    /* Check if the back adjacent chunks are free */
+    Free_Chunk *back_chunk = (Free_Chunk*)chunk->adj_next;
+    while(back_chunk < sspace_ceiling && back_chunk->status == (CHUNK_FREE | CHUNK_TO_MERGE)){
+      assert(chunk < back_chunk);
+      /* Remove back_chunk from list */
+      free_list_detach_chunk(list, back_chunk);
+      back_chunk = (Free_Chunk*)back_chunk->adj_next;
+      chunk->adj_next = (Chunk_Header_Basic*)back_chunk;
+    }
+    if(back_chunk < sspace_ceiling)
+      back_chunk->adj_prev = (Chunk_Header_Basic*)chunk;
 
     /* put the free chunk to the according free chunk list */
     sspace_put_free_chunk(sspace, chunk);
 
-    chunk = free_chunk_list.head;
+    chunk = list->head;
+  }
+}
+
+void sspace_merge_free_chunks(GC *gc, Sspace *sspace)
+{
+  Free_Chunk_List free_chunk_list;
+  free_chunk_list.head = NULL;
+  free_chunk_list.tail = NULL;
+
+  /* Collect free chunks from collectors to one list */
+  for(unsigned int i=0; i<gc->num_collectors; ++i){
+    Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
+    move_free_chunks_between_lists(&free_chunk_list, list);
+  }
+
+  merge_free_chunks_in_list(sspace, &free_chunk_list);
+}
+
+void sspace_remerge_free_chunks(GC *gc, Sspace *sspace)
+{
+  Free_Chunk_List free_chunk_list;
+  free_chunk_list.head = NULL;
+  free_chunk_list.tail = NULL;
+
+  /* Collect free chunks from sspace free chunk lists to one list */
+  sspace_collect_free_chunks_to_list(sspace, &free_chunk_list);
+
+  /* Collect free chunks from collectors to one list */
+  for(unsigned int i=0; i<gc->num_collectors; ++i){
+    Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
+    move_free_chunks_between_lists(&free_chunk_list, list);
   }
+
+  merge_free_chunks_in_list(sspace, &free_chunk_list);
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp?rev=570028&r1=570027&r2=570028&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_verify.cpp Mon Aug 27 01:11:57 2007
@@ -345,6 +345,41 @@
   return live_num;
 }
 
+static void allocator_verify_local_chunks(Allocator *allocator)
+{
+  Sspace *sspace = gc_get_sspace(allocator->gc);
+  Size_Segment **size_segs = sspace->size_segments;
+  Chunk_Header ***local_chunks = allocator->local_chunks;
+
+  for(unsigned int i = SIZE_SEGMENT_NUM; i--;){
+    if(!size_segs[i]->local_alloc){
+      assert(!local_chunks[i]);
+      continue;
+    }
+    Chunk_Header **chunks = local_chunks[i];
+    assert(chunks);
+    for(unsigned int j = size_segs[i]->chunk_num; j--;){
+      assert(!chunks[j]);
+    }
+  }
+}
+
+static void gc_verify_allocator_local_chunks(GC *gc)
+{
+  if(gc_match_kind(gc, MARK_SWEEP_GC)){
+    Mutator *mutator = gc->mutator_list;
+    while(mutator){
+      allocator_verify_local_chunks((Allocator*)mutator);
+      mutator = mutator->next;
+    }
+  }
+
+  if(gc_match_kind(gc, MAJOR_COLLECTION))
+    for(unsigned int i = gc->num_collectors; i--;){
+      allocator_verify_local_chunks((Allocator*)gc->collectors[i]);
    }
+}
+
 void sspace_verify_before_collection(GC *gc)
 {
   printf("Allocated obj: %d\n", alloc_obj_num);
@@ -374,6 +409,7 @@
   POINTER_SIZE_INT total_live_obj = sspace_live_obj_num(sspace, TRUE);
   printf("Live obj after collection: %d\n", total_live_obj);
   check_and_clear_mark_cards();
+  gc_verify_allocator_local_chunks(gc);
 }
 
 /*
@@ -398,6 +434,7 @@
 
 /* sspace verify marking with vtable marking in advance */
+Sspace *sspace_in_verifier;
 static Pool *trace_pool = NULL;
 static Vector_Block *trace_stack =
NULL; POINTER_SIZE_INT live_obj_in_verify_marking = 0; @@ -429,7 +466,7 @@ Partial_Reveal_Object *p_obj = read_slot(p_ref); if( p_obj == NULL) return; - if(obj_mark_in_vtable(gc, p_obj)) + if(obj_belongs_to_space(p_obj, (Space*)sspace_in_verifier) && obj_mark_in_vtable(gc, p_obj)) tracestack_push(p_obj); return; @@ -479,6 +516,7 @@ void sspace_verify_vtable_mark(GC *gc) { + sspace_in_verifier = gc_get_sspace(gc); GC_Metadata *metadata = gc->metadata; Pool *rootset_pool = metadata->gc_rootset_pool; @@ -496,7 +534,7 @@ Partial_Reveal_Object *p_obj = read_slot(p_ref); assert(p_obj!=NULL); - if(obj_mark_in_vtable(gc, p_obj)) + if(obj_belongs_to_space(p_obj, (Space*)sspace_in_verifier) && obj_mark_in_vtable(gc, p_obj)) tracestack_push(p_obj); } root_set = pool_iterator_next(metadata->gc_rootset_pool); Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp Mon Aug 27 01:11:57 2007 @@ -103,7 +103,7 @@ #ifndef USE_MARK_SWEEP_GC /*For LOS_Shrink and LOS_Extend*/ - if(collector->gc->tuner->kind != TRANS_NOTHING){ + if(gc_has_space_tuner(collector->gc) && collector->gc->tuner->kind != TRANS_NOTHING){ collector->non_los_live_obj_size = 0; collector->los_live_obj_size = 0; } @@ -137,9 +137,9 @@ { /* FIXME:: to adaptively identify the num_collectors_to_activate */ if( MINOR_COLLECTORS && gc_match_kind(gc, MINOR_COLLECTION)){ - gc->num_active_collectors = MINOR_COLLECTORS; - }else if ( MAJOR_COLLECTORS && !gc_match_kind(gc, MINOR_COLLECTION)){ - gc->num_active_collectors = MAJOR_COLLECTORS; + gc->num_active_collectors = MINOR_COLLECTORS; + }else if ( MAJOR_COLLECTORS && gc_match_kind(gc, MAJOR_COLLECTION)){ + gc->num_active_collectors = MAJOR_COLLECTORS; }else{ gc->num_active_collectors = gc->num_collectors; } @@ -291,6 +291,8 @@ #ifdef USE_MARK_SWEEP_GC collector_init_free_chunk_list(collector); +#else + gc_gen_hook_for_collector_init(collector); #endif #ifdef GC_GEN_STATS Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h Mon Aug 27 01:11:57 2007 @@ -23,7 +23,6 @@ #define _COLLECTOR_H_ #include "../common/gc_space.h" -#include "../mark_sweep/sspace_verify.h" struct Block_Header; struct Stealable_Stack; @@ -70,9 +69,9 @@ Block_Header* cur_target_block; Free_Chunk_List *free_chunk_list; -#ifdef SSPACE_VERIFY + + POINTER_SIZE_INT live_obj_size; POINTER_SIZE_INT live_obj_num; -#endif void(*task_func)(void*) ; /* current task */ @@ -100,6 +99,10 @@ void collector_attach_hashcode(Collector *collector); #endif +#ifndef USE_MARK_SWEEP_GC +void gc_gen_hook_for_collector_init(Collector *collector); +#endif + inline Boolean gc_collection_result(GC* gc) { Boolean result = TRUE; @@ -120,7 +123,6 @@ gc->collect_result = TRUE; return; } - #endif //#ifndef _COLLECTOR_H_ Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h URL: 
http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h Mon Aug 27 01:11:57 2007 @@ -27,7 +27,7 @@ #include "../common/hashcode.h" #endif -void* mos_alloc(unsigned size, Allocator *allocator); +extern Space_Alloc_Func mos_alloc; /* NOS forward obj to MOS in MINOR_COLLECTION */ FORCE_INLINE Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj) Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.cpp Mon Aug 27 01:11:57 2007 @@ -23,6 +23,8 @@ static hythread_tls_key_t tls_gc_key; POINTER_SIZE_INT tls_gc_offset; +hythread_group_t gc_thread_group = NULL; + void gc_tls_init() { Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp?rev=570028&view=auto ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp (added) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp Mon Aug 27 01:11:57 2007 @@ -0,0 +1,276 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "marker.h" +#include "../finalizer_weakref/finalizer_weakref.h" + +#include "../gen/gen.h" +#include "../mark_sweep/gc_ms.h" + +unsigned int NUM_MARKERS = 0; +static volatile unsigned int live_marker_num = 0; + +void notify_marker_to_work(Marker* marker) +{ + vm_set_event(marker->task_assigned_event); +} + +void wait_marker_to_finish(Marker* marker) +{ + vm_wait_event(marker->task_finished_event); +} + +void marker_wait_for_task(Marker* marker) +{ + vm_wait_event(marker->task_assigned_event); +} + +void marker_notify_work_done(Marker* marker) +{ + vm_set_event(marker->task_finished_event); +} + +void wait_marker_finish_mark_root(Marker* marker) +{ + vm_wait_event(marker->markroot_finished_event); +} + +void marker_notify_mark_root_done(Marker* marker) +{ + vm_set_event(marker->markroot_finished_event); +} + +static int marker_thread_func(void *arg) +{ + Marker* marker = (Marker *)arg; + assert(marker); + + while(true){ + /* Waiting for newly assigned task */ + marker_wait_for_task(marker); + marker->marker_is_active = TRUE; + + /* waken up and check for new task */ + TaskType task_func = marker->task_func; + if(task_func == NULL){ + atomic_dec32(&live_marker_num); + return 1; + } + + task_func(marker); + + marker_notify_work_done(marker); + + marker->marker_is_active = FALSE; + } + + return 0; +} + +static void marker_init_thread(Marker* marker) +{ + int status = vm_create_event(&marker->task_assigned_event); + assert(status == THREAD_OK); + + status = vm_create_event(&marker->task_finished_event); + assert(status == THREAD_OK); + + status = vm_create_event(&marker->markroot_finished_event); + assert(status == THREAD_OK); + + status = (unsigned int)vm_create_thread(marker_thread_func, (void*)marker); + + assert(status == THREAD_OK); + + return; +} + +void marker_initialize(GC* gc) +{ + unsigned int num_processors = gc_get_processor_num(gc); + + unsigned int nthreads = max(NUM_MARKERS,num_processors); + + unsigned int size = sizeof(Marker*) * nthreads; + gc->markers = (Marker **) STD_MALLOC(size); + memset(gc->markers, 0, size); + + size = sizeof(Marker); + for (unsigned int i = 0; i < nthreads; i++) { + Marker* marker = (Marker *)STD_MALLOC(size); + memset(marker, 0, size); + + /* FIXME:: thread_handle is for temporary control */ + marker->thread_handle = (VmThreadHandle)(POINTER_SIZE_INT)i; + marker->gc = gc; + marker->marker_is_active = FALSE; + marker_init_thread(marker); + + gc->markers[i] = marker; + } + + gc->num_markers = NUM_MARKERS? 
NUM_MARKERS:num_processors;
+  live_marker_num = NUM_MARKERS;
+
+  return;
+
+}
+
+void marker_terminate_thread(Marker* marker)
+{
+  assert(live_marker_num);
+  unsigned int old_live_marker_num = live_marker_num;
+  marker->task_func = NULL; /* NULL to notify thread exit */
+  if(marker->marker_is_active) wait_marker_to_finish(marker);
+  notify_marker_to_work(marker);
+  while(old_live_marker_num == live_marker_num)
+    vm_thread_yield(); /* give marker time to die */
+
+  delete marker->trace_stack;
+  return;
+
+}
+
+void marker_destruct(GC* gc)
+{
+  for(unsigned int i=0; i<gc->num_markers; i++)
+  {
+    Marker* marker = gc->markers[i];
+    marker_terminate_thread(marker);
+    STD_FREE(marker);
+
+  }
+  assert(live_marker_num == 0);
+
+  STD_FREE(gc->markers);
+  return;
+}
+
+void wait_mark_finish(GC* gc)
+{
+  unsigned int num_active_marker = gc->num_active_markers;
+  for(unsigned int i=0; i<num_active_marker; i++){
+    Marker* marker = gc->markers[i];
+    wait_marker_to_finish(marker);
+  }
+  return;
+}
+
+Boolean is_mark_finished(GC* gc)
+{
+  unsigned int num_active_marker = gc->num_active_markers;
+  unsigned int i = 0;
+  for(; i<num_active_marker; i++){
+    Marker* marker = gc->markers[i];
+    if(marker->marker_is_active){
+      return FALSE;
+    }
+  }
+  return TRUE;
+}
+
+
+void marker_reset_thread(Marker* marker)
+{
+  marker->task_func = NULL;
+
+#ifndef BUILD_IN_REFERENT
+  collector_reset_weakref_sets((Collector*)marker);
+#endif
+
+  return;
+
+}
+
+void assign_marker_with_task(GC* gc, TaskType task_func, Space* space)
+{
+  for(unsigned int i=0; i<gc->num_markers; i++)
+  {
+    Marker* marker = gc->markers[i];
+
+    marker_reset_thread(marker);
+    marker->task_func = task_func;
+    marker->mark_space= space;
+    notify_marker_to_work(marker);
+  }
+  return;
+}
+
+void assign_marker_with_task(GC* gc, TaskType task_func, Space* space, unsigned int num_markers)
+{
+  unsigned int i = gc->num_active_markers;
+  gc->num_active_markers += num_markers;
+  for(; i < gc->num_active_markers; i++)
+  {
+    printf("start mark thread %d \n", i);
+    Marker* marker = gc->markers[i];
+
+    marker_reset_thread(marker);
+    marker->task_func = task_func;
+    marker->mark_space= space;
+    notify_marker_to_work(marker);
+  }
+  return;
+}
+
+void marker_execute_task(GC* gc, TaskType task_func, Space* space)
+{
+  assign_marker_with_task(gc, task_func, space);
+  wait_mark_finish(gc);
+  return;
+}
+
+void wait_mark_root_finish(GC* gc)
+{
+  unsigned int num_marker = gc->num_markers;
+  for(unsigned int i=0; i<num_marker; i++){
+    Marker* marker = gc->markers[i];
+    wait_marker_finish_mark_root(marker);
+  }
+  return;
+}
+
+void wait_mark_root_finish(GC* gc, unsigned int num_markers)
+{
+  unsigned int num_active_marker = gc->num_active_markers;
+  unsigned int i= num_active_marker - num_markers;
+  for(; i < num_active_marker; i++)
+  {
+    Marker* marker = gc->markers[i];
+    wait_marker_finish_mark_root(marker);
+  }
+  return;
+}
+
+void marker_execute_task_concurrent(GC* gc, TaskType task_func, Space* space)
+{
+  assign_marker_with_task(gc, task_func, space);
+  wait_mark_root_finish(gc);
+  return;
+}
+
+void marker_execute_task_concurrent(GC* gc, TaskType task_func, Space* space, unsigned int num_markers)
+{
+  unsigned int num_free_markers = gc->num_markers - gc->num_active_markers;
+  if(num_markers > num_free_markers)
+    num_markers = num_free_markers;
+  assign_marker_with_task(gc, task_func, space,num_markers);
+  wait_mark_root_finish(gc, num_markers);
+  return;
+}
+

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h
URL:
http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h?rev=570028&view=auto ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h (added) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h Mon Aug 27 01:11:57 2007 @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _MARKER_H_ +#define _MARKER_H_ + +#include "../common/gc_space.h" +#include "../mark_sweep/sspace_chunk.h" + +typedef struct Marker{ + /* <-- first couple of fields are overloaded as Allocator */ + void *free; + void *ceiling; + void *end; + void *alloc_block; + Chunk_Header ***local_chunks; + Space* alloc_space; + GC* gc; + VmThreadHandle thread_handle; /* This thread; */ + /* End of Allocator --> */ + + /* FIXME:: for testing */ + Space* mark_space; + + Vector_Block *trace_stack; + + Vector_Block* rep_set; /* repointed set */ + Vector_Block* rem_set; +#ifdef USE_32BITS_HASHCODE + Vector_Block* hashcode_set; +#endif + + Vector_Block *softref_set; + Vector_Block *weakref_set; + Vector_Block *phanref_set; + + VmEventHandle task_assigned_event; + VmEventHandle task_finished_event; + + Block_Header* cur_compact_block; + Block_Header* cur_target_block; + + Free_Chunk_List *free_chunk_list; + + POINTER_SIZE_INT live_obj_size; + POINTER_SIZE_INT live_obj_num; + + void(*task_func)(void*) ; /* current task */ + + POINTER_SIZE_INT non_los_live_obj_size; + POINTER_SIZE_INT los_live_obj_size; + POINTER_SIZE_INT segment_live_size[NORMAL_SIZE_SEGMENT_NUM]; + unsigned int result; + + VmEventHandle markroot_finished_event; + + Boolean marker_is_active; + int64 time_mark; + Marker* next; +} Marker; + +typedef Marker* Marker_List; + +#define MAX_NUM_MARKERS 0xff +#define MIN_NUM_MARKERS 0x01 + +void marker_initialize(GC* gc); +void marker_destruct(GC* gc); + +void marker_execute_task(GC* gc, TaskType task_func, Space* space); +void marker_execute_task_concurrent(GC* gc, TaskType task_func, Space* space, unsigned int num_markers); +void marker_execute_task_concurrent(GC* gc, TaskType task_func, Space* space); + +void marker_notify_mark_root_done(Marker* marker); +void wait_mark_finish(GC* gc); +Boolean is_mark_finished(GC* gc); + + + +#endif //_MARKER_H_ + Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h ------------------------------------------------------------------------------ svn:eol-style = native Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- 
harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp Mon Aug 27 01:11:57 2007 @@ -38,6 +38,7 @@ mutator->rem_set = free_set_pool_get_entry(gc->metadata); assert(vector_block_is_empty(mutator->rem_set)); } + mutator->dirty_obj_snapshot = free_set_pool_get_entry(gc->metadata); if(!IGNORE_FINREF ) mutator->obj_with_fin = finref_get_free_block(gc); @@ -97,6 +98,13 @@ pool_put_entry(gc->finref_metadata->obj_with_fin_pool, mutator->obj_with_fin); mutator->obj_with_fin = NULL; } + + if( mutator->dirty_obj_snapshot != NULL){ + if(vector_block_is_empty(mutator->dirty_obj_snapshot)) + pool_put_entry(gc->metadata->free_set_pool, mutator->dirty_obj_snapshot); + else /* FIXME:: this condition may be released. */ + pool_put_entry(gc->metadata->dirty_obj_snaptshot_pool, mutator->dirty_obj_snapshot); + } //gc_set_tls(NULL); @@ -122,6 +130,60 @@ mutator = mutator->next; } return; +} + +/* +Boolean gc_local_snapshot_is_empty(GC* gc) +{ + lock(gc->mutator_list_lock); + + Mutator *mutator = gc->mutator_list; + while (mutator) { + if(mutator->concurrent_mark_handshake_status != LOCAL_SNAPSHOT_CONTAINER_IS_EMPTY){ + unlock(gc->mutator_list_lock); + return FALSE; + } + mutator = mutator->next; + } + + unlock(gc->mutator_list_lock); + return TRUE; +}*/ + +Boolean gc_local_snapshot_is_empty(GC* gc) +{ + lock(gc->mutator_list_lock); + + Mutator *mutator = gc->mutator_list; + while (mutator) { + Vector_Block* local_snapshot_set = mutator->dirty_obj_snapshot; + if(!vector_block_is_empty(local_snapshot_set)){ + unlock(gc->mutator_list_lock); + return FALSE; + } + mutator = mutator->next; + } + + unlock(gc->mutator_list_lock); + return TRUE; +} + +Vector_Block* gc_get_local_snapshot(GC* gc, unsigned int shared_id) +{ + lock(gc->mutator_list_lock); + + Mutator *mutator = gc->mutator_list; + while (mutator) { + Vector_Block* local_snapshot = mutator->dirty_obj_snapshot; + if(!vector_block_is_empty(local_snapshot) && vector_block_set_shared(local_snapshot,shared_id)){ + unlock(gc->mutator_list_lock); + return local_snapshot; + } + mutator = mutator->next; + } + + unlock(gc->mutator_list_lock); + return NULL; } Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h Mon Aug 27 01:11:57 2007 @@ -42,6 +42,7 @@ Vector_Block* rem_set; Vector_Block* obj_with_fin; Mutator* next; /* The gc info area associated with the next active thread. 
*/ + Vector_Block* dirty_obj_snapshot; } Mutator; void mutator_initialize(GC* gc, void* tls_gc_info); @@ -51,4 +52,6 @@ void gc_reset_mutator_context(GC* gc); void gc_prepare_mutator_remset(GC* gc); +Boolean gc_local_snapshot_is_empty(GC* gc); +Vector_Block* gc_get_local_snapshot(GC* gc, unsigned int shared_id); #endif /*ifndef _MUTATOR_H_ */ Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp Mon Aug 27 01:11:57 2007 @@ -38,7 +38,7 @@ struct GC_Gen; void gc_set_nos(GC_Gen* gc, Space* space); -void fspace_initialize(GC* gc, void* start, POINTER_SIZE_INT fspace_size, POINTER_SIZE_INT commit_size) +Fspace *fspace_initialize(GC* gc, void* start, POINTER_SIZE_INT fspace_size, POINTER_SIZE_INT commit_size) { assert( (fspace_size%GC_BLOCK_SIZE_BYTES) == 0 ); Fspace* fspace = (Fspace *)STD_MALLOC(sizeof(Fspace)); @@ -84,7 +84,6 @@ fspace->period_surviving_size = 0; fspace->gc = gc; - gc_set_nos((GC_Gen*)gc, (Space*)fspace); /* above is same as Mspace init --> */ forward_first_half = TRUE; @@ -97,7 +96,7 @@ else object_forwarding_boundary = (void*)&fspace->blocks[fspace->num_managed_blocks]; - return; + return fspace; } void fspace_destruct(Fspace *fspace) @@ -106,12 +105,12 @@ STD_FREE(fspace); } -void fspace_reset_for_allocation(Fspace* fspace) +void fspace_reset_after_collection(Fspace* fspace) { unsigned int first_idx = fspace->first_block_idx; unsigned int marked_start_idx = 0; //was for oi markbit reset, now useless unsigned int marked_last_idx = 0; - Boolean is_major_collection = !gc_match_kind(fspace->gc, MINOR_COLLECTION); + Boolean is_major_collection = gc_match_kind(fspace->gc, MAJOR_COLLECTION); Boolean gen_mode = gc_is_gen_mode(); if( is_major_collection || @@ -152,7 +151,16 @@ block->free = block->base; } - + + /* For los extension + * num_managed_blocks of fspace might be 0. + * In this case, the last block we found is mos' last block. + * And this implementation depends on the fact that mos and nos are continuous. + */ + int last_block_index = fspace->num_managed_blocks - 1; + Block_Header *fspace_last_block = (Block_Header*)&fspace->blocks[last_block_index]; + fspace_last_block->next = NULL; + return; } @@ -196,7 +204,7 @@ fspace->num_collections++; GC* gc = fspace->gc; - mspace_free_block_idx = ((GC_Gen*)gc)->mos->free_block_idx; + mspace_free_block_idx = ((Blocked_Space*)((GC_Gen*)gc)->mos)->free_block_idx; if(gc_is_gen_mode()){ fspace->collect_algorithm = MINOR_GEN_FORWARD_POOL; @@ -210,32 +218,32 @@ switch(fspace->collect_algorithm){ #ifdef MARK_BIT_FLIPPING - -case MINOR_NONGEN_FORWARD_POOL: - TRACE2("gc.process", "GC: nongen_forward_pool algo start ... \n"); - collector_execute_task(gc, (TaskType)nongen_forward_pool, (Space*)fspace); - TRACE2("gc.process", "\nGC: end of nongen forward algo ... \n"); + + case MINOR_NONGEN_FORWARD_POOL: + TRACE2("gc.process", "GC: nongen_forward_pool algo start ... \n"); + collector_execute_task(gc, (TaskType)nongen_forward_pool, (Space*)fspace); + TRACE2("gc.process", "\nGC: end of nongen forward algo ... 
\n"); #ifdef GC_GEN_STATS - gc_gen_stats_set_nos_algo((GC_Gen*)gc, MINOR_NONGEN_FORWARD_POOL); + gc_gen_stats_set_nos_algo((GC_Gen*)gc, MINOR_NONGEN_FORWARD_POOL); #endif - break; - + break; + #endif /*#ifdef MARK_BIT_FLIPPING */ -case MINOR_GEN_FORWARD_POOL: - TRACE2("gc.process", "gen_forward_pool algo start ... \n"); - collector_execute_task(gc, (TaskType)gen_forward_pool, (Space*)fspace); - TRACE2("gc.process", "\nGC: end of gen forward algo ... \n"); + case MINOR_GEN_FORWARD_POOL: + TRACE2("gc.process", "gen_forward_pool algo start ... \n"); + collector_execute_task(gc, (TaskType)gen_forward_pool, (Space*)fspace); + TRACE2("gc.process", "\nGC: end of gen forward algo ... \n"); #ifdef GC_GEN_STATS - gc_gen_stats_set_nos_algo((GC_Gen*)gc, MINOR_NONGEN_FORWARD_POOL); + gc_gen_stats_set_nos_algo((GC_Gen*)gc, MINOR_NONGEN_FORWARD_POOL); #endif - break; - -default: - DIE2("gc.collection","Specified minor collection algorithm doesn't exist!"); - exit(0); - break; + break; + + default: + DIE2("gc.collection","Specified minor collection algorithm doesn't exist!"); + exit(0); + break; } - + return; }