From: xli@apache.org
Reply-To: dev@harmony.apache.org
To: commits@harmony.apache.org
Subject: svn commit: r570028 [2/4] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ finalizer_weakref/ gen/ los/ mark_compact/ mark_sweep/ tests/ thread/ trace_forward/ utils/ verify/
Date: Mon, 27 Aug 2007 08:12:01 -0000
Message-Id: <20070827081210.C674A1A983A@eris.apache.org>

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp Mon Aug 27 01:11:57 2007 @@ -55,73 +55,81 @@ #define RESERVE_BOTTOM ((void*)0x1000000) -void* alloc_large_pages(size_t size, const char* hint); +static void determine_min_nos_size(GC_Gen *gc, POINTER_SIZE_INT min_heap_size) +{ + min_nos_size_bytes *= gc->_num_processors; + + POINTER_SIZE_INT min_nos_size_threshold = min_heap_size>>5; + if(min_nos_size_bytes > min_nos_size_threshold) + min_nos_size_bytes = round_down_to_size(min_nos_size_threshold, SPACE_ALLOC_UNIT); + + if(MIN_NOS_SIZE) min_nos_size_bytes = MIN_NOS_SIZE; +} + +static POINTER_SIZE_INT determine_los_size(POINTER_SIZE_INT min_heap_size) +{ + POINTER_SIZE_INT los_size = min_heap_size >> 7; + if(INIT_LOS_SIZE) los_size = INIT_LOS_SIZE; + if(los_size < min_los_size_bytes ) + los_size = min_los_size_bytes; + + los_size = round_down_to_size(los_size, SPACE_ALLOC_UNIT); + return los_size; +} + +void *alloc_large_pages(size_t size, const char *hint); void gc_gen_initial_verbose_info(GC_Gen *gc); -void gc_gen_initialize(GC_Gen *gc_gen, POINTER_SIZE_INT min_heap_size, POINTER_SIZE_INT max_heap_size) +void gc_gen_initialize(GC_Gen *gc_gen, POINTER_SIZE_INT min_heap_size, POINTER_SIZE_INT max_heap_size) { TRACE2("gc.process", "GC: GC_Gen heap init ... 
\n"); assert(gc_gen); - + max_heap_size = round_down_to_size(max_heap_size, SPACE_ALLOC_UNIT); min_heap_size = round_up_to_size(min_heap_size, SPACE_ALLOC_UNIT); assert(max_heap_size <= max_heap_size_bytes); assert(max_heap_size >= min_heap_size_bytes); - - min_nos_size_bytes *= gc_gen->_num_processors; - - POINTER_SIZE_INT min_nos_size_threshold = min_heap_size>>5; - if(min_nos_size_bytes > min_nos_size_threshold){ - min_nos_size_bytes = round_down_to_size(min_nos_size_threshold,SPACE_ALLOC_UNIT); - } - if( MIN_NOS_SIZE ) min_nos_size_bytes = MIN_NOS_SIZE; - - POINTER_SIZE_INT los_size = min_heap_size >> 7; - if(INIT_LOS_SIZE) los_size = INIT_LOS_SIZE; - if(los_size < min_los_size_bytes ) - los_size = min_los_size_bytes ; + determine_min_nos_size(gc_gen, min_heap_size); + + POINTER_SIZE_INT los_size = 0; + if(MAJOR_ALGO == MAJOR_MARK_SWEEP) + min_los_size_bytes = 0; + else + los_size = determine_los_size(min_heap_size); - los_size = round_down_to_size(los_size, SPACE_ALLOC_UNIT); - /* let's compute and reserve the space for committing */ /* heuristic nos + mos + LOS = max, and nos*ratio = mos */ - POINTER_SIZE_INT nos_reserve_size, nos_commit_size; - POINTER_SIZE_INT mos_reserve_size, mos_commit_size; - POINTER_SIZE_INT los_mos_size; + POINTER_SIZE_INT nos_reserve_size, nos_commit_size; + POINTER_SIZE_INT mos_reserve_size, mos_commit_size; + POINTER_SIZE_INT los_mos_reserve_size; - /*Give GC a hint of gc survive ratio. And the last_survive_ratio field is used in heap size adjustment*/ + /* Give GC a hint of gc survive ratio. And the last_survive_ratio field is used in heap size adjustment */ gc_gen->survive_ratio = 0.2f; - - if(NOS_SIZE){ - los_mos_size = min_heap_size - NOS_SIZE; - mos_reserve_size = los_mos_size - los_size; - - nos_commit_size = NOS_SIZE; - nos_reserve_size = NOS_SIZE; - }else{ - los_mos_size = min_heap_size; - mos_reserve_size = max_heap_size_bytes - min_los_size_bytes; - nos_commit_size = (POINTER_SIZE_INT)(((float)(min_heap_size - los_size))/(1.0f + gc_gen->survive_ratio)); + los_mos_reserve_size = max_heap_size - NOS_SIZE; + mos_reserve_size = los_mos_reserve_size - min_los_size_bytes; + if(NOS_SIZE){ + nos_reserve_size = nos_commit_size = NOS_SIZE; + } else { nos_reserve_size = mos_reserve_size; + nos_commit_size = (POINTER_SIZE_INT)(((float)(min_heap_size - los_size))/(1.0f + gc_gen->survive_ratio)); } - - nos_commit_size = round_down_to_size(nos_commit_size, SPACE_ALLOC_UNIT); + nos_commit_size = round_down_to_size(nos_commit_size, SPACE_ALLOC_UNIT); mos_commit_size = min_heap_size - los_size - nos_commit_size; - - /* allocate memory for gc_gen */ - void* reserved_base; - void* reserved_end; - void* nos_base; + + /* Reserve memory for spaces of gc_gen */ + void *reserved_base; + void *reserved_end; + void *nos_base; #ifdef STATIC_NOS_MAPPING - //FIXME: no large page support in static nos mapping + //FIXME:: no large page support in static nos mapping assert(large_page_hint==NULL); - assert((POINTER_SIZE_INT)nos_boundary%SPACE_ALLOC_UNIT == 0); + assert(!((POINTER_SIZE_INT)nos_boundary % SPACE_ALLOC_UNIT)); nos_base = vm_reserve_mem(nos_boundary, nos_reserve_size); if( nos_base != nos_boundary ){ DIE2("gc.base","Warning: Static NOS mapping: Can't reserve memory at address"<= nos_base){ los_mos_base = (void*)((POINTER_SIZE_INT)los_mos_base - SPACE_ALLOC_UNIT); if(los_mos_base < RESERVE_BOTTOM){ DIE2("gc.base","Static NOS mapping: Can't reserve memory at address"<reserved_heap_size = los_size + nos_reserve_size + mos_reserve_size; -#else - gc_gen->reserved_heap_size 
= max_heap_size_bytes; -#endif + gc_gen->heap_start = reserved_base; gc_gen->heap_end = reserved_end; - gc_gen->blocks = (Block*)reserved_base; +#ifdef STATIC_NOS_MAPPING + gc_gen->reserved_heap_size = los_mos_reserve_size + nos_reserve_size; +#else + gc_gen->reserved_heap_size = max_heap_size; +#endif + /* Commented out for that the frontmost reserved mem size in los is not counted in los' committed size. + * gc_gen->committed_heap_size = min_heap_size; + */ gc_gen->num_collections = 0; gc_gen->time_collections = 0; + gc_gen->blocks = (Block*)reserved_base; gc_gen->force_major_collect = FALSE; gc_gen->force_gen_mode = FALSE; gc_los_initialize(gc_gen, reserved_base, los_size); - - reserved_base = (void*)((POINTER_SIZE_INT)reserved_base + los_size); - gc_mos_initialize(gc_gen, reserved_base, mos_reserve_size, mos_commit_size); - - gc_nos_initialize(gc_gen, nos_base, nos_reserve_size, nos_commit_size); - - /* connect mos and nos, so that they can be compacted as one space */ - Blocked_Space* mos = (Blocked_Space*)gc_get_mos(gc_gen); - Blocked_Space* nos = (Blocked_Space*)gc_get_nos(gc_gen); - Block_Header* mos_last_block = (Block_Header*)&mos->blocks[mos->num_managed_blocks-1]; - Block_Header* nos_first_block = (Block_Header*)&nos->blocks[0]; - mos_last_block->next = nos_first_block; + gc_mos_initialize(gc_gen, (void*)((POINTER_SIZE_INT)reserved_base + los_size), mos_reserve_size, mos_commit_size); + gc_nos_initialize(gc_gen, nos_base, nos_reserve_size, nos_commit_size); - nos->collect_algorithm = MINOR_ALGO; - mos->collect_algorithm = MAJOR_ALGO; - - gc_space_tuner_initialize((GC*)gc_gen); - - gc_gen_mode_adapt_init(gc_gen); + gc_gen->committed_heap_size = space_committed_size(gc_get_nos(gc_gen)) + + space_committed_size(gc_get_mos(gc_gen)) + + space_committed_size(gc_get_los(gc_gen)); + + if(MAJOR_ALGO != MAJOR_MARK_SWEEP){ + Blocked_Space *nos = (Blocked_Space*)gc_get_nos(gc_gen); + Blocked_Space *mos = (Blocked_Space*)gc_get_mos(gc_gen); + /* Connect mos and nos, so that they can be compacted as one space */ + Block_Header *mos_last_block = (Block_Header*)&mos->blocks[mos->num_managed_blocks-1]; + Block_Header *nos_first_block = (Block_Header*)&nos->blocks[0]; + mos_last_block->next = nos_first_block; - gc_gen->committed_heap_size = space_committed_size((Space*)gc_gen->nos) + - space_committed_size((Space*)gc_gen->mos) + - space_committed_size((Space*)gc_gen->los); + gc_space_tuner_initialize((GC*)gc_gen); + gc_gen_mode_adapt_init(gc_gen); + } #ifdef GC_GEN_STATS gc_gen_stats_initialize(gc_gen); @@ -235,33 +239,27 @@ return; } -void gc_gen_destruct(GC_Gen *gc_gen) +void gc_gen_destruct(GC_Gen *gc_gen) { TRACE2("gc.process", "GC: GC_Gen heap destruct ......"); - Space* nos = (Space*)gc_gen->nos; - Space* mos = (Space*)gc_gen->mos; - Space* los = (Space*)gc_gen->los; - - POINTER_SIZE_INT nos_size = space_committed_size(nos); - POINTER_SIZE_INT mos_size = space_committed_size(mos); - POINTER_SIZE_INT los_size = space_committed_size(los); - - void* nos_start = nos->heap_start; - void* mos_start = mos->heap_start; - void* los_start = los->heap_start; + Space *nos = gc_gen->nos; gc_nos_destruct(gc_gen); + vm_unmap_mem(nos->heap_start, space_committed_size(nos)); gc_gen->nos = NULL; - gc_mos_destruct(gc_gen); + Space *mos = gc_gen->mos; + gc_mos_destruct(gc_gen); + vm_unmap_mem(mos->heap_start, space_committed_size(mos)); gc_gen->mos = NULL; - - gc_los_destruct(gc_gen); - gc_gen->los = NULL; - vm_unmap_mem(nos_start, nos_size); - vm_unmap_mem(mos_start, mos_size); - vm_unmap_mem(los_start, 
los_size); + if(MAJOR_ALGO != MAJOR_MARK_SWEEP){ + Space *los = gc_gen->los; + gc_los_destruct(gc_gen); + vm_unmap_mem(los->heap_start, space_committed_size(los)); + gc_gen->los = NULL; + } + #ifdef GC_GEN_STATS gc_gen_stats_destruct(gc_gen); #endif @@ -269,23 +267,86 @@ return; } -Space* gc_get_nos(GC_Gen* gc){ return (Space*)gc->nos;} -Space* gc_get_mos(GC_Gen* gc){ return (Space*)gc->mos;} -Space* gc_get_los(GC_Gen* gc){ return (Space*)gc->los;} - -void gc_set_nos(GC_Gen* gc, Space* nos){ gc->nos = (Fspace*)nos;} -void gc_set_mos(GC_Gen* gc, Space* mos){ gc->mos = (Mspace*)mos;} -void gc_set_los(GC_Gen* gc, Space* los){ gc->los = (Lspace*)los;} +Space *gc_get_nos(GC_Gen *gc){ return gc->nos; } +Space *gc_get_mos(GC_Gen *gc){ return gc->mos; } +Space *gc_get_los(GC_Gen *gc){ return gc->los; } + +void gc_set_nos(GC_Gen *gc, Space *nos){ gc->nos = nos; } +void gc_set_mos(GC_Gen *gc, Space *mos){ gc->mos = mos; } +void gc_set_los(GC_Gen *gc, Space *los){ gc->los = los; } -void* mos_alloc(unsigned size, Allocator *allocator){return mspace_alloc(size, allocator);} +Space_Alloc_Func mos_alloc; +//void* mos_alloc(unsigned size, Allocator *allocator){return mspace_alloc(size, allocator);} void* nos_alloc(unsigned size, Allocator *allocator){return fspace_alloc(size, allocator);} -void* los_alloc(unsigned size, Allocator *allocator){return lspace_alloc(size, allocator);} +Space_Alloc_Func los_alloc; +//void* los_alloc(unsigned size, Allocator *allocator){return lspace_alloc(size, allocator);} void* los_try_alloc(POINTER_SIZE_INT size, GC* gc){ return lspace_try_alloc((Lspace*)((GC_Gen*)gc)->los, size); } +void gc_nos_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT nos_size, POINTER_SIZE_INT commit_size) +{ + Space *nos = (Space*)fspace_initialize((GC*)gc, start, nos_size, commit_size); + gc_set_nos(gc, nos); + nos->collect_algorithm = MINOR_ALGO; +} + +void gc_nos_destruct(GC_Gen *gc) +{ fspace_destruct((Fspace*)gc->nos); } + +void gc_mos_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT mos_size, POINTER_SIZE_INT commit_size) +{ + Space *mos; + if(MAJOR_ALGO == MAJOR_MARK_SWEEP){ + mos = (Space*)sspace_initialize((GC*)gc, start, mos_size, commit_size); + mos_alloc = sspace_alloc; + } else { + mos = (Space*)mspace_initialize((GC*)gc, start, mos_size, commit_size); + mos_alloc = mspace_alloc; + } + gc_set_mos(gc, mos); + mos->collect_algorithm = MAJOR_ALGO; +} + +void gc_mos_destruct(GC_Gen *gc) +{ + if(MAJOR_ALGO == MAJOR_MARK_SWEEP) + sspace_destruct((Sspace*)gc->mos); + else + mspace_destruct((Mspace*)gc->mos); +} + +void gc_los_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT los_size) +{ + Space *los; + if(MAJOR_ALGO == MAJOR_MARK_SWEEP){ + assert(los_size == 0); + los = NULL; + los_alloc = sspace_alloc; + } else { + los = (Space*)lspace_initialize((GC*)gc, start, los_size); + los_alloc = lspace_alloc; + } + gc_set_los(gc, los); +} + +void gc_los_destruct(GC_Gen *gc) +{ + if(MAJOR_ALGO != MAJOR_MARK_SWEEP) + lspace_destruct((Lspace*)gc->los); +} + Boolean FORCE_FULL_COMPACT = FALSE; Boolean IGNORE_VTABLE_TRACING = TRUE; -Boolean VTABLE_TRACING = FALSE; +Boolean VTABLE_TRACING = FALSE; + +unsigned int gc_next_collection_kind(GC_Gen* gc) +{ + if(gc->force_major_collect || FORCE_FULL_COMPACT) + return MAJOR_COLLECTION; + else + return MINOR_COLLECTION; +} + void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause) { @@ -293,7 +354,7 @@ gc->last_collect_kind = gc->collect_kind; if(gc->force_major_collect || cause== GC_CAUSE_LOS_IS_FULL || FORCE_FULL_COMPACT) - gc->collect_kind = 
MAJOR_COLLECTION; + gc->collect_kind = NORMAL_MAJOR_COLLECTION; else gc->collect_kind = MINOR_COLLECTION; @@ -303,7 +364,7 @@ VTABLE_TRACING = TRUE; #ifdef USE_MARK_SWEEP_GC - gc->collect_kind = MARK_SWEEP_GC; + gc->collect_kind = MS_COLLECTION; #endif return; } @@ -332,16 +393,19 @@ } if(!major_algo){ - MAJOR_ALGO= MAJOR_COMPACT_MOVE; + MAJOR_ALGO = MAJOR_COMPACT_MOVE; }else{ string_to_upper(major_algo); if(!strcmp(major_algo, "MAJOR_COMPACT_SLIDE")){ - MAJOR_ALGO= MAJOR_COMPACT_SLIDE; + MAJOR_ALGO = MAJOR_COMPACT_SLIDE; }else if(!strcmp(major_algo, "MAJOR_COMPACT_MOVE")){ - MAJOR_ALGO= MAJOR_COMPACT_MOVE; + MAJOR_ALGO = MAJOR_COMPACT_MOVE; + + }else if(!strcmp(major_algo, "MAJOR_MARK_SWEEP")){ + MAJOR_ALGO = MAJOR_MARK_SWEEP; }else{ WARN2("gc.base","\nWarning: GC algorithm setting incorrect. Will use default value.\n"); @@ -356,7 +420,7 @@ void gc_gen_assign_free_area_to_mutators(GC_Gen* gc) { if(gc->cause == GC_CAUSE_LOS_IS_FULL){ - Lspace* los = gc->los; + Lspace* los = (Lspace*)gc->los; los->success_ptr = los_try_alloc(los->failure_size, (GC*)gc); los->failure_size = 0; @@ -372,14 +436,14 @@ return; } -void gc_gen_adjust_heap_size(GC_Gen* gc, int64 pause_time) +static void gc_gen_adjust_heap_size(GC_Gen* gc) { if(gc_match_kind((GC*)gc, MINOR_COLLECTION)) return; if(gc->committed_heap_size == max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL) return; - Mspace* mos = gc->mos; - Fspace* nos = gc->nos; - Lspace* los = gc->los; + Mspace* mos = (Mspace*)gc->mos; + Fspace* nos = (Fspace*)gc->nos; + Lspace* los = (Lspace*)gc->los; /* We can not tolerate gc->survive_ratio be greater than threshold twice continuously. * Or, we must adjust heap size */ @@ -393,18 +457,20 @@ assert(heap_total_size > heap_surviving_size); float heap_survive_ratio = (float)heap_surviving_size / (float)heap_total_size; + float non_los_survive_ratio = (float)mos->period_surviving_size / (float)(mos->committed_heap_size + nos->committed_heap_size); float threshold_survive_ratio = 0.3f; float regular_survive_ratio = 0.125f; POINTER_SIZE_INT new_heap_total_size = 0; POINTER_SIZE_INT adjust_size = 0; - if(heap_survive_ratio < threshold_survive_ratio) return; + if( (heap_survive_ratio < threshold_survive_ratio) && (non_los_survive_ratio < threshold_survive_ratio) )return; if(++tolerate < 2) return; tolerate = 0; - new_heap_total_size = (POINTER_SIZE_INT)((float)heap_surviving_size / regular_survive_ratio); + new_heap_total_size = max((POINTER_SIZE_INT)((float)heap_surviving_size / regular_survive_ratio), + (POINTER_SIZE_INT)((float)mos->period_surviving_size / regular_survive_ratio + los->committed_heap_size)); new_heap_total_size = round_down_to_size(new_heap_total_size, SPACE_ALLOC_UNIT); @@ -469,25 +535,119 @@ } +void gc_gen_start_concurrent_mark(GC_Gen* gc) +{ + assert(0); +} + +static inline void nos_collection(Space *nos) +{ fspace_collection((Fspace*)nos); } + +static inline void mos_collection(Space *mos) +{ + if(MAJOR_ALGO == MAJOR_MARK_SWEEP) + sspace_collection((Sspace*)mos); + else + mspace_collection((Mspace*)mos); +} + +static inline void los_collection(Space *los) +{ + if(MAJOR_ALGO != MAJOR_MARK_SWEEP) + lspace_collection((Lspace*)los); +} + +static void gc_gen_update_space_info_before_gc(GC_Gen *gc) +{ + Fspace *nos = (Fspace*)gc->nos; + Mspace *mos = (Mspace*)gc->mos; + Lspace *los = (Lspace*)gc->los; + + /* Update before every GC to avoid the atomic operation in every fspace_alloc_block */ + assert( nos->free_block_idx >= nos->first_block_idx ); + nos->num_used_blocks = nos->free_block_idx - 
nos->first_block_idx; + nos->last_alloced_size = GC_BLOCK_SIZE_BYTES * nos->num_used_blocks; + nos->accumu_alloced_size += nos->last_alloced_size; + + mos->num_used_blocks = mos->free_block_idx - mos->first_block_idx; + + if(los){ + assert(MAJOR_ALGO != MAJOR_MARK_SWEEP); + los->accumu_alloced_size += los->last_alloced_size; + } +} + +static void gc_gen_update_space_info_after_gc(GC_Gen *gc) +{ + Space *nos = gc_get_nos(gc); + Space *mos = gc_get_mos(gc); + Space *los = gc_get_los(gc); + + /* Minor collection, but also can be every n minor collections, use fspace->num_collections to identify. */ + if (gc_match_kind((GC*)gc, MINOR_COLLECTION)){ + mos->accumu_alloced_size += mos->last_alloced_size; + /* The alloced_size reset operation of mos and nos is not necessary, because they are not accumulated. + * But los->last_alloced_size must be reset, because it is accumulated. */ + if(los){ + assert(MAJOR_ALGO != MAJOR_MARK_SWEEP); + los->last_alloced_size = 0; + } + /* Major collection, but also can be every n major collections, use mspace->num_collections to identify. */ + } else { + mos->total_alloced_size += mos->accumu_alloced_size; + mos->last_alloced_size = 0; + mos->accumu_alloced_size = 0; + + nos->total_alloced_size += nos->accumu_alloced_size; + nos->last_alloced_size = 0; + nos->accumu_alloced_size = 0; + + if(los){ + assert(MAJOR_ALGO != MAJOR_MARK_SWEEP); + los->total_alloced_size += los->accumu_alloced_size; + los->last_alloced_size = 0; + los->accumu_alloced_size = 0; + } + } +} + +static void nos_reset_after_collection(Space *nos) +{ + fspace_reset_after_collection((Fspace*)nos); +} + +static void mos_reset_after_collection(Space *mos) +{ + if(MAJOR_ALGO != MAJOR_MARK_SWEEP) + mspace_reset_after_collection((Mspace*)mos); + else + sspace_reset_after_collection((Sspace*)mos); +} + Boolean IS_FALLBACK_COMPACTION = FALSE; /* only for debugging, don't use it. 
*/ -static unsigned int mspace_num_used_blocks_before_minor; -static unsigned int mspace_num_used_blocks_after_minor; void gc_gen_stats_verbose(GC_Gen* gc); -void gc_gen_reclaim_heap(GC_Gen* gc) + +void gc_gen_reclaim_heap(GC_Gen *gc, int64 gc_start_time) { INFO2("gc.process", "GC: start GC_Gen ...\n"); - - if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE); - - Blocked_Space* fspace = (Blocked_Space*)gc->nos; - Blocked_Space* mspace = (Blocked_Space*)gc->mos; - mspace->num_used_blocks = mspace->free_block_idx - mspace->first_block_idx; - fspace->num_used_blocks = fspace->free_block_idx - fspace->first_block_idx; - + + Space *nos = gc->nos; + Space *mos = gc->mos; + Space *los = gc->los; + + + if(verify_live_heap && (MAJOR_ALGO != MAJOR_MARK_SWEEP)) + gc_verify_heap((GC*)gc, TRUE); + + if(MAJOR_ALGO != MAJOR_MARK_SWEEP){ + gc_gen_update_space_info_before_gc(gc); + gc_compute_space_tune_size_before_marking((GC*)gc); + } + gc->collect_result = TRUE; #ifdef GC_GEN_STATS - gc_gen_stats_reset_before_collection((GC_Gen*)gc); - gc_gen_collector_stats_reset((GC_Gen*)gc); + gc_gen_stats_reset_before_collection(gc); + gc_gen_collector_stats_reset(gc); #endif if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){ @@ -495,41 +655,49 @@ INFO2("gc.process", "GC: start minor collection ...\n"); /* FIXME:: move_object is only useful for nongen_slide_copy */ - gc->mos->move_object = 0; - /* This is for compute mspace->last_alloced_size */ - - mspace_num_used_blocks_before_minor = mspace->free_block_idx - mspace->first_block_idx; - fspace_collection(gc->nos); + mos->move_object = FALSE; + + /* This is for compute mos->last_alloced_size */ + unsigned int mos_used_blocks_before_minor, mos_used_blocks_after_minor; /* only used for non MAJOR_MARK_SWEEP collection */ + if(MAJOR_ALGO != MAJOR_MARK_SWEEP) + mos_used_blocks_before_minor = ((Blocked_Space*)mos)->free_block_idx - ((Blocked_Space*)mos)->first_block_idx; + + nos_collection(nos); #ifdef GC_GEN_STATS gc_gen_collector_stats_verbose_minor_collection(gc); #endif - mspace_num_used_blocks_after_minor = mspace->free_block_idx - mspace->first_block_idx; - assert( mspace_num_used_blocks_before_minor <= mspace_num_used_blocks_after_minor ); - mspace->last_alloced_size = GC_BLOCK_SIZE_BYTES * ( mspace_num_used_blocks_after_minor - mspace_num_used_blocks_before_minor ); - /*If the current minor collection failed, i.e. there happens a fallback, we should not do the minor sweep of LOS*/ + if(MAJOR_ALGO != MAJOR_MARK_SWEEP){ + mos_used_blocks_after_minor = ((Blocked_Space*)mos)->free_block_idx - ((Blocked_Space*)mos)->first_block_idx; + assert( mos_used_blocks_before_minor <= mos_used_blocks_after_minor ); + ((Blocked_Space*)mos)->last_alloced_size = GC_BLOCK_SIZE_BYTES * ( mos_used_blocks_after_minor - mos_used_blocks_before_minor ); + } + + /* If the minor collection failed, i.e. there happens a fallback, we should not do the minor sweep of LOS. 
*/ if(gc->collect_result != FALSE && !gc_is_gen_mode()) { #ifdef GC_GEN_STATS gc->stats->num_minor_collections++; #endif - lspace_collection(gc->los); + los_collection(los); } - gc->mos->move_object = 1; + + mos->move_object = TRUE; INFO2("gc.process", "GC: end of minor collection ...\n"); - }else{ + } else { INFO2("gc.process", "GC: start major collection ...\n"); - /* process mos and nos together in one compaction */ - gc->los->move_object = 1; - - mspace_collection(gc->mos); /* fspace collection is included */ - lspace_collection(gc->los); - - gc->los->move_object = 0; + if(MAJOR_ALGO != MAJOR_MARK_SWEEP) + los->move_object = TRUE; + + mos_collection(mos); /* collect mos and nos together */ + los_collection(los); + + if(MAJOR_ALGO != MAJOR_MARK_SWEEP) + los->move_object = FALSE; #ifdef GC_GEN_STATS gc->stats->num_major_collections++; @@ -538,33 +706,39 @@ INFO2("gc.process", "GC: end of major collection ...\n"); } - + if(gc->collect_result == FALSE && gc_match_kind((GC*)gc, MINOR_COLLECTION)){ - + INFO2("gc.process", "GC: Minor collection failed, transform to fallback collection ..."); - - if(gc_is_gen_mode()) gc_clear_remset((GC*)gc); - /* runout mspace in minor collection */ - assert(mspace->free_block_idx == mspace->ceiling_block_idx + 1); - mspace->num_used_blocks = mspace->num_managed_blocks; - + if(gc_is_gen_mode()) gc_clear_remset((GC*)gc); + + /* runout mos in minor collection */ + if(MAJOR_ALGO != MAJOR_MARK_SWEEP){ + assert(((Blocked_Space*)mos)->free_block_idx == ((Blocked_Space*)mos)->ceiling_block_idx + 1); + ((Blocked_Space*)mos)->num_used_blocks = ((Blocked_Space*)mos)->num_managed_blocks; + } + IS_FALLBACK_COMPACTION = TRUE; gc_reset_collect_result((GC*)gc); - gc->collect_kind = FALLBACK_COLLECTION; + gc->collect_kind = FALLBACK_COLLECTION; + #ifdef GC_GEN_STATS /*since stats is changed in minor collection, we need to reset stats before fallback collection*/ gc_gen_stats_reset_before_collection((GC_Gen*)gc); gc_gen_collector_stats_reset((GC_Gen*)gc); #endif - if(verify_live_heap) event_gc_collect_kind_changed((GC*)gc); + if(verify_live_heap && (MAJOR_ALGO != MAJOR_MARK_SWEEP)) + event_gc_collect_kind_changed((GC*)gc); + + if(MAJOR_ALGO != MAJOR_MARK_SWEEP) + los->move_object = TRUE; + mos_collection(mos); /* collect both mos and nos */ + los_collection(los); + if(MAJOR_ALGO != MAJOR_MARK_SWEEP) + los->move_object = FALSE; - gc->los->move_object = 1; - mspace_collection(gc->mos); /* fspace collection is included */ - lspace_collection(gc->los); - gc->los->move_object = 0; - IS_FALLBACK_COMPACTION = FALSE; #ifdef GC_GEN_STATS @@ -582,59 +756,36 @@ exit(0); } - if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE); - - /* FIXME:: clear root set here to support verify. 
*/ -#ifdef COMPRESS_REFERENCE - gc_set_pool_clear(gc->metadata->gc_uncompressed_rootset_pool); -#endif + nos_reset_after_collection(nos); + if(gc_match_kind((GC*)gc, MAJOR_COLLECTION)) + mos_reset_after_collection(mos); + + if(verify_live_heap && (MAJOR_ALGO != MAJOR_MARK_SWEEP)) + gc_verify_heap((GC*)gc, FALSE); + + assert(MAJOR_ALGO == MAJOR_MARK_SWEEP || !los->move_object); + + if(MAJOR_ALGO != MAJOR_MARK_SWEEP){ + gc_gen_adjust_heap_size(gc); + + int64 pause_time = time_now() - gc_start_time; + gc->time_collections += pause_time; + gc_gen_adapt(gc, pause_time); + gc_space_tuner_reset((GC*)gc); + } + + gc_gen_update_space_info_after_gc(gc); - assert(!gc->los->move_object); #ifdef GC_GEN_STATS - gc_gen_stats_update_after_collection((GC_Gen*)gc); + gc_gen_stats_update_after_collection(gc); gc_gen_stats_verbose(gc); #endif INFO2("gc.process", "GC: end of GC_Gen\n"); - + return; } -void gc_gen_update_space_before_gc(GC_Gen *gc) -{ - /* Update before every GC to avoid the atomic operation in every fspace_alloc_block */ - assert( gc->nos->free_block_idx >= gc->nos->first_block_idx ); - gc->nos->last_alloced_size = GC_BLOCK_SIZE_BYTES * ( gc->nos->free_block_idx - gc->nos->first_block_idx ); - - gc->nos->accumu_alloced_size += gc->nos->last_alloced_size; - gc->los->accumu_alloced_size += gc->los->last_alloced_size; -} - -void gc_gen_update_space_after_gc(GC_Gen *gc) -{ - /* Minor collection, but also can be every n minor collections, use fspace->num_collections to identify. */ - if (gc_match_kind((GC*)gc, MINOR_COLLECTION)){ - gc->mos->accumu_alloced_size += gc->mos->last_alloced_size; - /* The alloced_size reset operation of mos and nos is not necessary, because they are not accumulated. - * But los->last_alloced_size must be reset, because it is accumulated. */ - gc->los->last_alloced_size = 0; - /* Major collection, but also can be every n major collections, use mspace->num_collections to identify. 
*/ - }else{ - gc->mos->total_alloced_size += gc->mos->accumu_alloced_size; - gc->mos->last_alloced_size = 0; - gc->mos->accumu_alloced_size = 0; - - gc->nos->total_alloced_size += gc->nos->accumu_alloced_size; - gc->nos->last_alloced_size = 0; - gc->nos->accumu_alloced_size = 0; - - gc->los->total_alloced_size += gc->los->accumu_alloced_size; - gc->los->last_alloced_size = 0; - gc->los->accumu_alloced_size = 0; - - } -} - void gc_gen_iterate_heap(GC_Gen *gc) { /** the function is called after stoped the world **/ @@ -646,7 +797,7 @@ mutator = mutator->next; } - Mspace* mspace = gc->mos; + Blocked_Space *mspace = (Blocked_Space*)gc->mos; Block_Header *curr_block = (Block_Header*)mspace->blocks; Block_Header *space_end = (Block_Header*)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx]; while(curr_block < space_end) { @@ -665,7 +816,7 @@ if(curr_block == NULL) break; } - Fspace* fspace = gc->nos; + Blocked_Space *fspace = (Blocked_Space*)gc->nos; curr_block = (Block_Header*)fspace->blocks; space_end = (Block_Header*)&fspace->blocks[fspace->free_block_idx - fspace->first_block_idx]; while(curr_block < space_end) { @@ -680,7 +831,7 @@ if(curr_block == NULL) break; } - Lspace* lspace = gc->los; + Lspace *lspace = (Lspace*)gc->los; POINTER_SIZE_INT lspace_obj = (POINTER_SIZE_INT)lspace->heap_start; POINTER_SIZE_INT lspace_end = (POINTER_SIZE_INT)lspace->heap_end; unsigned int hash_extend_size = 0; @@ -699,6 +850,14 @@ } } +void gc_gen_hook_for_collector_init(Collector *collector) +{ + if(MAJOR_ALGO == MAJOR_MARK_SWEEP){ + allocator_init_local_chunks((Allocator*)collector); + collector_init_free_chunk_list(collector); + } +} + void gc_gen_collection_verbose_info(GC_Gen *gc, int64 pause_time, int64 mutator_time) { @@ -719,10 +878,10 @@ INFO2("gc.collect","GC: current minor collection num: "<stats->num_minor_collections); #endif break; - case MAJOR_COLLECTION: - INFO2("gc.collect","GC: collection type: major"); + case NORMAL_MAJOR_COLLECTION: + INFO2("gc.collect","GC: collection type: normal major"); #ifdef GC_GEN_STATS - INFO2("gc.collect","GC: current major collection num: "<stats->num_major_collections); + INFO2("gc.collect","GC: current normal major collection num: "<stats->num_major_collections); #endif break; case FALLBACK_COLLECTION: @@ -752,9 +911,9 @@ { INFO2("gc.space","GC: Heap info after GC["<num_collections<<"]:" <<"\nGC: Heap size: "<committed_heap_size)<<", free size:"<los->committed_heap_size)<<", free size:"<los)) - <<"\nGC: MOS size: "<mos->committed_heap_size)<<", free size:"<mos)) - <<"\nGC: NOS size: "<nos->committed_heap_size)<<", free size:"<nos))<<"\n"); + <<"\nGC: LOS size: "<los->committed_heap_size)<<", free size:"<los)) + <<"\nGC: MOS size: "<mos->committed_heap_size)<<", free size:"<mos)) + <<"\nGC: NOS size: "<nos->committed_heap_size)<<", free size:"<nos))<<"\n"); } inline void gc_gen_initial_verbose_info(GC_Gen *gc) @@ -789,6 +948,4 @@ <<"\nGC: total appliction execution time: "<total_mutator_time<<"\n"); #endif } - - Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h Mon Aug 27 01:11:57 2007 @@ -28,6 +28,7 @@ #include "../trace_forward/fspace.h" #include "../mark_compact/mspace.h" #include "../los/lspace.h" 
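The sizing heuristic that gc_gen_initialize() and determine_los_size() implement at the top of this patch (LOS defaults to min_heap_size >> 7, then nos + mos + LOS = min heap with mos = nos * survive_ratio) can be sanity-checked in isolation. What follows is a minimal standalone sketch of the same arithmetic, not DRLVM code: the SPACE_ALLOC_UNIT value, the round_down_to_size() body, and the 256 MB heap are illustrative assumptions; only the 0.2f survive ratio and the >> 7 LOS fraction come from the patch itself.

#include <cstdio>
#include <cstddef>

// Illustrative stand-ins; the real SPACE_ALLOC_UNIT and round_down_to_size()
// live elsewhere in the GC sources.
static const size_t SPACE_ALLOC_UNIT = 64 * 1024;
static size_t round_down_to_size(size_t size, size_t unit)
{ return size & ~(unit - 1); } // unit must be a power of two

int main()
{
  size_t min_heap_size = 256 * 1024 * 1024; // assumed 256 MB committed heap
  float survive_ratio = 0.2f;               // initial hint, as in the patch

  // LOS defaults to 1/128 of the heap (min_heap_size >> 7 in the patch).
  size_t los_size = round_down_to_size(min_heap_size >> 7, SPACE_ALLOC_UNIT);

  // nos + mos = min_heap - los and mos = nos * survive_ratio, hence:
  size_t nos_commit = (size_t)((float)(min_heap_size - los_size) / (1.0f + survive_ratio));
  nos_commit = round_down_to_size(nos_commit, SPACE_ALLOC_UNIT);
  size_t mos_commit = min_heap_size - los_size - nos_commit;

  printf("LOS %zu KB, NOS %zu KB, MOS %zu KB\n",
         los_size >> 10, nos_commit >> 10, mos_commit >> 10);
  return 0;
}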
+#include "../mark_sweep/sspace.h" #include "../finalizer_weakref/finalizer_weakref_metadata.h" #ifdef GC_GEN_STATS @@ -74,6 +75,10 @@ unsigned int num_collectors; unsigned int num_active_collectors; /* not all collectors are working */ + Marker** markers; + unsigned int num_markers; + unsigned int num_active_markers; + /* metadata is the pool for rootset, markstack, etc. */ GC_Metadata* metadata; Finref_Metadata *finref_metadata; @@ -93,6 +98,13 @@ //For_LOS_extend Space_Tuner* tuner; + unsigned int gc_concurrent_status; + Collection_Scheduler* collection_scheduler; + + SpinLock concurrent_mark_lock; + SpinLock enumerate_rootset_lock; + + /* system info */ unsigned int _system_alloc_unit; unsigned int _machine_page_size_bytes; @@ -100,9 +112,9 @@ /* END of GC --> */ Block* blocks; - Fspace *nos; - Mspace *mos; - Lspace *los; + Space *nos; + Space *mos; + Space *los; Boolean force_major_collect; Gen_Mode_Adaptor* gen_mode_adaptor; @@ -124,34 +136,23 @@ void gc_gen_wrapup_verbose(GC_Gen* gc); inline POINTER_SIZE_INT gc_gen_free_memory_size(GC_Gen* gc) -{ return space_free_memory_size((Blocked_Space*)gc->nos) + - space_free_memory_size((Blocked_Space*)gc->mos) + - lspace_free_memory_size(gc->los); } +{ return blocked_space_free_mem_size((Blocked_Space*)gc->nos) + + blocked_space_free_mem_size((Blocked_Space*)gc->mos) + + lspace_free_memory_size((Lspace*)gc->los); } inline POINTER_SIZE_INT gc_gen_total_memory_size(GC_Gen* gc) { return space_committed_size((Space*)gc->nos) + space_committed_size((Space*)gc->mos) + - lspace_committed_size(gc->los); } + lspace_committed_size((Lspace*)gc->los); } ///////////////////////////////////////////////////////////////////////////////////////// -inline void gc_nos_initialize(GC_Gen* gc, void* start, POINTER_SIZE_INT nos_size, POINTER_SIZE_INT commit_size) -{ fspace_initialize((GC*)gc, start, nos_size, commit_size); } - -inline void gc_nos_destruct(GC_Gen* gc) -{ fspace_destruct(gc->nos); } - -inline void gc_mos_initialize(GC_Gen* gc, void* start, POINTER_SIZE_INT mos_size, POINTER_SIZE_INT commit_size) -{ mspace_initialize((GC*)gc, start, mos_size, commit_size); } - -inline void gc_mos_destruct(GC_Gen* gc) -{ mspace_destruct(gc->mos); } - -inline void gc_los_initialize(GC_Gen* gc, void* start, POINTER_SIZE_INT los_size) -{ lspace_initialize((GC*)gc, start, los_size); } - -inline void gc_los_destruct(GC_Gen* gc) -{ lspace_destruct(gc->los); } +void gc_nos_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT nos_size, POINTER_SIZE_INT commit_size); +void gc_nos_destruct(GC_Gen *gc); +void gc_mos_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT mos_size, POINTER_SIZE_INT commit_size); +void gc_mos_destruct(GC_Gen *gc); +void gc_los_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT los_size); +void gc_los_destruct(GC_Gen *gc); inline Space* space_of_addr(GC* gc, void* addr) { @@ -161,9 +162,9 @@ return (Space*)((GC_Gen*)gc)->los; } -void* mos_alloc(unsigned size, Allocator *allocator); +extern Space_Alloc_Func mos_alloc; void* nos_alloc(unsigned size, Allocator *allocator); -void* los_alloc(unsigned size, Allocator *allocator); +extern Space_Alloc_Func los_alloc; void* los_try_alloc(POINTER_SIZE_INT size, GC* gc); Space* gc_get_nos(GC_Gen* gc); @@ -176,10 +177,11 @@ void gc_decide_collection_algorithm(GC_Gen* gc, char* minor_algo, char* major_algo); void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause); +unsigned int gc_next_collection_kind(GC_Gen* gc); void gc_gen_adapt(GC_Gen* gc, int64 pause_time); -void gc_gen_reclaim_heap(GC_Gen* gc); 
+void gc_gen_reclaim_heap(GC_Gen* gc, int64 gc_start_time); void gc_gen_assign_free_area_to_mutators(GC_Gen* gc); @@ -192,61 +194,10 @@ void gc_gen_iterate_heap(GC_Gen *gc); -extern Boolean GEN_NONGEN_SWITCH ; - -inline Boolean obj_is_dead_in_gen_minor_gc(Partial_Reveal_Object *p_obj) -{ - /* - * The first condition is for supporting switch between nongen and gen minor collection - * With this kind of switch dead objects in MOS & LOS may be set the mark or fw bit in oi - */ - return obj_belongs_to_nos(p_obj) && !obj_is_marked_or_fw_in_oi(p_obj); -} - -inline Boolean obj_is_dead_in_nongen_minor_gc(Partial_Reveal_Object *p_obj) -{ - return (obj_belongs_to_nos(p_obj) && !obj_is_fw_in_oi(p_obj)) - || (!obj_belongs_to_nos(p_obj) && !obj_is_marked_in_oi(p_obj)); -} +void gc_gen_start_concurrent_mark(GC_Gen* gc); -inline Boolean obj_is_dead_in_major_gc(Partial_Reveal_Object *p_obj) -{ - return !obj_is_marked_in_vt(p_obj); -} - -// clear the two least significant bits of p_obj first -inline Boolean gc_obj_is_dead(GC *gc, Partial_Reveal_Object *p_obj) -{ - assert(p_obj); - if(gc_match_kind(gc, MINOR_COLLECTION)){ - if(gc_is_gen_mode()) - return obj_is_dead_in_gen_minor_gc(p_obj); - else - return obj_is_dead_in_nongen_minor_gc(p_obj); - } else { - return obj_is_dead_in_major_gc(p_obj); - } -} - -extern Boolean forward_first_half; -extern void* object_forwarding_boundary; - -inline Boolean fspace_obj_to_be_forwarded(Partial_Reveal_Object *p_obj) -{ - if(!obj_belongs_to_nos(p_obj)) return FALSE; - return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary); -} - -inline Boolean obj_need_move(GC *gc, Partial_Reveal_Object *p_obj) -{ - if(gc_is_gen_mode() && gc_match_kind(gc, MINOR_COLLECTION)) - return fspace_obj_to_be_forwarded(p_obj); - - Space *space = space_of_addr(gc, p_obj); - return space->move_object; -} +extern Boolean GEN_NONGEN_SWITCH ; #endif /* ifndef _GC_GEN_H_ */ - Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp Mon Aug 27 01:11:57 2007 @@ -101,11 +101,11 @@ Blocked_Space* mspace = (Blocked_Space*)gc->mos; Gen_Mode_Adaptor* gen_mode_adaptor = gc->gen_mode_adaptor; - POINTER_SIZE_INT mos_free_size = space_free_memory_size(mspace); - POINTER_SIZE_INT nos_free_size = space_free_memory_size(fspace); + POINTER_SIZE_INT mos_free_size = blocked_space_free_mem_size(mspace); + POINTER_SIZE_INT nos_free_size = blocked_space_free_mem_size(fspace); POINTER_SIZE_INT total_free_size = mos_free_size + nos_free_size; - if(!gc_match_kind((GC*)gc, MINOR_COLLECTION)) { + if(gc_match_kind((GC*)gc, MAJOR_COLLECTION)) { assert(!gc_is_gen_mode()); if(gen_mode_adaptor->major_survive_ratio_threshold != 0 && mspace->survive_ratio > gen_mode_adaptor->major_survive_ratio_threshold){ @@ -195,20 +195,20 @@ float survive_ratio = 0.2f; - POINTER_SIZE_INT mos_free_size = space_free_memory_size(mspace); - POINTER_SIZE_INT nos_free_size = space_free_memory_size(fspace); + POINTER_SIZE_INT mos_free_size = blocked_space_free_mem_size(mspace); + POINTER_SIZE_INT nos_free_size = blocked_space_free_mem_size(fspace); assert(nos_free_size == space_committed_size((Space*)fspace)); POINTER_SIZE_INT total_free_size = 
mos_free_size + nos_free_size; - if(!gc_match_kind((GC*)gc, MINOR_COLLECTION)) gc->force_gen_mode = FALSE; + if(gc_match_kind((GC*)gc, MAJOR_COLLECTION)) gc->force_gen_mode = FALSE; if(!gc->force_gen_mode){ /*Major collection:*/ - if(!gc_match_kind((GC*)gc, MINOR_COLLECTION)){ + if(gc_match_kind((GC*)gc, MAJOR_COLLECTION)){ mspace->time_collections += pause_time; Tslow = (float)pause_time; SMax = total_free_size; /*If fall back happens, and nos_boundary is up to heap_ceiling, then we force major.*/ - if(gc->nos->num_managed_blocks == 0) + if(((Fspace*)gc->nos)->num_managed_blocks == 0) gc->force_major_collect = TRUE; else gc->force_major_collect = FALSE; @@ -293,8 +293,7 @@ POINTER_SIZE_INT new_mos_size; POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)fspace); - POINTER_SIZE_INT used_mos_size = space_used_memory_size(mspace); - POINTER_SIZE_INT free_mos_size = space_committed_size((Space*)mspace) - used_mos_size; + POINTER_SIZE_INT used_mos_size = blocked_space_used_mem_size(mspace); POINTER_SIZE_INT total_size; @@ -455,9 +454,9 @@ <num_collections = 0; lspace->time_collections = 0; lspace->survive_ratio = 0.5f; - lspace->last_alloced_size = 0; lspace->accumu_alloced_size = 0; lspace->total_alloced_size = 0; lspace->last_surviving_size = 0; lspace->period_surviving_size = 0; - gc_set_los((GC_Gen*)gc, (Space*)lspace); p_global_lspace_move_obj = &(lspace->move_object); los_boundary = lspace->heap_end; - return; + return lspace; } void lspace_destruct(Lspace* lspace) Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace.h Mon Aug 27 01:11:57 2007 @@ -50,6 +50,8 @@ /*LOS_Shrink:This field stands for sliding compact to lspace */ Boolean move_object; + Space_Statistics* space_statistic; + /* Size allocted since last collection. */ volatile uint64 last_alloced_size; /* Size allocted since last major collection. 
*/ @@ -75,9 +77,9 @@ void* scompact_fa_end; }Lspace; -void lspace_initialize(GC* gc, void* reserved_base, POINTER_SIZE_INT lspace_size); +Lspace *lspace_initialize(GC* gc, void* reserved_base, POINTER_SIZE_INT lspace_size); void lspace_destruct(Lspace* lspace); -Managed_Object_Handle lspace_alloc(POINTER_SIZE_INT size, Allocator* allocator); +Managed_Object_Handle lspace_alloc(unsigned size, Allocator* allocator); void* lspace_try_alloc(Lspace* lspace, POINTER_SIZE_INT alloc_size); void lspace_sliding_compact(Collector* collector, Lspace* lspace); void lspace_compute_object_target(Collector* collector, Lspace* lspace); @@ -85,11 +87,20 @@ void lspace_collection(Lspace* lspace); inline POINTER_SIZE_INT lspace_free_memory_size(Lspace* lspace) -{ /* FIXME:: */ +{ + if(!lspace) return 0; + /* FIXME:: */ assert(lspace->committed_heap_size > (POINTER_SIZE_INT)lspace->last_surviving_size + (POINTER_SIZE_INT)lspace->last_alloced_size); return (lspace->committed_heap_size - (POINTER_SIZE_INT)lspace->last_surviving_size - (POINTER_SIZE_INT)lspace->last_alloced_size); } -inline POINTER_SIZE_INT lspace_committed_size(Lspace* lspace){ return lspace->committed_heap_size; } + +inline POINTER_SIZE_INT lspace_committed_size(Lspace* lspace) +{ + if(lspace) + return lspace->committed_heap_size; + else + return 0; +} inline Partial_Reveal_Object* lspace_get_next_marked_object( Lspace* lspace, unsigned int* iterate_index) { Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp Mon Aug 27 01:11:57 2007 @@ -22,7 +22,8 @@ #include "lspace.h" #include "../gen/gen.h" #include "../common/space_tuner.h" - +#include "../common/gc_concurrent.h" +#include "../common/collection_scheduler.h" #ifdef GC_GEN_STATS #include "../gen/gen_stats.h" #endif @@ -197,13 +198,17 @@ return p_result; } -void* lspace_alloc(POINTER_SIZE_INT size, Allocator *allocator) +void* lspace_alloc(unsigned size, Allocator *allocator) { unsigned int try_count = 0; void* p_result = NULL; POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size); Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)allocator->gc); Free_Area_Pool* pool = lspace->free_pool; + + if(gc_need_start_concurrent_mark(allocator->gc)) + gc_start_concurrent_mark(allocator->gc); + while( try_count < 2 ){ if(p_result = lspace_try_alloc(lspace, alloc_size)) @@ -334,7 +339,7 @@ /*Lspace collection in major collection must move object*/ assert(lspace->move_object); //debug_minor_sweep - Block* mos_first_block = ((GC_Gen*)gc)->mos->blocks; + Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks; lspace->heap_end = (void*)mos_first_block; assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES)); new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size; @@ -346,7 +351,7 @@ case TRANS_FROM_LOS_TO_MOS:{ assert(lspace->move_object); assert(tuner->tuning_size); - Block* mos_first_block = ((GC_Gen*)gc)->mos->blocks; + Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks; assert( (POINTER_SIZE_INT)lspace->heap_end - trans_size == (POINTER_SIZE_INT)mos_first_block ); lspace->heap_end = 
(void*)mos_first_block; lspace->committed_heap_size -= trans_size; @@ -475,5 +480,4 @@ TRACE2("gc.process", "GC: end of lspace sweep algo ...\n"); return; } - Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp Mon Aug 27 01:11:57 2007 @@ -35,7 +35,7 @@ extern void gc_set_mos(GC_Gen* gc, Space* space); extern Space* gc_get_nos(GC_Gen* gc); -void mspace_initialize(GC* gc, void* start, POINTER_SIZE_INT mspace_size, POINTER_SIZE_INT commit_size) +Mspace *mspace_initialize(GC* gc, void* start, POINTER_SIZE_INT mspace_size, POINTER_SIZE_INT commit_size) { Mspace* mspace = (Mspace*)STD_MALLOC( sizeof(Mspace)); assert(mspace); @@ -69,6 +69,11 @@ space_init_blocks((Blocked_Space*)mspace); + mspace->space_statistic = (Space_Statistics*)STD_MALLOC(sizeof(Space_Statistics)); + assert(mspace->space_statistic); + memset(mspace->space_statistic, 0, sizeof(Space_Statistics)); + + mspace->num_collections = 0; mspace->time_collections = 0; mspace->survive_ratio = 0.2f; @@ -84,9 +89,7 @@ mspace->expected_threshold_ratio = 0.5f; - gc_set_mos((GC_Gen*)gc, (Space*)mspace); - - return; + return mspace; } @@ -97,47 +100,52 @@ STD_FREE(mspace); } -void mspace_block_iterator_init_free(Mspace* mspace) -{ - mspace->block_iterator = (Block_Header*)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx]; -} - -#include "../common/space_tuner.h" -void mspace_block_iterator_init(Mspace* mspace) -{ - mspace->block_iterator = (Block_Header*)mspace->blocks; - return; -} - -Block_Header* mspace_block_iterator_get(Mspace* mspace) -{ - return (Block_Header*)mspace->block_iterator; -} - -Block_Header* mspace_block_iterator_next(Mspace* mspace) +void mspace_reset_after_collection(Mspace* mspace) { - Block_Header* cur_block = (Block_Header*)mspace->block_iterator; + unsigned int old_num_used = mspace->num_used_blocks; + unsigned int new_num_used = mspace->free_block_idx - mspace->first_block_idx; + unsigned int num_used = old_num_used>new_num_used? 
old_num_used:new_num_used; - while(cur_block != NULL){ - Block_Header* next_block = cur_block->next; - - Block_Header* temp = (Block_Header*)atomic_casptr((volatile void **)&mspace->block_iterator, next_block, cur_block); - if(temp != cur_block){ - cur_block = (Block_Header*)mspace->block_iterator; - continue; + Block* blocks = mspace->blocks; + unsigned int i; + for(i=0; i < num_used; i++){ + Block_Header* block = (Block_Header*)&(blocks[i]); + assert(!((POINTER_SIZE_INT)block % GC_BLOCK_SIZE_BYTES)); + block->status = BLOCK_USED; + block->free = block->new_free; + block->new_free = block->base; + block->src = NULL; + block->next_src = NULL; + assert(!block->dest_counter); + + if(i >= new_num_used){ + block->status = BLOCK_FREE; + block->free = GC_BLOCK_BODY(block); } - return cur_block; } - /* run out space blocks */ - return NULL; + mspace->num_used_blocks = new_num_used; + /*For_statistic mos infomation*/ + mspace->period_surviving_size = new_num_used * GC_BLOCK_SIZE_BYTES; + + /* we should clear the remaining blocks which are set to be BLOCK_COMPACTED or BLOCK_TARGET */ + for(; i < mspace->num_managed_blocks; i++){ + Block_Header* block = (Block_Header*)&(blocks[i]); + assert(block->status& (BLOCK_COMPACTED|BLOCK_TARGET|BLOCK_DEST)); + block->status = BLOCK_FREE; + block->src = NULL; + block->next_src = NULL; + block->free = GC_BLOCK_BODY(block); + assert(!block->dest_counter); + } } + #include "../common/fix_repointed_refs.h" void mspace_fix_after_copy_nursery(Collector* collector, Mspace* mspace) { //the first block is not set yet - Block_Header* curr_block = mspace_block_iterator_next(mspace); + Block_Header* curr_block = blocked_space_block_iterator_next((Blocked_Space*)mspace); unsigned int first_block_idx = mspace->first_block_idx; unsigned int old_num_used = mspace->num_used_blocks; unsigned int old_free_idx = first_block_idx + old_num_used; @@ -155,7 +163,7 @@ else /* for blocks used for nos copy */ block_fix_ref_after_copying(curr_block); - curr_block = mspace_block_iterator_next(mspace); + curr_block = blocked_space_block_iterator_next((Blocked_Space*)mspace); } return; Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h Mon Aug 27 01:11:57 2007 @@ -38,6 +38,8 @@ GC* gc; Boolean move_object; + Space_Statistics* space_statistic; + /* Size allocted since last minor collection. */ volatile uint64 last_alloced_size; /* Size allocted since last major collection. 
*/ @@ -61,23 +63,20 @@ unsigned int num_used_blocks; unsigned int num_managed_blocks; unsigned int num_total_blocks; + + volatile Block_Header* block_iterator; /* END of Blocked_Space --> */ - volatile Block_Header* block_iterator; /*Threshold computed by NOS adaptive*/ float expected_threshold_ratio; }Mspace; -void mspace_initialize(GC* gc, void* reserved_base, POINTER_SIZE_INT mspace_size, POINTER_SIZE_INT commit_size); +Mspace *mspace_initialize(GC* gc, void* reserved_base, POINTER_SIZE_INT mspace_size, POINTER_SIZE_INT commit_size); void mspace_destruct(Mspace* mspace); void* mspace_alloc(unsigned size, Allocator *allocator); void mspace_collection(Mspace* mspace); - -void mspace_block_iterator_init(Mspace* mspace); -void mspace_block_iterator_init_free(Mspace* mspace); -Block_Header* mspace_block_iterator_next(Mspace* mspace); -Block_Header* mspace_block_iterator_get(Mspace* mspace); +void mspace_reset_after_collection(Mspace* mspace); void mspace_fix_after_copy_nursery(Collector* collector, Mspace* mspace); Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp Mon Aug 27 01:11:57 2007 @@ -33,6 +33,7 @@ #ifdef GC_GEN_STATS #include "../gen/gen_stats.h" #endif + void mspace_update_info_after_space_tuning(Mspace* mspace) { Space_Tuner *tuner = mspace->gc->tuner; @@ -60,44 +61,6 @@ Space* gc_get_nos(GC_Gen* gc); -void mspace_reset_after_compaction(Mspace* mspace) -{ - unsigned int old_num_used = mspace->num_used_blocks; - unsigned int new_num_used = mspace->free_block_idx - mspace->first_block_idx; - unsigned int num_used = old_num_used>new_num_used? old_num_used:new_num_used; - - Block* blocks = mspace->blocks; - unsigned int i; - for(i=0; i < num_used; i++){ - Block_Header* block = (Block_Header*)&(blocks[i]); - assert(!((POINTER_SIZE_INT)block % GC_BLOCK_SIZE_BYTES)); - block->status = BLOCK_USED; - block->free = block->new_free; - block->new_free = block->base; - block->src = NULL; - block->next_src = NULL; - assert(!block->dest_counter); - - if(i >= new_num_used){ - block->status = BLOCK_FREE; - block->free = GC_BLOCK_BODY(block); - } - } - mspace->num_used_blocks = new_num_used; - /*For_statistic mos infomation*/ - mspace->period_surviving_size = new_num_used * GC_BLOCK_SIZE_BYTES; - - /* we should clear the remaining blocks which are set to be BLOCK_COMPACTED or BLOCK_TARGET */ - for(; i < mspace->num_managed_blocks; i++){ - Block_Header* block = (Block_Header*)&(blocks[i]); - assert(block->status& (BLOCK_COMPACTED|BLOCK_TARGET|BLOCK_DEST)); - block->status = BLOCK_FREE; - block->src = NULL; - block->next_src = NULL; - block->free = GC_BLOCK_BODY(block); - assert(!block->dest_counter); - } -} void gc_reset_block_for_collectors(GC* gc, Mspace* mspace) { @@ -337,27 +300,27 @@ switch(mspace->collect_algorithm){ case MAJOR_COMPACT_SLIDE: - TRACE2("gc.process", "GC: slide compact algo start ... \n"); - collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace); - TRACE2("gc.process", "\nGC: end of slide compact algo ... \n"); + TRACE2("gc.process", "GC: slide compact algo start ... 
\n"); + collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace); + TRACE2("gc.process", "\nGC: end of slide compact algo ... \n"); #ifdef GC_GEN_STATS - gc_gen_stats_set_los_collected_flag((GC_Gen*)gc, true); - gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_SLIDE); + gc_gen_stats_set_los_collected_flag((GC_Gen*)gc, true); + gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_SLIDE); #endif break; case MAJOR_COMPACT_MOVE: IS_MOVE_COMPACT = TRUE; - - TRACE2("gc.process", "GC: move compact algo start ... \n"); - collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace); - TRACE2("gc.process", "\nGC: end of move compact algo ... \n"); - IS_MOVE_COMPACT = FALSE; + + TRACE2("gc.process", "GC: move compact algo start ... \n"); + collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace); + TRACE2("gc.process", "\nGC: end of move compact algo ... \n"); + IS_MOVE_COMPACT = FALSE; #ifdef GC_GEN_STATS - gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_MOVE); + gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_MOVE); #endif - break; - + break; + default: DIE2("gc.collect", "The speficied major collection algorithm doesn't exist!"); exit(0); @@ -368,4 +331,5 @@ return; } + Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h Mon Aug 27 01:11:57 2007 @@ -30,7 +30,6 @@ void gc_init_block_for_collectors(GC* gc, Mspace* mspace); void mspace_update_info_after_space_tuning(Mspace* mspace); -void mspace_reset_after_compaction(Mspace* mspace); Block_Header* mspace_get_first_compact_block(Mspace* mspace); Block_Header* mspace_get_first_target_block(Mspace* mspace); @@ -53,5 +52,6 @@ extern Boolean IS_MOVE_COMPACT; #endif /* _MSPACE_COLLECT_COMPACT_H_ */ + Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp Mon Aug 27 01:11:57 2007 @@ -34,15 +34,15 @@ static void set_first_and_end_block_to_move(Collector *collector, unsigned int mem_changed_size) { GC_Gen *gc_gen = (GC_Gen *)collector->gc; - Mspace *mspace = gc_gen->mos; - Fspace *fspace = gc_gen->nos; + Space *nos = gc_gen->nos; assert (!(mem_changed_size % SPACE_ALLOC_UNIT)); unsigned int mos_added_block_num = mem_changed_size >> GC_BLOCK_SHIFT_COUNT; // block number needing moving first_block_to_move = nos_first_free_block - mos_added_block_num; - if(first_block_to_move < (Block *)space_heap_start((Space *)fspace)) - first_block_to_move = (Block *)space_heap_start((Space *)fspace); + Block *nos_start_block = (Block*)space_heap_start(nos); + if(first_block_to_move < nos_start_block) + first_block_to_move = nos_start_block; } static POINTER_SIZE_INT fspace_shrink(Fspace *fspace) @@ -252,8 +252,7 @@ 
static void move_compacted_blocks_to_mspace(Collector *collector, unsigned int addr_diff) { GC_Gen *gc_gen = (GC_Gen *)collector->gc; - Mspace *mspace = gc_gen->mos; - Fspace *fspace = gc_gen->nos; + Mspace *mspace = (Mspace *)gc_gen->mos; while(Block_Header *block = mspace_block_iter_next_for_extension(mspace, (Block_Header *)nos_first_free_block)){ Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)block->base; @@ -272,9 +271,8 @@ void mspace_extend_compact(Collector *collector) { GC_Gen *gc_gen = (GC_Gen *)collector->gc; - Mspace *mspace = gc_gen->mos; - Fspace *fspace = gc_gen->nos; - Lspace *lspace = gc_gen->los; + Mspace *mspace = (Mspace *)gc_gen->mos; + Fspace *fspace = (Fspace *)gc_gen->nos; /*For_LOS adaptive: when doing EXTEND_COLLECTION, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/ gc_gen->collect_kind |= EXTEND_COLLECTION; Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp Mon Aug 27 01:11:57 2007 @@ -150,13 +150,13 @@ static void mspace_fix_repointed_refs(Collector *collector, Mspace *mspace) { - Block_Header* curr_block = mspace_block_iterator_next(mspace); + Block_Header* curr_block = blocked_space_block_iterator_next((Blocked_Space*)mspace); while( curr_block){ if(curr_block->block_idx >= mspace->free_block_idx) break; curr_block->free = curr_block->new_free; // block_fix_ref_after_marking(curr_block); - curr_block = mspace_block_iterator_next(mspace); + curr_block = blocked_space_block_iterator_next((Blocked_Space*)mspace); } return; @@ -233,7 +233,7 @@ } gc_reset_block_for_collectors(gc, mspace); - mspace_block_iterator_init(mspace); + blocked_space_block_iterator_init((Blocked_Space*)mspace); num_moving_collectors++; } while(num_moving_collectors != num_active_collectors + 1); @@ -291,8 +291,6 @@ TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"] finished"); return; } - mspace_reset_after_compaction(mspace); - fspace_reset_for_allocation(fspace); gc_set_pool_clear(gc->metadata->gc_rootset_pool); gc_set_pool_clear(gc->metadata->weak_roots_pool); Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp?rev=570028&r1=570027&r2=570028&view=diff ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (original) +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp Mon Aug 27 01:11:57 2007 @@ -156,14 +156,14 @@ #include "../common/fix_repointed_refs.h" -static void mspace_fix_repointed_refs(Collector* collector, Mspace* mspace) +static void mspace_fix_repointed_refs(Collector *collector, Mspace *mspace) { - Block_Header* curr_block = mspace_block_iterator_next(mspace); + Block_Header *curr_block = blocked_space_block_iterator_next((Blocked_Space*)mspace); /* for MAJOR_COLLECTION, we must iterate over all compact blocks */ while( curr_block){ 
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp?rev=570028&r1=570027&r2=570028&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp Mon Aug 27 01:11:57 2007
@@ -156,14 +156,14 @@
 
 #include "../common/fix_repointed_refs.h"
 
-static void mspace_fix_repointed_refs(Collector* collector, Mspace* mspace)
+static void mspace_fix_repointed_refs(Collector *collector, Mspace *mspace)
 {
-  Block_Header* curr_block = mspace_block_iterator_next(mspace);
+  Block_Header *curr_block = blocked_space_block_iterator_next((Blocked_Space*)mspace);
 
   /* for MAJOR_COLLECTION, we must iterate over all compact blocks */
   while( curr_block){
     block_fix_ref_after_repointing(curr_block);
-    curr_block = mspace_block_iterator_next(mspace);
+    curr_block = blocked_space_block_iterator_next((Blocked_Space*)mspace);
   }
 
   return;
@@ -181,7 +181,7 @@
 {
   assert(!next_block_for_dest);
 
-  Block_Header *block = mspace_block_iterator_get(mspace);
+  Block_Header *block = blocked_space_block_iterator_get((Blocked_Space*)mspace);
 
   if(block->status != BLOCK_DEST)
     return block;
@@ -248,7 +248,7 @@
       cur_dest_block = cur_dest_block->next;
     }
   } else {
-    cur_dest_block = mspace_block_iterator_get(mspace);
+    cur_dest_block = blocked_space_block_iterator_get((Blocked_Space*)mspace);
   }
 
   unsigned int total_dest_counter = 0;
@@ -312,7 +312,7 @@
   POINTER_SIZE_INT tuning_size = tuner->tuning_size;
   /*If LOS_Shrink, we just fix the repointed refs from the start of old mspace.*/
   if((tuner->kind == TRANS_NOTHING) || (tuner->kind == TRANS_FROM_LOS_TO_MOS)){
-    mspace_block_iterator_init(mspace);
+    blocked_space_block_iterator_init((Blocked_Space*)mspace);
     return;
   }else{
     /*If LOS_Extend, we fix from the new start of mspace, because the block list starts from there.*/
@@ -333,7 +333,7 @@
 
   if( tuner->kind == TRANS_NOTHING ){
     /*If space is not tuned, we just start from mspace->heap_start.*/
-    mspace_block_iterator_init(mspace);
+    blocked_space_block_iterator_init((Blocked_Space*)mspace);
     return;
   }else if (tuner->kind == TRANS_FROM_MOS_TO_LOS){
     /*If LOS_Extend, we compact from the new start of mspace, because the block list starts from there.*/
@@ -399,16 +399,6 @@
 }
 
-/*For LOS_Extend*/
-static void mspace_restore_block_chain(Mspace* mspace)
-{
-  GC* gc = mspace->gc;
-  Fspace* fspace = (Fspace*)gc_get_nos((GC_Gen*)gc);
-  if(gc->tuner->kind == TRANS_FROM_MOS_TO_LOS) {
-    Block_Header* fspace_last_block = (Block_Header*)&fspace->blocks[fspace->num_managed_blocks - 1];
-    fspace_last_block->next = NULL;
-  }
-}
 
 static volatile unsigned int num_marking_collectors = 0;
 static volatile unsigned int num_repointing_collectors = 0;
@@ -560,7 +550,8 @@
 
   old_num = atomic_inc32(&num_restoring_collectors);
   if( ++old_num == num_active_collectors ){
-    if(gc->tuner->kind != TRANS_NOTHING) mspace_update_info_after_space_tuning(mspace);
+    if(gc->tuner->kind != TRANS_NOTHING)
+      mspace_update_info_after_space_tuning(mspace);
     num_restoring_collectors++;
   }
   while(num_restoring_collectors != num_active_collectors + 1);
@@ -585,12 +576,6 @@
 
   /* Leftover: ************************************************** */
 
-  mspace_reset_after_compaction(mspace);
-  fspace_reset_for_allocation(fspace);
-
-  /*For LOS_Extend*/
-  mspace_restore_block_chain(mspace);
-
   gc_set_pool_clear(gc->metadata->gc_rootset_pool);
   gc_set_pool_clear(gc->metadata->weak_roots_pool);
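The restoring phase above uses the collector rendezvous idiom visible in the hunk: every thread bumps a shared counter with atomic_inc32, the last one to arrive performs the serial step (here the space-tuning bookkeeping) and bumps the counter once more to release the others. A sketch of that barrier shape with illustrative names; only atomic_inc32 and the counter protocol are taken from the diff:

    static volatile unsigned int num_phase_collectors = 0;

    /* Each collector calls this; serial_step runs exactly once, on the last
       thread to arrive, while the rest spin until released. Assumes the VM's
       atomic_inc32, which returns the counter's previous value. */
    static void collector_rendezvous(unsigned int num_active_collectors,
                                     void (*serial_step)(void))
    {
      unsigned int old_num = atomic_inc32(&num_phase_collectors);
      if(old_num + 1 == num_active_collectors){
        serial_step();
        num_phase_collectors++;                    /* open the gate: count becomes N+1 */
      }
      while(num_phase_collectors != num_active_collectors + 1);  /* spin-wait */
    }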
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp?rev=570028&r1=570027&r2=570028&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp Mon Aug 27 01:11:57 2007
@@ -22,6 +22,8 @@
 #include "gc_ms.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 #include "../common/compressed_ref.h"
+#include "../thread/marker.h"
+#include "../verify/verify_live_heap.h"
 #ifdef USE_32BITS_HASHCODE
 #include "../common/hashcode.h"
 #endif
@@ -61,12 +63,49 @@
 
 void gc_ms_reclaim_heap(GC_MS *gc)
 {
+  if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE);
+
   sspace_collection(gc_ms_get_sspace(gc));
 
-  /* FIXME:: clear root set here to support verify */
-#ifdef COMPRESS_REFERENCE
-  gc_set_pool_clear(gc->metadata->gc_uncompressed_rootset_pool);
-#endif
+  if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE);
+}
+
+void sspace_mark_scan_concurrent(Marker* marker);
+void gc_ms_start_concurrent_mark(GC_MS* gc, unsigned int num_markers)
+{
+  if(gc->num_active_markers == 0)
+    pool_iterator_init(gc->metadata->gc_rootset_pool);
+
+  marker_execute_task_concurrent((GC*)gc, (TaskType)sspace_mark_scan_concurrent, (Space*)gc->sspace, num_markers);
+}
+
+void gc_ms_start_concurrent_mark(GC_MS* gc)
+{
+  pool_iterator_init(gc->metadata->gc_rootset_pool);
+
+  marker_execute_task_concurrent((GC*)gc, (TaskType)sspace_mark_scan_concurrent, (Space*)gc->sspace);
+}
+
+void gc_ms_update_space_statistics(GC_MS* gc)
+{
+  POINTER_SIZE_INT num_live_obj = 0;
+  POINTER_SIZE_INT size_live_obj = 0;
+
+  Space_Statistics* sspace_stat = gc->sspace->space_statistic;
+
+  unsigned int num_collectors = gc->num_active_collectors;
+  Collector** collectors = gc->collectors;
+  unsigned int i;
+  for(i = 0; i < num_collectors; i++){
+    Collector* collector = collectors[i];
+    num_live_obj += collector->live_obj_num;
+    size_live_obj += collector->live_obj_size;
+  }
+
+  sspace_stat->num_live_obj = num_live_obj;
+  sspace_stat->size_live_obj = size_live_obj;
+  sspace_stat->last_size_free_space = sspace_stat->size_free_space;
+  sspace_stat->size_free_space = gc->committed_heap_size - size_live_obj; /* TODO: inaccurate value */
 }
 
 void gc_ms_iterate_heap(GC_MS *gc)
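Taken together, gc_ms_reclaim_heap and the two new entry points give the mark-sweep GC a small driving surface. The ordering below is a hedged illustration of how a caller might use them, not the commit's actual driver, which lives in the common GC code outside this diff:

    /* Illustration only; assumes just the declarations added in this commit. */
    void example_ms_cycle(GC_MS *gc)
    {
      gc_ms_start_concurrent_mark(gc);   /* markers trace while mutators run */
      /* ... mutators keep allocating; the slow path can re-trigger marking ... */
      gc_ms_reclaim_heap(gc);            /* sweep, with optional heap verification */
      gc_ms_update_space_statistics(gc); /* fold per-collector live counts into sspace stats */
    }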
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h?rev=570028&r1=570027&r2=570028&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h Mon Aug 27 01:11:57 2007
@@ -47,6 +47,10 @@
   unsigned int num_collectors;
   unsigned int num_active_collectors; /* not all collectors are working */
 
+  Marker** markers;
+  unsigned int num_markers;
+  unsigned int num_active_markers;
+
   /* metadata is the pool for rootset, markstack, etc. */
   GC_Metadata *metadata;
   Finref_Metadata *finref_metadata;
@@ -64,7 +68,12 @@
 
   //For_LOS_extend
   Space_Tuner *tuner;
-
+
+  unsigned int gc_concurrent_status;
+  Collection_Scheduler* collection_scheduler;
+
+  SpinLock concurrent_mark_lock;
+  SpinLock enumerate_rootset_lock;
 
   /* system info */
   unsigned int _system_alloc_unit;
   unsigned int _machine_page_size_bytes;
@@ -102,6 +111,9 @@
 
 void gc_ms_reclaim_heap(GC_MS *gc);
 void gc_ms_iterate_heap(GC_MS *gc);
+void gc_ms_start_concurrent_mark(GC_MS* gc);
+void gc_ms_start_concurrent_mark(GC_MS* gc, unsigned int num_markers);
+void gc_ms_update_space_statistics(GC_MS* gc);
 
 #endif // USE_MARK_SWEEP_GC
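The header now exposes concurrent marking as two C++ overloads: one lets the GC decide the marker count, the other pins it. A hedged usage fragment; the wrapper and its parameter are illustrative, only the two signatures come from the header above:

    /* Illustrative fragment; assumes only the declarations above. */
    void start_marking(GC_MS *gc, unsigned int explicit_markers)
    {
      if(explicit_markers)
        gc_ms_start_concurrent_mark(gc, explicit_markers); /* pin the marker count */
      else
        gc_ms_start_concurrent_mark(gc);                   /* let the GC decide */
    }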
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp?rev=570028&r1=570027&r2=570028&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.cpp Mon Aug 27 01:11:57 2007
@@ -23,7 +23,7 @@
 
 struct GC_Gen;
 
-void sspace_initialize(GC *gc, void *start, unsigned int sspace_size, unsigned int commit_size)
+Sspace *sspace_initialize(GC *gc, void *start, POINTER_SIZE_INT sspace_size, POINTER_SIZE_INT commit_size)
 {
   /* With sspace in the heap, the heap must be composed of a single sspace or a sspace and a NOS.
    * In either case, the reserved size and committed size of sspace must be the same.
@@ -58,6 +58,10 @@
 
   sspace_init_chunks(sspace);
 
+  sspace->space_statistic = (Space_Statistics*)STD_MALLOC(sizeof(Space_Statistics));
+  assert(sspace->space_statistic);
+  memset(sspace->space_statistic, 0, sizeof(Space_Statistics));
+
 #ifdef USE_MARK_SWEEP_GC
   gc_ms_set_sspace((GC_MS*)gc, sspace);
 #else
@@ -67,7 +71,7 @@
 #ifdef SSPACE_VERIFY
   sspace_verify_init(gc);
 #endif
-  return;
+  return sspace;
 }
 
 static void sspace_destruct_chunks(Sspace *sspace) { return; }
@@ -79,6 +83,13 @@
   STD_FREE(sspace);
 }
 
+void sspace_reset_after_collection(Sspace *sspace)
+{
+  sspace->move_object = FALSE;
+  sspace->need_compact = FALSE;
+  sspace->need_fix = FALSE;
+}
+
 void allocator_init_local_chunks(Allocator *allocator)
 {
   Sspace *sspace = gc_get_sspace(allocator->gc);
@@ -110,6 +121,27 @@
   allocator->local_chunks = local_chunks;
 }
 
+void allocator_clear_local_chunks(Allocator *allocator, Boolean reuse_pfc)
+{
+  Sspace *sspace = gc_get_sspace(allocator->gc);
+  Size_Segment **size_segs = sspace->size_segments;
+  Chunk_Header ***local_chunks = allocator->local_chunks;
+
+  for(unsigned int i = SIZE_SEGMENT_NUM; i--;){
+    if(!size_segs[i]->local_alloc){
+      assert(!local_chunks[i]);
+      continue;
+    }
+    Chunk_Header **chunks = local_chunks[i];
+    assert(chunks);
+    for(unsigned int j = size_segs[i]->chunk_num; j--;){
+      if(chunks[j] && reuse_pfc)
+        sspace_put_pfc(sspace, chunks[j]);
+      chunks[j] = NULL;
+    }
+  }
+}
+
 void allocactor_destruct_local_chunks(Allocator *allocator)
 {
   Sspace *sspace = gc_get_sspace(allocator->gc);
@@ -141,6 +173,18 @@
   STD_FREE(local_chunks);
 }
 
+#ifdef USE_MARK_SWEEP_GC
+void sspace_set_space_statistic(Sspace *sspace)
+{
+  GC_MS* gc = (GC_MS*)sspace->gc;
+
+  for(unsigned int i = 0; i < gc->num_collectors; ++i){
+    sspace->surviving_obj_num += gc->collectors[i]->live_obj_num;
+    sspace->surviving_obj_size += gc->collectors[i]->live_obj_size;
+  }
+}
+#endif
+
 extern void sspace_decide_compaction_need(Sspace *sspace);
 extern void mark_sweep_sspace(Collector *collector);
 
@@ -157,8 +201,12 @@
 #endif
 
   sspace_decide_compaction_need(sspace);
-  if(sspace->need_compact)
-    gc->collect_kind = SWEEP_COMPACT_GC;
+  if(sspace->need_compact && gc_match_kind(gc, MARK_SWEEP_GC)){
+    assert(gc_match_kind(gc, MS_COLLECTION));
+    gc->collect_kind = MS_COMPACT_COLLECTION;
+  }
+  if(sspace->need_compact || gc_match_kind(gc, MAJOR_COLLECTION))
+    sspace->need_fix = TRUE;
   //printf("\n\n>>>>>>>>%s>>>>>>>>>>>>\n\n", sspace->need_compact ? "SWEEP COMPACT" : "MARK SWEEP");
 #ifdef SSPACE_VERIFY
   sspace_verify_before_collection(gc);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h?rev=570028&r1=570027&r2=570028&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace.h Mon Aug 27 01:11:57 2007
@@ -34,8 +34,8 @@
   /* <-- first couple of fields are overloaded as Space */
   void *heap_start;
   void *heap_end;
-  unsigned int reserved_heap_size;
-  unsigned int committed_heap_size;
+  POINTER_SIZE_INT reserved_heap_size;
+  POINTER_SIZE_INT committed_heap_size;
   unsigned int num_collections;
   int64 time_collections;
   float survive_ratio;
@@ -43,6 +43,8 @@
   GC *gc;
   Boolean move_object;
 
+  Space_Statistics* space_statistic;
+
   /* Size allocated since last minor collection. */
   volatile POINTER_SIZE_INT last_alloced_size;
   /* Size allocated since last major collection. */
@@ -58,15 +60,23 @@
   /* END of Space --> */
 
   Boolean need_compact;
+  Boolean need_fix; /* there are repointed refs needing fixing */
   Size_Segment **size_segments;
   Pool ***pfc_pools;
   Free_Chunk_List *aligned_free_chunk_lists;
   Free_Chunk_List *unaligned_free_chunk_lists;
   Free_Chunk_List *hyper_free_chunk_list;
+  POINTER_SIZE_INT surviving_obj_num;
+  POINTER_SIZE_INT surviving_obj_size;
 } Sspace;
 
-void sspace_initialize(GC *gc, void *start, unsigned int sspace_size, unsigned int commit_size);
+#ifdef USE_MARK_SWEEP_GC
+void sspace_set_space_statistic(Sspace *sspace);
+#endif
+
+Sspace *sspace_initialize(GC *gc, void *start, POINTER_SIZE_INT sspace_size, POINTER_SIZE_INT commit_size);
 void sspace_destruct(Sspace *sspace);
+void sspace_reset_after_collection(Sspace *sspace);
 
 void *sspace_thread_local_alloc(unsigned size, Allocator *allocator);
 void *sspace_alloc(unsigned size, Allocator *allocator);
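The size fields and the sspace_initialize signature move from unsigned int to POINTER_SIZE_INT because a 32-bit size type caps a space at just under 4GB even on a 64-bit VM. A self-contained illustration of the truncation the change avoids; POINTER_SIZE_INT is Harmony's pointer-width integer, for which standard uintptr_t stands in here, and the 5GB figure is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
      unsigned long long five_gb = 5ULL << 30;       /* 5 GB requested */
      unsigned int narrow = (unsigned int)five_gb;   /* truncates: 1 GB on every platform */
      uintptr_t    wide   = (uintptr_t)five_gb;      /* pointer-width, intact on 64-bit */
      printf("narrow=%u wide=%llu\n", narrow, (unsigned long long)wide);
      return 0;
    }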
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp?rev=570028&r1=570027&r2=570028&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp Mon Aug 27 01:11:57 2007
@@ -130,6 +130,8 @@
     }
     p_obj = alloc_in_chunk(chunks[index]);
   } else {
+    if(gc_need_start_concurrent_mark(allocator->gc))
+      gc_start_concurrent_mark(allocator->gc);
     chunk = sspace_get_pfc(sspace, seg_index, index);
     if(!chunk){
       chunk = (Chunk_Header*)sspace_get_normal_free_chunk(sspace);
@@ -137,8 +139,6 @@
     }
     //if(!chunk) chunk = sspace_steal_pfc(sspace, seg_index, index);
     if(!chunk) return NULL;
-    assert(chunk->alloc_num < chunk->slot_num);
-    ++chunk->alloc_num;
     p_obj = alloc_in_chunk(chunk);
 
     if(chunk)
      sspace_put_pfc(sspace, chunk);
@@ -151,6 +151,9 @@
 {
   assert(size > SUPER_OBJ_THRESHOLD);
 
+  if(gc_need_start_concurrent_mark(allocator->gc))
+    gc_start_concurrent_mark(allocator->gc);
+
   unsigned int chunk_size = SUPER_SIZE_ROUNDUP(size);
   assert(chunk_size > SUPER_OBJ_THRESHOLD);
   assert(!(chunk_size & CHUNK_GRANULARITY_LOW_MASK));
@@ -187,6 +190,7 @@
   if(p_obj) sspace_verify_alloc(p_obj, size);
 #endif
 
+  if(p_obj && gc_is_concurrent_mark_phase()) obj_mark_black_in_table((Partial_Reveal_Object*)p_obj, size);
   return p_obj;
 }
 
@@ -206,7 +210,7 @@
     vm_gc_unlock_enum();
     return p_obj;
   }
-  gc_reclaim_heap(allocator->gc, GC_CAUSE_POS_IS_FULL);
+  gc_reclaim_heap(allocator->gc, GC_CAUSE_SSPACE_IS_FULL);
   vm_gc_unlock_enum();
 
 #ifdef SSPACE_CHUNK_INFO
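Both allocation slow paths above now poll whether concurrent marking is due, and objects allocated while marking runs are immediately marked black in the color table so the markers never have to revisit them (an allocate-black policy). A hedged sketch of the shape of that hook; the helper function is hypothetical, and the predicates it calls are the ones the diff uses, whose internals live outside this hunk:

    /* Sketch only; gc_need_start_concurrent_mark, gc_start_concurrent_mark,
       gc_is_concurrent_mark_phase and obj_mark_black_in_table are the names
       the diff calls into. */
    static void *alloc_slow_path_sketch(unsigned size, Allocator *allocator)
    {
      if(gc_need_start_concurrent_mark(allocator->gc))  /* scheduler says marking is due */
        gc_start_concurrent_mark(allocator->gc);

      void *p_obj = NULL; /* ... take a chunk and carve out a slot ... */

      if(p_obj && gc_is_concurrent_mark_phase())        /* allocate black during marking */
        obj_mark_black_in_table((Partial_Reveal_Object*)p_obj, size);
      return p_obj;
    }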
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h?rev=570028&r1=570027&r2=570028&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h Mon Aug 27 01:11:57 2007
@@ -19,10 +19,11 @@
 #define _SSPACE_ALLOC_H_
 
 #include "sspace_chunk.h"
-
+#include "sspace_mark_sweep.h"
+#include "../common/gc_concurrent.h"
+#include "../common/collection_scheduler.h"
 
 extern POINTER_SIZE_INT cur_alloc_color;
-extern POINTER_SIZE_INT cur_mark_color;
 extern POINTER_SIZE_INT cur_alloc_mask;
 extern POINTER_SIZE_INT cur_mark_mask;
 
@@ -36,6 +37,20 @@
   return (Boolean)(table[word_index] & (cur_alloc_color << index_in_word));
 }
 
+#ifdef _DEBUG
+static Boolean slot_is_free_in_table(POINTER_SIZE_INT *table, unsigned int slot_index)
+{
+  assert(!slot_is_alloc_in_table(table, slot_index));
+
+  unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
+  unsigned int word_index = color_bits_index / BITS_PER_WORD;
+  unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
+
+  return !(table[word_index] & cur_alloc_color << index_in_word);
+}
+#endif
+
 inline unsigned int composed_slot_index(unsigned int word_index, unsigned int index_in_word)
 {
   unsigned int color_bits_index = word_index*BITS_PER_WORD + index_in_word;
@@ -158,7 +173,13 @@
   POINTER_SIZE_INT *table = chunk->table;
   unsigned int slot_index = chunk->slot_index;
 
+  assert(chunk->alloc_num < chunk->slot_num);
+  ++chunk->alloc_num;
+  assert(chunk->base);
   void *p_obj = (void*)((POINTER_SIZE_INT)chunk->base + ((POINTER_SIZE_INT)chunk->slot_size * slot_index));
+#ifdef _DEBUG
+  slot_is_free_in_table(table, slot_index);
+#endif
   alloc_slot_in_table(table, slot_index);
   if(chunk->status & CHUNK_NEED_ZEROING)
     memset(p_obj, 0, chunk->slot_size);
@@ -172,6 +193,9 @@
     chunk->slot_index = (slot_index < chunk->slot_num) ? slot_index : MAX_SLOT_INDEX;
   } else
 #endif
+
+  if(p_obj && gc_is_concurrent_mark_phase()) obj_mark_black_in_table((Partial_Reveal_Object*)p_obj, chunk->slot_size);
+
   chunk->slot_index = next_free_slot_index_in_table(table, slot_index, chunk->slot_num);
   if(chunk->slot_index == MAX_SLOT_INDEX){
     chunk->status = CHUNK_USED | CHUNK_NORMAL;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp?rev=570028&r1=570027&r2=570028&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp Mon Aug 27 01:11:57 2007
@@ -124,23 +124,24 @@
     free_chunk_list_clear(&unaligned_free_chunk_lists[i]);
 
   free_chunk_list_clear(&hyper_free_chunk_list);
-
-  /* release small obj chunks of each mutator */
+
+#ifdef USE_MARK_SWEEP_GC
+  /* release local chunks of each mutator in unique mark-sweep GC */
   Mutator *mutator = gc->mutator_list;
   while(mutator){
-    Chunk_Header ***local_chunks = mutator->local_chunks;
-    for(i = SIZE_SEGMENT_NUM; i--;){
-      if(!size_segments[i]->local_alloc){
-        assert(!local_chunks[i]);
-        continue;
-      }
-      Chunk_Header **chunks = local_chunks[i];
-      assert(chunks);
-      for(j = size_segments[i]->chunk_num; j--;)
-        chunks[j] = NULL;
-    }
+    allocator_clear_local_chunks((Allocator*)mutator, FALSE);
    mutator = mutator->next;
  }
+#endif
+}
+
+void gc_clear_collector_local_chunks(GC *gc)
+{
+  assert(gc_match_kind(gc, MAJOR_COLLECTION));
+  /* release local chunks of each collector in gen GC */
+  for(unsigned int i = gc->num_collectors; i--;){
+    allocator_clear_local_chunks((Allocator*)gc->collectors[i], TRUE);
+  }
 }
 
 /* Simply put the free chunk to the according list
@@ -150,7 +151,6 @@
 static void list_put_free_chunk(Free_Chunk_List *list, Free_Chunk *chunk)
 {
   chunk->status = CHUNK_FREE;
-  chunk->adj_prev = NULL;
   chunk->prev = NULL;
 
   lock(list->lock);
@@ -158,6 +158,8 @@
   if(list->head)
     list->head->prev = chunk;
   list->head = chunk;
+  if(!list->tail)
+    list->tail = chunk;
   assert(list->chunk_num < ~((unsigned int)0));
   ++list->chunk_num;
   unlock(list->lock);
@@ -171,6 +173,8 @@
     list->head = chunk->next;
     if(list->head)
       list->head->prev = NULL;
+    else
+      list->tail = NULL;
     assert(list->chunk_num);
     --list->chunk_num;
     assert(chunk->status == CHUNK_FREE);
@@ -336,21 +340,22 @@
   Free_Chunk_List *list = sspace->hyper_free_chunk_list;
   lock(list->lock);
 
-  Free_Chunk **p_next = &list->head;
+  Free_Chunk *prev_chunk = NULL;
   Free_Chunk *chunk = list->head;
   while(chunk){
     if(CHUNK_SIZE(chunk) >= chunk_size){
       Free_Chunk *next_chunk = chunk->next;
-      *p_next = next_chunk;
-      if(next_chunk){
-        if(chunk != list->head)
-          next_chunk->prev = (Free_Chunk *)p_next; /* utilize an assumption: next is the first field of Free_Chunk */
-        else
-          next_chunk->prev = NULL;
-      }
+      if(prev_chunk)
+        prev_chunk->next = next_chunk;
+      else
+        list->head = next_chunk;
+      if(next_chunk)
+        next_chunk->prev = prev_chunk;
+      else
+        list->tail = prev_chunk;
       break;
     }
-    p_next = &chunk->next;
+    prev_chunk = chunk;
     chunk = chunk->next;
   }
   unlock(list->lock);
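list_put_free_chunk and its removal counterpart now keep a tail pointer in step with head, and the hyper-chunk search walks with an explicit prev_chunk instead of the old pointer-to-pointer trick. The invariant being maintained: an empty list has head == tail == NULL, the first push sets both, and unlinking the last node clears both. A self-contained sketch of that invariant, with the types trimmed down to the two links:

    #include <stddef.h>

    typedef struct Chunk { struct Chunk *prev, *next; } Chunk;
    typedef struct { Chunk *head, *tail; } Chunk_List;

    static void push_front(Chunk_List *l, Chunk *c)
    {
      c->prev = NULL;
      c->next = l->head;
      if(l->head) l->head->prev = c;
      l->head = c;
      if(!l->tail) l->tail = c;        /* first element is also the tail */
    }

    static Chunk *pop_front(Chunk_List *l)
    {
      Chunk *c = l->head;
      if(!c) return NULL;
      l->head = c->next;
      if(l->head) l->head->prev = NULL;
      else        l->tail = NULL;      /* emptied the list: clear the tail too */
      return c;
    }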
@@ -366,6 +371,26 @@
 
   return chunk;
 }
 
+
+void sspace_collect_free_chunks_to_list(Sspace *sspace, Free_Chunk_List *list)
+{
+  unsigned int i;
+
+  for(i = NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+    move_free_chunks_between_lists(list, &sspace->aligned_free_chunk_lists[i]);
+
+  for(i = NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+    move_free_chunks_between_lists(list, &sspace->unaligned_free_chunk_lists[i]);
+
+  move_free_chunks_between_lists(list, sspace->hyper_free_chunk_list);
+
+  Free_Chunk *chunk = list->head;
+  while(chunk){
+    chunk->status = CHUNK_FREE | CHUNK_TO_MERGE;
+    chunk = chunk->next;
+  }
+}
+
 typedef struct PFC_Pool_Iterator {
   volatile unsigned int seg_index;