harmony-commits mailing list archives

From: ge...@apache.org
Subject: svn commit: r442092 [2/2] - in /incubator/harmony/enhanced/drlvm/trunk: build/make/components/vm/ vm/gc/ vm/gc/build/ vm/gc/src/ vm/gcv4/ vm/include/open/ vm/interpreter/src/ vm/tests/smoke/gc/ vm/vmcore/include/ vm/vmcore/src/class_support/ vm/vmcore/...
Date: Mon, 11 Sep 2006 04:31:38 GMT
Added: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/fast_list.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/fast_list.h?view=auto&rev=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/fast_list.h (added)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/fast_list.h Sun Sep 10 21:31:36 2006
@@ -0,0 +1,167 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+/**
+ * @author Ivan Volosyuk
+ */
+
+#ifndef __NON_MOVABLE_VECTOR_H__
+#define __NON_MOVABLE_VECTOR_H__
+
+#include <vector>
+#include <assert.h>
+#include <port_malloc.h>
+
+template <typename Object, int size>
+class fast_list {
+    struct row {
+        struct row *prev;
+        Object data[size];
+        struct row *next;
+    };
+
+    row *first;
+    row *last;
+    int pos;
+public:
+
+    fast_list() {
+        first = last = (row*) STD_MALLOC(sizeof(row));
+        first->next = 0;
+        first->prev = 0;
+        pos = 0;
+    }
+
+    ~fast_list() {
+        for(row *r = first; r != 0; ) {
+            row *next = r->next;
+            STD_FREE(r);
+            r = next;
+
+        }
+    }
+
+    void clear() {
+        last = first;
+        pos = 0;
+    }
+
+    bool empty() {
+        return first == last && pos == 0;
+    }
+
+    int count() {
+        int res = pos;
+        for(row *r = first; r != last; r = r->next) res += size;
+        return res;
+    }
+
+    Object &push_back(Object obj) {
+        // caching pos and last in local variables is helpful for compiler
+        // optimizations
+        int p = pos;
+        row *l = last;
+
+        Object &res = l->data[p];
+        res = obj;
+        ++p;
+        if (p < size) {
+            pos = p;
+            return res;
+        }
+
+        if (!l->next) {
+            row *new_row = (row*) STD_MALLOC(sizeof(row));
+            l->next = new_row;
+            new_row->prev = l;
+            new_row->next = 0;
+        }
+        last = l->next;
+        pos = 0;
+        return res;
+    }
+
+    Object pop_back() {
+        // caching pos and last in local variables is helpful for compiler
+        // optimizations
+        int p = pos - 1;
+        row *l = last;
+
+        if (p >= 0) {
+            pos = p;
+            return l->data[p];
+        }
+
+        last = l = l->prev;
+        pos = size - 1;
+        return l->data[size - 1];
+    }
+
+    Object& back() {
+        int p = pos;
+        row *l = last;
+        --p;
+        if (p >= 0) return l->data[p];
+        return l->prev->data[size-1];
+    }
+
+    class iterator {
+        private:
+            row *r;
+            int pos;
+        public:
+        iterator(row *_r, int _pos) {
+            r = _r;
+            pos = _pos;
+        }
+
+        Object &operator *() { return r->data[pos]; }
+
+        iterator &operator ++() {
+            ++pos;
+            if (pos < size) return *this;
+            pos = 0;
+            r = r->next;
+            return *this;
+        }
+
+        iterator &operator --() {
+            --pos;
+            if (pos >= 0) return *this;
+            r = r->prev;
+            pos = size - 1;
+            return *this;
+        }
+
+        bool operator == (iterator i) {
+            return i.r == r && i.pos == pos;
+        }
+
+        bool operator != (iterator i) {
+            return ! operator == (i);
+        }
+
+        typedef std::input_iterator_tag iterator_category;
+        typedef Object value_type;
+        typedef Object* pointer;
+        typedef Object& reference;
+        typedef ptrdiff_t difference_type;
+    };
+
+    iterator begin() { return iterator(first, 0); }
+    iterator end() { return iterator(last, pos); }
+};
+
+#endif /* __NON_MOVABLE_VECTOR_H__ */
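
For reference, a minimal usage sketch of the fast_list template above (illustrative only; it assumes an int payload and that the STD_MALLOC/STD_FREE wrappers from port_malloc.h are available, as the header already requires):

    #include "fast_list.h"

    static void fast_list_example() {
        fast_list<int, 4> list;                  // rows of 4 ints, grown on demand
        for (int i = 0; i < 10; i++)
            list.push_back(i);                   // returns a reference to the stored slot
        int sum = 0;
        for (fast_list<int, 4>::iterator it = list.begin(); it != list.end(); ++it)
            sum += *it;                          // sum == 45
        while (!list.empty())
            list.pop_back();                     // LIFO removal; rows stay allocated for reuse
    }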

Propchange: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/fast_list.h
------------------------------------------------------------------------------
    svn:eol-style = native

Added: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp?view=auto&rev=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp (added)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp Sun Sep 10 21:31:36 2006
@@ -0,0 +1,434 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+/**
+ * @author Ivan Volosyuk
+ */
+
+#include <assert.h>
+#include <iostream>
+#include <open/vm_gc.h>
+#include <open/gc.h>
+#include <cxxlog.h>
+#include "gc_types.h"
+#include "fast_list.h"
+#include "port_atomic.h"
+
+GC_Thread_Info *thread_list;
+volatile int thread_list_lock;
+int num_threads = 0;
+
+fast_list<Partial_Reveal_Object*, 1024> finalizible_objects;
+
+
+// GCExport Boolean gc_supports_compressed_references(); optional
+GCExport void gc_write_barrier(Managed_Object_Handle p_base_of_obj_with_slot) {
+    TRACE2("gc.wb", "gc_write_barrier");
+}
+GCExport void gc_heap_wrote_object (Managed_Object_Handle p_base_of_object_just_written) {
+    // NOTE: this function looks redundant for now; it is called for all
+    // heap object accesses, not only for objects with slots.
+    //TRACE2("gc.wb", "gc_heap_wrote_object");
+}
+
+GCExport void gc_heap_write_ref (Managed_Object_Handle p_base_of_object_with_slot,
+                                 unsigned offset,
+                                 Managed_Object_Handle value) {
+    TRACE2("gc.wb", "gc_heap_write_ref");
+    Managed_Object_Handle *p_slot = 
+        (Managed_Object_Handle *)(((char *)p_base_of_object_with_slot) + offset);
+    assert (p_base_of_object_with_slot != NULL);
+    *p_slot = value;
+}
+GCExport void gc_heap_slot_write_ref (Managed_Object_Handle p_base_of_object_with_slot,
+                                      Managed_Object_Handle *p_slot,
+                                      Managed_Object_Handle value) {
+    TRACE2("gc.wb", "gc_heap_slot_write_ref");
+    assert (p_base_of_object_with_slot != NULL);
+    *p_slot = value;
+}
+//GCExport void gc_heap_slot_write_ref_compressed (Managed_Object_Handle p_base_of_object_with_slot,
+//                                                 uint32 *p_slot,
+//                                                 Managed_Object_Handle value);
+
+
+
+
+// GCExport void gc_test_safepoint(); optional
+Boolean gc_supports_frontier_allocation(unsigned *offset_of_current, unsigned *offset_of_limit) {
+    // Need additional support for object offset in native stubs.
+    *offset_of_current = field_offset(GC_Thread_Info, tls_current_free);
+    *offset_of_limit = field_offset(GC_Thread_Info, tls_current_ceiling);
+    return true;
+}
+
+void gc_vm_initialized() {
+    static int UNUSED count = 0;
+    TRACE2("gc.init2", "gc_vm_initialized called (" << count++ << ")");
+}
+
+//GCExport void gc_add_compressed_root_set_entry(uint32 *ref, Boolean is_pinned); optional
+
+void gc_add_weak_root_set_entry(Managed_Object_Handle *slot, 
+    Boolean is_pinned, Boolean is_short_weak) {
+    TRACE2("gc.enum", "gc_add_weak_root_set_entry - EMPTY");
+    abort();
+}
+//GCExport void gc_add_root_set_entry_managed_pointer(void **slot,
+//                                                    Boolean is_pinned); //  optional
+
+#define BITS_PER_BYTE 8
+#define NEXT_TO_HIGH_BIT_SET_MASK (1<<((sizeof(unsigned) * BITS_PER_BYTE)-2))
+#define NEXT_TO_HIGH_BIT_CLEAR_MASK ~NEXT_TO_HIGH_BIT_SET_MASK
+
+// the class loader sometimes sets this bit for finalizable objects (?)
+inline unsigned int get_instance_data_size (unsigned int encoded_size) 
+{
+    return (encoded_size & NEXT_TO_HIGH_BIT_CLEAR_MASK);
+}
+
+unsigned char* allocate_from_chunk(int size) {
+    unsigned char *res;
+
+    TRACE2("gc.mem", "get next chunk: pinned_areas_pos = " << pinned_areas_pos
+            << " pinned_areas.size() = " << pinned_areas.size());
+    // let's try the next chunk
+    while (pinned_areas_pos < pinned_areas.size()) {
+        assert(heap.pos_limit <= pinned_areas[pinned_areas_pos]);
+        heap.pos = pinned_areas[pinned_areas_pos];
+        unsigned char *new_limit = heap.allocation_region_end();
+
+        if (pinned_areas_pos + 1 < pinned_areas.size()) {
+            new_limit = pinned_areas[pinned_areas_pos + 1];
+        }
+        assert(heap.pos_limit <= new_limit);
+        //assert(heap.pos <= new_limit);
+        heap.pos_limit = new_limit;
+        TRACE2("gc.mem", "next chunk[" << pinned_areas_pos << "] = " << heap.pos << " : " << heap.pos_limit);
+
+        pinned_areas_pos += 2;
+
+        if (heap.pos_limit > heap.allocation_region_end()) {
+            heap.pos_limit = heap.allocation_region_end();
+        }
+
+        if (heap.pos + size <= heap.pos_limit) {
+            res = heap.pos;
+            heap.pos += size;
+            return res;
+        }
+        // we have unspent memory chunks
+    }
+    return 0;
+}
+
+static bool UNUSED thread_is_thread_list(GC_Thread_Info *thread) {
+    spin_lock(&thread_list_lock);
+    GC_Thread_Info *t = thread_list;
+    while(t) {
+        if (t == thread) {
+            spin_unlock(&thread_list_lock);
+            return true;
+        }
+        t = t->next;
+    }
+    spin_unlock(&thread_list_lock);
+    return false;
+}
+
+Managed_Object_Handle gc_alloc_fast(unsigned in_size, 
+                                             Allocation_Handle ah,
+                                             void *thread_pointer) {
+
+    //TRACE2("gc.alloc", "gc_alloc_fast");
+    assert((in_size % GC_OBJECT_ALIGNMENT) == 0);
+    assert (ah);
+    unsigned char *res;
+
+    GC_Thread_Info *info = (GC_Thread_Info *) thread_pointer;
+    Partial_Reveal_VTable *vtable = (Partial_Reveal_VTable*) ah;
+    GC_VTable_Info *gcvt = vtable->get_gcvt();
+
+    // TODO: finalizable objects can be added atomically and handled here
+    if (info->tls_current_free + in_size <= info->tls_current_ceiling && !gcvt->is_finalizible()) {
+        res = (unsigned char*) info->tls_current_free;
+        info->tls_current_free += in_size;
+        *(int*)res = ah;
+        return (Managed_Object_Handle)res;
+    }
+    return 0;
+}
+
+Managed_Object_Handle gc_alloc(unsigned in_size, 
+                                        Allocation_Handle ah,
+                                        void *thread_pointer) {
+    TRACE2("gc.alloc", "gc_alloc: " << in_size);
+    assert((in_size % GC_OBJECT_ALIGNMENT) == 0);
+    assert (ah);
+    unsigned char *res;
+
+    GC_Thread_Info *info = (GC_Thread_Info *) thread_pointer;
+    Partial_Reveal_VTable *vtable = (Partial_Reveal_VTable*) ah;
+    GC_VTable_Info *gcvt = vtable->get_gcvt();
+
+    if (!gcvt->is_finalizible() && info->tls_current_free + in_size <= info->tls_current_ceiling) {
+        res = (unsigned char*) info->tls_current_free;
+        info->tls_current_free += in_size;
+        *(int*)res = ah;
+        return (Managed_Object_Handle)res;
+    }
+
+    // TODO: removing the atomic exchange reproduces the finalizer thread
+    // synchronization problems
+    if (pending_finalizers) {
+        bool run = apr_atomic_xchg32((volatile uint32*)&pending_finalizers, 0);
+        if (run) {
+            vm_hint_finalize();
+        }
+    }
+
+    vm_gc_lock_enum();
+    
+    unsigned size = get_instance_data_size(in_size);
+
+    if (gcvt->is_finalizible()) {
+        unsigned char *obj;
+        unsigned char *endpos;
+        bool res = place_into_old_objects(obj, endpos, size);
+        if (res) {
+            memset(obj, 0, size);
+            finalizible_objects.push_back((Partial_Reveal_Object*) obj);
+            vm_gc_unlock_enum();
+            *(int*)obj = ah;
+            return (Managed_Object_Handle)obj;
+        }
+    }
+
+    if (info->tls_current_free + size <= info->tls_current_ceiling) {
+        res = (unsigned char*) info->tls_current_free;
+        info->tls_current_free += size;
+        assert(!((POINTER_SIZE_INT)res & 3));
+
+        if (gcvt->is_finalizible()) {
+            finalizible_objects.push_back((Partial_Reveal_Object*) res);
+        }
+        vm_gc_unlock_enum();
+        *(int*)res = ah;
+        return (Managed_Object_Handle)res;
+    }
+
+    res = heap.pos;
+    if (res + size >= heap.pos_limit) {
+        // let's try the next chunk
+        res = allocate_from_chunk(size);
+
+        if (!res) {
+            res = select_gc(size);
+        }
+
+        if (!res) {
+            vm_gc_unlock_enum();
+            vm_hint_finalize();
+            TRACE2("gc.verbose", "OutOfMemoryError!\n");
+            return 0;
+        }
+
+        if (/* in_size != size && */ gcvt->is_finalizible()) {
+            finalizible_objects.push_back((Partial_Reveal_Object*) res);
+        }
+        vm_gc_unlock_enum();
+        if (cleaning_needed) memset(res, 0, size);
+        *(int*)res = ah; // NOTE: the object is only partially initialized and must not be moved!!
+                         //       problems with arrays
+                         //       no way to call vm_hint_finalize() here
+        return res;
+    }
+
+    heap.pos = res + size;
+
+    if (/* in_size != size && */ gcvt->is_finalizible()) {
+        finalizible_objects.push_back((Partial_Reveal_Object*) res);
+    }
+
+    if (info->tls_current_free + chunk_size / 8 < info->tls_current_ceiling) {
+        // chunk is not expired yet, reuse it
+        vm_gc_unlock_enum();
+        if (cleaning_needed) memset(res, 0, size);
+        *(int*)res = ah;
+        return (Managed_Object_Handle)res;
+    }
+
+    info->tls_current_free = heap.pos;
+    info->tls_current_ceiling = heap.pos + chunk_size;
+    if (info->tls_current_ceiling > heap.pos_limit)
+        info->tls_current_ceiling = heap.pos_limit;
+    heap.pos = info->tls_current_ceiling;
+
+    vm_gc_unlock_enum();
+    if (cleaning_needed) memset(res, 0, info->tls_current_ceiling - info->tls_current_free + size);
+    *(int*)res = ah;
+    return (Managed_Object_Handle)res;
+}
+
+Managed_Object_Handle gc_pinned_malloc_noclass(unsigned size) {
+    TRACE2("gc.alloc", "gc_pinned_malloc_noclass - NOT IMPLEMENTED");
+    abort();
+    return 0;
+}
+
+Managed_Object_Handle gc_alloc_pinned(unsigned size, Allocation_Handle type, void *thread_pointer) {
+    TRACE2("gc.alloc", "gc_alloc_pinned - NOT IMPLEMENTED");
+    abort();
+    return 0;
+}
+
+Boolean gc_requires_barriers() {
+    // SPAM TRACE2("gc.init", "gc_requires_barriers - NO");
+    return false;
+}
+
+void gc_thread_init(void *gc_information) {
+    TRACE2("gc.thread", "gc_thread_init " << gc_information);
+
+    GC_Thread_Info *info = (GC_Thread_Info *) gc_information;
+    info->tls_current_free = 0;
+    info->tls_current_ceiling = 0;
+    //info->saved_object = 0;
+    spin_lock(&thread_list_lock);
+    info->next = thread_list;
+    if (info->next) info->next->prev = &info->next;
+    thread_list = info;
+    info->prev = &thread_list;
+    int n = ++num_threads;
+    chunk_size = round_down(heap.size / (10 * n),128);
+    spin_unlock(&thread_list_lock);
+
+}
+
+void gc_thread_kill(void *gc_information) {
+    TRACE2("gc.thread", "gc_thread_kill " << gc_information);
+    GC_Thread_Info *info = (GC_Thread_Info *) gc_information;
+
+    //assert(info->saved_object == 0);
+    spin_lock(&thread_list_lock);
+    *info->prev = info->next;
+    if (info->next) info->next->prev = info->prev;
+    int n = --num_threads;
+    if (n != 0)
+        chunk_size = round_down(heap.size / (10 * n),128);
+    spin_unlock(&thread_list_lock);
+}
+
+void gc_force_gc() {
+    TRACE2("gc.collect", "gc_force_gc");
+    select_force_gc();
+}
+
+int64 gc_total_memory() 
+{
+    return heap.size;
+}
+
+int64 gc_max_memory()
+{
+    return heap.max_size;
+}
+
+int64 gc_free_memory() 
+{
+    return (int64) ((heap.allocation_region_end() - heap.pos) + (heap.old_objects.end - heap.old_objects.pos));
+}
+
+void gc_pin_object (Managed_Object_Handle* p_object) {
+    // FIXME: overflow check and handling
+    Partial_Reveal_Object *obj = *(Partial_Reveal_Object**) p_object;
+
+    volatile uint8 *info = (volatile uint8 *)&obj->obj_info_byte();
+    uint8 value = *info;
+    if ((value & OBJECT_IS_PINNED_BITS) == OBJECT_IS_PINNED_BITS) {
+        DIE2("gc", "no handling for pin overflow");
+    }
+
+    while (true) {
+        uint8 old_value = port_atomic_cas8(info, value + OBJECT_IS_PINNED_INCR, value);
+        if (old_value == value) return;
+        value = old_value;
+    }
+}
+
+void gc_unpin_object (Managed_Object_Handle* p_object) {
+    Partial_Reveal_Object *obj = *(Partial_Reveal_Object**) p_object;
+    assert((obj->obj_info_byte() & OBJECT_IS_PINNED_BITS) != 0);
+
+    volatile uint8 *info = (volatile uint8 *)&obj->obj_info_byte();
+    uint8 value = *info;
+    while (true) {
+        uint8 old_value = port_atomic_cas8(info, value - OBJECT_IS_PINNED_INCR, value);
+        if (old_value == value) return;
+        value = old_value;
+    }
+}
+
+Boolean gc_is_object_pinned (Managed_Object_Handle p_object) {
+    Partial_Reveal_Object *obj = (Partial_Reveal_Object*) p_object;
+    return (obj->obj_info_byte() & OBJECT_IS_PINNED_BITS) != 0;
+}
+
+int32 gc_get_hashcode(Managed_Object_Handle p_object) {
+    Partial_Reveal_Object *obj = (Partial_Reveal_Object*) p_object;
+    if (!obj) return 0;
+    assert((unsigned char*)obj >= heap_base && (unsigned char*)obj < heap_ceiling);
+    assert(obj->vtable());
+    unsigned char info = obj->obj_info_byte();
+    // FIXME: are atomic ops needed here to keep pinning working?
+    int hash;
+    if (info & HASHCODE_IS_SET_BIT) {
+        if (info & HASHCODE_IS_ALLOCATED_BIT) {
+            int offset = get_object_size(obj, obj->vtable()->get_gcvt());
+            unsigned char *pos = (unsigned char *)obj;
+            hash = *(int*) (pos + offset);
+            check_hashcode(hash);
+        } else {
+            hash = gen_hashcode(obj);
+        }
+    } else {
+        obj->obj_info_byte() = info | HASHCODE_IS_SET_BIT;
+        hash = gen_hashcode(obj);
+    }
+    return hash;
+}
+
+
+Managed_Object_Handle gc_get_next_live_object(void *iterator) {
+    TRACE2("gc.iter", "gc_get_next_live_object - NOT IMPLEMENTED");
+    abort();
+}
+
+unsigned int gc_time_since_last_gc() {
+    TRACE2("gc.time", "gc_time_since_last_gc");
+    return 0;
+}
+
+void *gc_heap_base_address() {
+    return (void*) heap_base;
+}
+void *gc_heap_ceiling_address() {
+    return (void*) heap_ceiling;
+}
+
+void gc_finalize_on_exit() {
+    process_finalizible_objects_on_exit();
+}
+
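
As a hedged illustration of how the two allocation entry points above are meant to compose (the helper below is hypothetical; the real callers are the JIT-generated allocation stubs and the interpreter): gc_alloc_fast() is the unsynchronized thread-local bump-pointer path and may legitimately return 0, in which case the caller falls back to gc_alloc(), which takes the GC lock and may trigger a collection.

    #include <open/gc.h>
    #include "gc_types.h"

    // Illustrative sketch only: vm_alloc_object is a hypothetical VM-side helper.
    Managed_Object_Handle vm_alloc_object(unsigned size, Allocation_Handle ah,
                                          GC_Thread_Info *gc_tls) {
        // size must already be GC_OBJECT_ALIGNMENT-aligned; both entry points assert it.
        Managed_Object_Handle obj = gc_alloc_fast(size, ah, gc_tls);   // lock-free TLS bump
        if (obj == 0) {
            // Slow path: takes vm_gc_lock_enum(), may grab a new chunk or run a collection;
            // a 0 result here is the out-of-memory case.
            obj = gc_alloc(size, ah, gc_tls);
        }
        return obj;
    }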

Propchange: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h?view=auto&rev=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h (added)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h Sun Sep 10 21:31:36 2006
@@ -0,0 +1,328 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+/**
+ * @author Ivan Volosyuk
+ */
+
+#ifndef __GC_TYPES_H__
+#define __GC_TYPES_H__
+
+#define LOG_DOMAIN "gc.verbose"
+
+#include <assert.h>
+#include <vector>
+#include <list>
+#include <open/vm.h>
+#include <open/vm_gc.h>
+#include <port_vmem.h>
+#include <apr_time.h>
+#include <apr_atomic.h>
+#include <cxxlog.h>
+
+/// obtains a spinlock.
+inline void spin_lock(volatile int* lock) {
+    for(int i = 0; i < 10000; i++) {
+        int val = *lock;
+        if (val == 1)
+            continue;
+        assert(val == 0);
+        if (apr_atomic_cas32((volatile uint32 *)lock, 1, 0) == 0) return;
+    }
+    while (true) {
+#ifdef _WIN32
+        Sleep(1);
+#endif
+        if (apr_atomic_cas32((volatile uint32 *)lock, 1, 0) == 0) return;
+    }
+}
+
+/// releases a spinlock.
+inline void spin_unlock(volatile int* lock) {
+    assert(1 == *lock);
+    *lock = 0;
+}
+
+enum GC_TYPE {
+    GC_COPY,
+    GC_FORCED,
+    GC_SLIDE_COMPACT,
+    GC_CACHE,
+
+    GC_FULL,
+};
+
+extern GC_TYPE gc_type;
+class Partial_Reveal_Object;
+
+typedef struct GC_Thread_Info {
+    unsigned char *tls_current_free;
+    unsigned char *tls_current_ceiling;
+
+    GC_Thread_Info *next;
+    GC_Thread_Info **prev;
+} GC_Thread_Info;
+
+#define FORWARDING_BIT 1
+#define RESCAN_BIT 2
+#define GC_OBJECT_MARK_BIT_MASK 0x00000080
+#define MARK_BITS 3
+
+#define HASHCODE_IS_ALLOCATED_BIT 4
+#define HASHCODE_IS_SET_BIT 8
+#define OBJECT_IS_PINNED_BITS (7 << 4)
+#define OBJECT_IS_PINNED_INCR (1 << 4)
+
+// FLAGS
+extern const char *lp_hint; // Use large pages
+extern bool ignore_finalizers;
+extern bool remember_root_set;
+
+#define field_offset(type,field) ((POINTER_SIZE_INT)&((type*)0)->field)
+
+#define GC_VT_ARRAY 1
+#define GC_VT_FINALIZIBLE 2
+#define GC_VT_HAS_SLOTS 4
+#define GC_VT_FLAGS 7
+#define GC_VT_ARRAY_ELEMENT_SHIFT 3
+#define GC_VT_ARRAY_ELEMENT_MASK 3
+#define GC_VT_ARRAY_FIRST_SHIFT 5
+
+#define GC_VT_REF_TYPE 3
+
+struct GC_VTable_Info {
+    // Fields
+    unsigned size_and_ref_type;
+
+    // Methods
+    unsigned flags() { return (int)this; }
+    GC_VTable_Info *ptr() {
+        assert(!is_array());
+        return (GC_VTable_Info*) ((POINTER_SIZE_INT)this & ~GC_VT_FLAGS);
+    }
+
+    unsigned obj_size() { return ptr()->size_and_ref_type & ~GC_VT_REF_TYPE; }
+    WeakReferenceType reference_type() { return (WeakReferenceType) (ptr()->size_and_ref_type & GC_VT_REF_TYPE); }
+    int *offset_array() { /* reference array just after this struct */ return (int*)(ptr() + 1); }
+
+    bool is_array() { return flags() & GC_VT_ARRAY; }
+    bool is_finalizible() { return flags() & GC_VT_FINALIZIBLE; }
+    bool has_slots() { return flags() & GC_VT_HAS_SLOTS; }
+
+    inline unsigned array_size(int length);
+};
+
+typedef POINTER_SIZE_INT GC_VT;
+
+typedef struct Partial_Reveal_VTable {
+private:
+    GC_VTable_Info *gcvt;
+public:
+
+    void set_gcvt(struct GC_VTable_Info *new_gcvt) { gcvt = new_gcvt; }
+    struct GC_VTable_Info *get_gcvt() { return gcvt; }
+
+} Partial_Reveal_VTable;
+
+class Partial_Reveal_Object {
+    private:
+    Partial_Reveal_Object();
+    int vt_raw;
+    int info;
+    int array_len;
+
+    public:
+    int &vt() { assert(/* alignment check */ !((int)this & 3)); return vt_raw; }
+    int &obj_info() { assert(/* alignment check */ !((int)this & 3)); return info; }
+    unsigned char &obj_info_byte() { return *(unsigned char*)&obj_info(); }
+
+    Partial_Reveal_VTable *vtable() {
+        assert(!(vt() & FORWARDING_BIT));
+        return (Partial_Reveal_VTable*) vt();
+    }
+
+    int array_length() { return array_len; }
+
+    Partial_Reveal_Object **get_array_elements(GC_VTable_Info *gcvt) {
+        assert(gcvt->is_array());
+        assert(gcvt->has_slots());
+        return (Partial_Reveal_Object**)
+            ((unsigned char*) this + (gcvt->flags() >> GC_VT_ARRAY_FIRST_SHIFT));
+    }
+};
+
+
+unsigned
+GC_VTable_Info::array_size(int length) {
+    assert(is_array());
+    unsigned f = flags();
+    unsigned element_shift = f >> GC_VT_ARRAY_ELEMENT_SHIFT;
+    unsigned first_element = element_shift >> (GC_VT_ARRAY_FIRST_SHIFT - GC_VT_ARRAY_ELEMENT_SHIFT);
+    return (first_element + (length << (element_shift & GC_VT_ARRAY_ELEMENT_MASK)) + 3) & ~3;
+}
+
+static inline int get_object_size(Partial_Reveal_Object *obj, GC_VTable_Info *gcvt) {
+    if (gcvt->is_array()) {
+        return gcvt->array_size(obj->array_length());
+    } else {
+        return gcvt->obj_size();
+    }
+}
+
+static inline bool
+is_array_of_primitives(Partial_Reveal_Object *p_obj)
+{
+    GC_VTable_Info *gcvt = p_obj->vtable()->get_gcvt();
+    return gcvt->is_array() && !gcvt->has_slots();
+}
+
+template <typename T>
+static inline T round_down(T value, int round) {
+    assert((round & (round - 1)) == 0);
+    return (T) (((POINTER_SIZE_INT)value) & ~(round - 1));
+}
+
+template <typename T>
+static inline T round_up(T value, int round) {
+    assert((round & (round - 1)) == 0);
+    return (T) ((((POINTER_SIZE_INT)value) + round - 1) & ~(round - 1));
+}
+
+inline POINTER_SIZE_INT mb(POINTER_SIZE_INT size) {
+    int m = 1024 * 1024;
+    return (size + m/2-1)/m;
+}
+
+typedef unsigned char* Ptr;
+
+struct OldObjects {
+    Ptr end;
+    Ptr pos;
+    Ptr pos_limit;
+
+    // position before gc
+    Ptr prev_pos;
+};
+
+struct HeapSegment {
+    Ptr base; // base pointer
+    Ptr ceiling; // upper bound
+    size_t size;
+    size_t max_size;
+    Ptr pos; // current allocation position
+    Ptr pos_limit; // end of continuous allocation region
+
+    Ptr compaction_region_start() { return old_objects.end; }  // compaction region
+    Ptr compaction_region_end() { return ceiling; }
+
+    Ptr allocation_region_start() { return old_objects.end; } // allocation region
+    Ptr allocation_region_end() { return ceiling; }
+
+    OldObjects old_objects;
+
+    // data for gc algorithm switching
+    float working_set_size;
+    float Tcompact;
+    float Tcopy;
+    float dS_copy;
+
+    // data of evacuation area prediction
+    POINTER_SIZE_SINT incr_abs;
+    float incr_rel;
+    unsigned char *predicted_pos;
+
+    GC_TYPE next_gc;
+};
+
+extern HeapSegment heap;
+
+// GLOBALS
+extern Ptr heap_base;
+extern Ptr heap_ceiling;
+
+extern int pending_finalizers;
+extern int chunk_size;
+extern bool cleaning_needed;
+extern std::vector<unsigned char*> pinned_areas;
+extern unsigned pinned_areas_pos;
+extern std::vector<unsigned char*> old_pinned_areas;
+extern unsigned old_pinned_areas_pos;
+
+extern unsigned int heap_mark_phase;
+extern unsigned int prev_mark_phase;
+extern GC_Thread_Info *thread_list;
+extern int num_threads;
+extern int global_referent_offset;
+extern apr_time_t gc_start, gc_end;
+extern int gc_num;
+extern apr_time_t total_gc_time;
+extern apr_time_t total_user_time;
+extern apr_time_t max_gc_time;
+extern int gc_algorithm;
+extern int gc_adaptive;
+
+// for slide compaction algorithms
+extern unsigned char *mark_bits;
+extern int mark_bits_size;
+
+
+// FUNCTIONS PROTOTYPES
+
+const char *gc_name(GC_TYPE gc);
+
+unsigned char * copy_gc(int size);
+void force_gc();
+unsigned char * full_gc(int size);
+unsigned char * slide_gc(int size);
+unsigned char* allocate_from_chunk(int size);
+
+void gc_reserve_mark_bits();
+void gc_unreserve_mark_bits();
+void gc_allocate_mark_bits();
+void gc_deallocate_mark_bits();
+void init_gcvt();
+void deinit_gcvt();
+
+void init_select_gc();
+void select_force_gc();
+unsigned char* select_gc(int size);
+void after_copy_gc();
+void after_slide_gc();
+
+void heap_extend(size_t size);
+void heap_shrink(size_t size);
+
+void process_finalizible_objects_on_exit();
+void *alloc_large_pages(size_t size, const char *hint);
+bool place_into_old_objects(unsigned char *&newpos, unsigned char *&endpos, int size);
+
+//// OPTIONAL FEATURES /////////
+
+//#define DEBUG_HASHCODE
+#ifdef DEBUG_HASHCODE
+inline int gen_hashcode(void *addr) {
+    return (((int)addr >> 2) & 0x7e) | 0x3a00;
+}
+
+inline void check_hashcode(int hash) {
+    assert((hash & ~0x7e) == 0x3a00);
+}
+#else /* DEBUG_HASHCODE */
+inline int gen_hashcode(void *addr) { return (int)addr; }
+inline void check_hashcode(int hash) {}
+#endif /* DEBUG_HASHCODE */
+
+#endif /* __GC_TYPES_H__ */
+
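
As a worked example of the bit-packed array case above (a sketch, assuming a 32-bit build, which the (int)this casts already rely on): gc_class_prepared() in prepare.cpp below encodes an int[] array with a 4-byte element size as el_offset 2 and a first-element offset of 12, and array_size() then decodes those fields back out of the flags word.

    #include "gc_types.h"

    static unsigned array_size_example() {
        unsigned flags = GC_VT_ARRAY
                       | (2  << GC_VT_ARRAY_ELEMENT_SHIFT)   // log2 of the 4-byte element size
                       | (12 << GC_VT_ARRAY_FIRST_SHIFT);    // offset of element 0
        GC_VTable_Info *gcvt = (GC_VTable_Info *) flags;     // arrays keep the flags in the pointer
        // element_shift = 401 >> 3 = 50; 50 & 3 = 2 (shift); 50 >> 2 = 12 (first element)
        return gcvt->array_size(10);                         // (12 + (10 << 2) + 3) & ~3 == 52
    }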

Propchange: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h
------------------------------------------------------------------------------
    svn:eol-style = native

Added: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp?view=auto&rev=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp (added)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp Sun Sep 10 21:31:36 2006
@@ -0,0 +1,381 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+/**
+ * @author Ivan Volosyuk
+ */
+
+// System header files
+#include <iostream>
+
+// VM interface header files
+#include "port_malloc.h"
+#include <apr_general.h>
+#include "platform_lowlevel.h"
+#include "open/vm_gc.h"
+#include "open/gc.h"
+#include "jit_intf.h"
+#include <assert.h>
+#include "gc_types.h"
+#include "cxxlog.h"
+#include "timer.h"
+#ifndef _WIN32
+#include <sys/mman.h>
+#endif
+
+// Variables are partially sorted by usage pattern to improve cache-line locality.
+
+unsigned int heap_mark_phase;
+
+HeapSegment heap;
+int chunk_size;
+
+int pending_finalizers = false;
+
+#define RESERVED_FOR_LAST_HASH 4
+
+#define MB * (1024 * 1024)
+int HEAP_SIZE_DEFAULT = 256 MB;
+
+unsigned int prev_mark_phase;
+bool cleaning_needed = false;
+int gc_algorithm = 0;
+int gc_adaptive = true;
+int64 timer_start;
+int64 timer_dt;
+Ptr heap_base;
+Ptr heap_ceiling;
+size_t max_heap_size;
+size_t min_heap_size;
+bool ignore_finalizers = false;
+bool remember_root_set = false;
+const char *lp_hint = NULL;
+
+static long parse_size_string(const char* size_string) {
+    size_t len = strlen(size_string);
+    int unit = 1;
+    if (tolower(size_string[len - 1]) == 'k') {
+        unit = 1024;
+    } else if (tolower(size_string[len - 1]) == 'm') {
+        unit = 1024 * 1024;
+    } else if (tolower(size_string[len - 1]) == 'g') {
+        unit = 1024 * 1024 * 1024;
+    }
+    long size = atol(size_string);
+    long res = size * unit;
+    if (res / unit != size) {
+        // overflow happened
+        return 0;
+    }
+    return res;
+}
+
+static bool get_property_value_boolean(char* name, bool deflt) {
+    const char* value = vm_get_property_value(name);
+    if (value == NULL || 0 == value[0])
+       return deflt;
+
+    return (strcmp("0", value) != 0
+        && strcmp("off", value) != 0 
+        && strcmp("false", value) != 0);
+}
+
+static int get_property_value_int(char* name) {
+    const char* value = vm_get_property_value(name);
+    return (NULL == value) ? 0 : atoi(value);
+}
+
+static bool is_property_set(char* name) {
+    const char* value = vm_get_property_value(name);
+    return (NULL != value && 0 != value[0]);
+}
+
+static void parse_configuration_properties() {
+    max_heap_size = HEAP_SIZE_DEFAULT;
+    min_heap_size = 8 MB;
+    if (is_property_set("gc.mx")) {
+        max_heap_size = parse_size_string(vm_get_property_value("gc.mx"));
+
+        if (0 == max_heap_size) {
+            INFO("wrong max heap size");
+            max_heap_size = HEAP_SIZE_DEFAULT;
+        }
+        if (max_heap_size < 8 MB) {
+            INFO("max heap size is too small: " << max_heap_size);
+            max_heap_size = 8 MB;
+        }
+
+        min_heap_size = max_heap_size / 10;
+        if (min_heap_size < 8 MB) min_heap_size = 8 MB;
+    }
+
+    if (is_property_set("gc.ms")) {
+        min_heap_size = parse_size_string(vm_get_property_value("gc.ms"));
+
+        if (0 == min_heap_size)
+            INFO("wrong min heap size");
+
+        if (min_heap_size < 1 MB) {
+            INFO("min heap size is too small: " << min_heap_size);
+            min_heap_size = 1 MB;
+        }
+    }
+
+    if (min_heap_size > max_heap_size) {
+        INFO("min heap size is larger then max");
+        max_heap_size = min_heap_size;
+    }
+
+
+    if (is_property_set("gc.lp")) {
+        lp_hint = vm_get_property_value("gc.lp");
+    }
+    
+    if (is_property_set("gc.type"))
+        gc_algorithm = get_property_value_int("gc.type");
+
+#if (defined _DEBUG) || ! (defined NDEBUG)
+    char *build_mode = " (debug)";
+#else
+    char *build_mode = " (release)";
+#endif
+    INFO("gc 4.1" << build_mode);
+    INFO("GC type = " << gc_algorithm);
+
+    if (get_property_value_boolean("gc.ignore_finalizers", false)) {
+        ignore_finalizers = true;
+        INFO("GC will ignore finalizers");
+    }
+
+    if (get_property_value_boolean("gc.adaptive", true)) {
+        INFO("GC will use adaptive algorithm selection");
+    } else {
+        INFO("GC will NOT use adaptive algorithm selection");
+        gc_adaptive = false;
+    }
+
+    if (get_property_value_boolean("gc.remember_root_set", false)) {
+        remember_root_set = true;
+        INFO("GC will retrieve root set before any modification in heap");
+    }
+}
+
+#ifdef _WIN32
+static inline void *reserve_mem(long size) {
+    return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_READWRITE);
+}
+static const void* RESERVE_FAILURE = 0;
+#else
+static inline void *reserve_mem(long size) {
+    return mmap(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+}
+static const void* RESERVE_FAILURE = MAP_FAILED;
+#endif
+
+
+
+
+void init_mem() {
+    parse_configuration_properties();
+    max_heap_size = round_down(max_heap_size, 65536);
+    min_heap_size = round_down(min_heap_size, 65536);
+    INFO("min heap size " << mb(min_heap_size) << " mb");
+    INFO("max heap size " << mb(max_heap_size) << " mb");
+
+    heap_base = 0;
+
+    heap_base = NULL;
+    if (lp_hint) {
+        heap_base = (unsigned char*) alloc_large_pages(max_heap_size, lp_hint);
+        if (heap_base == NULL) lp_hint = NULL;
+        else min_heap_size = max_heap_size;
+    }
+
+    if (heap_base == NULL) {
+        INFO("GC use small pages\n");
+    } else {
+        INFO("GC use large pages\n");
+    }
+
+    if (heap_base == NULL) {
+        heap_base = (unsigned char*) reserve_mem(max_heap_size);
+        if (heap_base == RESERVE_FAILURE) {
+            long dec = 100 * 1024 * 1024;
+            max_heap_size = max_heap_size / dec * dec;
+
+            while(true) {
+                heap_base = (unsigned char*) reserve_mem(max_heap_size);
+                if (heap_base != RESERVE_FAILURE) break;
+                max_heap_size -= dec;
+                assert(max_heap_size > 0);
+            }
+            ECHO("WARNING: max heap size is too large, reduced to " << mb(max_heap_size) << " Mb");
+        }
+    }
+
+    if (min_heap_size > max_heap_size) {
+        min_heap_size = max_heap_size;
+        ECHO("WARNING: min heap size reduced to " << mb(min_heap_size) << " Mb");
+    }
+
+    heap_ceiling = heap_base + max_heap_size;
+
+    heap.base = heap_base;
+    heap.size = min_heap_size;
+    heap.ceiling = heap.base + heap.size - RESERVED_FOR_LAST_HASH;
+    heap.max_size = max_heap_size;
+
+#ifdef _WIN32
+    void *res;
+    if (heap_base && !lp_hint) {
+        res = VirtualAlloc(heap.base, heap.size, MEM_COMMIT, PAGE_READWRITE);
+        if (!res) DIE("Can't create heap_L");
+    }
+#endif
+    chunk_size = round_down(heap.size / 10, 65536);
+    init_gcvt();
+    gc_reserve_mark_bits();
+}
+
+void gc_init() {
+    INFO2("gc.init", "GC init called\n");
+    init_mem();
+    init_select_gc();
+    gc_end = apr_time_now();
+    timer_init();
+}
+
+void
+gc_wrapup() {
+    gc_start = apr_time_now();
+    total_user_time += gc_start - gc_end;
+    INFO("\nGC: "
+        << gc_num << " time(s), "
+        << "avg " << (gc_num ? (total_gc_time/gc_num/1000) : 0) << " ms, "
+        << "max " << (max_gc_time/1000) << " ms, "
+        << "total " << total_gc_time/1000 << " ms, "
+        << "gc/user " << (int)(total_gc_time*100.f/total_user_time) << " %"
+    );
+    INFO2("gc.init", "gc_wrapup called");
+    gc_unreserve_mark_bits();
+    deinit_gcvt();
+#ifdef _WIN32
+    bool UNUSED res = VirtualFree(heap_base, max_heap_size, MEM_DECOMMIT);
+    assert (res);
+#else
+    int UNUSED res = munmap(heap_base, max_heap_size);
+    assert (res != -1);
+#endif
+    INFO2("gc.init", "gc_wrapup done");
+}
+
+void gc_reserve_mark_bits() {
+    mark_bits_size = max_heap_size / sizeof(void*) / 8;
+#ifdef _WIN32
+    mark_bits = (unsigned char*) VirtualAlloc(NULL, mark_bits_size, MEM_RESERVE, PAGE_READWRITE);
+    assert(mark_bits);
+#else
+    mark_bits = (unsigned char*) mmap(0, mark_bits_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    assert(mark_bits != MAP_FAILED);
+#endif
+}
+
+void gc_unreserve_mark_bits() {
+#ifdef _WIN32
+    bool UNUSED res = VirtualFree(mark_bits, 0, MEM_RELEASE);
+    assert(res);
+#else
+    int UNUSED res = munmap(mark_bits, mark_bits_size);
+    assert(res != -1);
+#endif
+}
+
+static unsigned char *mark_bits_allocated_start;
+static unsigned char *mark_bits_allocated_end;
+
+void gc_allocate_mark_bits() {
+    //memset(heap.compaction_region_start(), 0, heap.compaction_region_end() - heap.compaction_region_start());
+    unsigned char *start = mark_bits + (heap.compaction_region_start() - heap_base) / sizeof(void*) / 8;
+    unsigned char *end = mark_bits + (heap.compaction_region_end() - heap_base + sizeof(void*) * 8 - 1) / sizeof(void*) / 8;
+    int page = 4096; // FIXME
+    mark_bits_allocated_start = (unsigned char*)((int)start & ~(page - 1));
+    mark_bits_allocated_end = (unsigned char*)(((int)end + page - 1) & ~(page - 1));
+#ifdef _WIN32
+    unsigned char *res = (unsigned char*) VirtualAlloc(mark_bits_allocated_start,
+            mark_bits_allocated_end - mark_bits_allocated_start, MEM_COMMIT, PAGE_READWRITE);
+    assert(res);
+#endif
+}
+
+void gc_deallocate_mark_bits() {
+#ifdef _WIN32
+    bool UNUSED res = VirtualFree(mark_bits_allocated_start,
+            mark_bits_allocated_end - mark_bits_allocated_start, MEM_DECOMMIT);
+    assert(res);
+#else
+    void UNUSED *res = mmap(mark_bits, mark_bits_size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    assert(res == (void*)mark_bits);
+    assert(mark_bits[0] == 0);
+#endif
+}
+
+void heap_extend(size_t size) {
+    size = (size + 65535) & ~65535;
+    if (size > max_heap_size) size = max_heap_size;
+    if (size <= heap.size) return;
+
+#ifdef _WIN32
+    void* UNUSED res = VirtualAlloc(heap.base + heap.size, size - heap.size, MEM_COMMIT, PAGE_READWRITE);
+    assert(res);
+#endif
+    heap.size = size;
+    unsigned char *old_ceiling = heap.ceiling;
+    heap.ceiling = heap.base + heap.size - RESERVED_FOR_LAST_HASH;
+
+    if (old_ceiling == heap.pos_limit) {
+        heap.pos_limit = heap.ceiling;
+    }
+    chunk_size = round_down(heap.size / (10 * num_threads),128);
+    INFO("heap extended to  " << mb(heap.size) << " mb");
+}
+
+// disabled now
+void heap_shrink(size_t size) {
+    size = (size + 65535) & ~65535;
+    if (size < min_heap_size) size = min_heap_size;
+    if (!pinned_areas.empty()) {
+        size_t pin_limit = pinned_areas[pinned_areas.size() - 1] - heap.base;
+        pin_limit = (pin_limit + 65535) & ~65535;
+        if (size < pin_limit) size = pin_limit;
+    }
+    if (size >= heap.size) return;
+
+#ifdef _WIN32
+    bool UNUSED res = VirtualFree(heap.base + size, heap.size - size, MEM_DECOMMIT);
+    assert(res);
+#else
+    void UNUSED *res = mmap(heap.base + size, heap.size - size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    assert(res == (void*)(heap.base + size));
+#endif
+
+    heap.size = size;
+    heap.ceiling = heap.base + heap.size - RESERVED_FOR_LAST_HASH;
+
+    if (heap.ceiling > heap.pos_limit) {
+        heap.pos_limit = heap.ceiling;
+    }
+    chunk_size = round_down(heap.size / (10 * num_threads),128);
+    INFO("heap shrinked to  " << mb(heap.size) << " mb");
+}
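
For clarity, a short sketch of how the gc.mx / gc.ms size strings are interpreted by parse_size_string() above (the function is static to this file, so the calls are illustrative only; a 32-bit long is assumed, as elsewhere in this GC):

    long mx  = parse_size_string("512m");   // 512 * 1024 * 1024
    long ms  = parse_size_string("64k");    // 64 * 1024
    long bad = parse_size_string("4g");     // 4 * 1024^3 overflows a 32-bit long;
                                            // 0 is returned and treated as an invalid size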

Propchange: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/large_pages_linux.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/large_pages_linux.cpp?view=auto&rev=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/large_pages_linux.cpp (added)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/large_pages_linux.cpp Sun Sep 10 21:31:36 2006
@@ -0,0 +1,143 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+/**
+ * @author Ivan Volosyuk
+ */
+
+#ifdef __linux__
+
+#include "gc_types.h"
+#include <string.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+
+static size_t proc_huge_page_size = 4096 * 1024;
+static size_t proc_huge_pages_total = (size_t)-1;
+static size_t proc_huge_pages_free = 0;
+static const char *str_HugePages_Total = "HugePages_Total:";
+static const char *str_HugePages_Free = "HugePages_Free:";
+static const char *str_Hugepagesize = "Hugepagesize:";
+
+static const char *parse_value(const char *buf, int len, const char *name, int name_len, size_t *value) {
+    if (len < name_len) return NULL;
+    if (strncmp(buf, name, name_len)) return NULL;
+    buf += name_len;
+    char *endpos;
+    long int res = strtol(buf, &endpos, 10);
+    if (endpos == buf) return NULL;
+    *value = (size_t) res;
+    return endpos;
+}
+
+static void parse_proc_meminfo(size_t required_size) {
+    if (!is_info_enabled("gc.lp")) return;
+    FILE *f = fopen("/proc/meminfo", "r");
+    if (f == NULL) {
+        INFO2("gc.lp", "gc.lp: Can't open /proc/meminfo: " << strerror(errno)
+              << ". Mount /proc filesystem.");
+        return;
+    }
+
+    size_t size = 128;
+    char *buf = (char*) malloc(size);
+    while (true) {
+        ssize_t len = getline(&buf, &size, f);
+        if (len == -1) break;
+        parse_value(buf, len, str_HugePages_Total, strlen(str_HugePages_Total), &proc_huge_pages_total);
+        parse_value(buf, len, str_HugePages_Free, strlen(str_HugePages_Free), &proc_huge_pages_free);
+        const char *end =
+            parse_value(buf, len, str_Hugepagesize, strlen(str_Hugepagesize), &proc_huge_page_size);
+        if (end && !strncmp(end, " kB", 3)) proc_huge_page_size *= 1024;
+    }
+    if (buf) free(buf);
+    INFO2("gc.lp", "gc.lp: /proc/meminfo: System total huge pages = " << proc_huge_pages_total);
+    INFO2("gc.lp", "gc.lp: /proc/meminfo: System free huge pages = " << proc_huge_pages_free);
+    INFO2("gc.lp", "gc.lp: /proc/meminfo: Huge page size = " << proc_huge_page_size / 1024 << " kB");
+    if (proc_huge_pages_total == (size_t)-1) {
+        INFO2("gc.lp", "gc.lp: large pages are not supported by kernel\n"
+                       "       CONFIG_HUGETLB_PAGE and CONFIG_HUGETLBFS needs to be enabled");
+    } else if (proc_huge_pages_total == 0) {
+        INFO2("gc.lp", "gc.lp: no large pages reserved\n"
+                       "       Use following command:\n"
+                       "             echo 20 > /proc/sys/vm/nr_hugepages\n"
+                       "       Do it just after kernel boot before huge pages become"
+                       " fragmented");
+    } else if (proc_huge_pages_free * proc_huge_page_size < required_size) {
+        if (proc_huge_pages_total * proc_huge_page_size >= required_size) {
+            INFO2("gc.lp", "gc.lp: not enough free large pages\n"
+                    "       some of reserved space is already busy");
+        } else {
+            INFO2("gc.lp", "gc.lp: not enough reserved large pages")
+        }
+        INFO2("gc.lp", "gc.lp: " << mb(proc_huge_pages_free * proc_huge_page_size)
+                << " mb can be only allocated");
+    }
+}
+
+void *map_large_pages(const char *path, size_t size) {
+    INFO2("gc.lp", "gc.lp: large pages using mmap");
+    const char *postfix = "/vm_heap";
+    char *buf = (char *) malloc(strlen(path) + strlen(postfix) + 1);
+    assert(buf);
+
+    strcpy(buf, path);
+    strcat(buf, postfix);
+
+    int fd = open(buf, O_CREAT | O_RDWR, 0700);
+    if (fd == -1) {
+        INFO2("gc.lp", "gc.lp: can't open " << buf << ": " << strerror(errno) << "\n"
+                       "Mount hugetlbfs with: mount none /mnt/huge -t hugetlbfs\n"
+                       "Check you have appropriate permissions to /mnt/huge\n"
+                       "Use command line switch -Dgc.lp=/mnt/huge");
+        free(buf);
+        return NULL;
+    }
+    unlink(buf);
+
+    void *addr = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+    if (addr == MAP_FAILED) {
+        INFO2("gc.lp", "gc.lp: map failed: " << strerror(errno));
+        close(fd);
+        free(buf);
+        return NULL;
+    }
+    close(fd);
+    free(buf);
+    INFO2("gc.lp", "gc.lp: large pages successfully allocated");
+    return addr;
+}
+
+
+void *alloc_large_pages(size_t size, const char *hint) {
+    INFO2("gc.lp", "gc.lp: size = " << mb(size) << " mb, hint = " << hint);
+    parse_proc_meminfo(size);
+    void *addr = map_large_pages(hint, size);
+    if (addr == NULL) {
+        if (is_info_enabled("gc.lp")) {
+            INFO2("gc.lp", "read also /usr/src/linux/Documentation/vm/hugetlbpage.txt");
+        } else {
+            WARN2("gc.lp", "large pages allocation failed, use -verbose:gc.lp for more info");
+        }
+    }
+    return addr;
+}
+
+#endif

Propchange: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/large_pages_linux.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/large_pages_win32.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/large_pages_win32.cpp?view=auto&rev=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/large_pages_win32.cpp (added)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/large_pages_win32.cpp Sun Sep 10 21:31:36 2006
@@ -0,0 +1,106 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+/**
+ * @author Ivan Volosyuk
+ */
+
+#ifdef _WIN32
+
+#include "gc_types.h"
+
+void trace_msg(char *msg) {
+    int error = GetLastError();
+    char buffer[1024];
+
+    DWORD n = FormatMessage(
+            FORMAT_MESSAGE_FROM_SYSTEM,
+            0,
+            error,
+            LANG_SYSTEM_DEFAULT,
+            buffer,
+            1024,
+            0);
+    INFO2("gc.lp", "gc.lp: " << msg << buffer);
+}
+
+bool large_pages_token(bool obtain) {
+    HANDLE process = GetCurrentProcess();
+    HANDLE token;
+    TOKEN_PRIVILEGES tp;
+
+    bool res = OpenProcessToken(process, TOKEN_ADJUST_PRIVILEGES, &token);
+
+    if (!res) {
+        trace_msg("OpenProcessToken(): ");
+        return false;
+    }
+
+    size_t size = 4096 * 1024;
+    // FIXME: not defined on WinXp
+    //size = GetLargePageMinimum();
+
+    tp.PrivilegeCount = 1;
+    tp.Privileges[0].Attributes = obtain ? SE_PRIVILEGE_ENABLED : 0;
+
+    res = LookupPrivilegeValue( NULL, SE_LOCK_MEMORY_NAME, &tp.Privileges[0].Luid);
+
+    if (!res) {
+        trace_msg("LookupPrivilegeValue(): ");
+        CloseHandle(token);
+        return false;
+    }
+
+    
+    if (AdjustTokenPrivileges( token, FALSE, &tp, 0, NULL, 0) == ERROR_NOT_ALL_ASSIGNED) {
+        trace_msg("AdjustTokenPrivileges(): ");
+        CloseHandle(token);
+        return false;
+    }
+    return true;
+}
+
+void *alloc_large_pages(size_t size, const char *hint) {
+    bool priv = large_pages_token(true);
+    void *res = NULL;
+
+    if (priv) {
+        res = VirtualAlloc(NULL, size,
+                MEM_RESERVE | MEM_COMMIT|MEM_LARGE_PAGES, PAGE_READWRITE);
+        if (res == NULL) {
+            INFO2("gc.lp", "gc.lp: No required number of large pages found, reboot!\n\n");
+        }
+    }
+
+    if (res == NULL) {
+        if (is_info_enabled("gc.lp")) {
+            INFO2("gc.lp", "gc.lp: Check that you have permissions:\n"
+                           "gc.lp:  Control Panel->Administrative Tools->Local Security Settings->\n"
+                           "gc.lp:  ->User Rights Assignment->Lock pages in memory\n"
+                           "gc.lp: Start VM as soon after reboot as possible, because large pages\n"
+                           "gc.lp: become fragmented and unusable after a while\n"
+                           "gc.lp: Heap size should be multiple of large page size");
+        } else {
+            WARN2("gc.lp", "large pages allocation failed, use -verbose:gc.lp for more info");
+        }
+        return NULL;
+    } else {
+        INFO2("gc.lp", "gc.lp: large pages are allocated\n");
+    }
+    large_pages_token(false);
+    return res;
+}
+
+#endif

Propchange: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/large_pages_win32.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp?view=auto&rev=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp (added)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp Sun Sep 10 21:31:36 2006
@@ -0,0 +1,161 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+/**
+ * @author Ivan Volosyuk
+ */
+
+#include <assert.h>
+#include <open/vm_gc.h>
+#include <open/vm.h>
+#include <open/gc.h>
+#include <port_malloc.h>
+#include <tl/memory_pool.h>
+#include "gc_types.h"
+
+tl::MemoryPoolMT *gcvt_pool;
+int global_referent_offset = 0;
+
+void init_gcvt() {
+    gcvt_pool = new tl::MemoryPoolMT();
+}
+
+void deinit_gcvt() {
+    delete gcvt_pool;
+}
+
+// A comparison function for qsort().
+static int 
+intcompare(const void *vi, const void *vj)
+{
+    const int *i = (const int *) vi;
+    const int *j = (const int *) vj;
+    if (*i > *j)
+        return 1;
+    if (*i < *j)
+        return -1;
+    return 0;
+}
+
+static GC_VTable_Info* build_slot_offset_array(Class_Handle ch, Partial_Reveal_VTable *vt, WeakReferenceType type) 
+{
+    GC_VTable_Info *result = NULL;
+
+    unsigned num_ref_fields = 0;
+    //
+    // Careful: this counts instance fields recursively (not just this class's own).
+    // Calculate the size needed for the offset table.
+    //
+    unsigned num_fields = class_num_instance_fields_recursive(ch);
+
+    unsigned idx;
+    for(idx = 0; idx < num_fields; idx++) {
+        Field_Handle fh = class_get_instance_field_recursive(ch, idx);
+        if(field_is_reference(fh)) {
+            num_ref_fields++;
+        }
+    }
+
+    int skip = -1; // do not skip any reference
+    if (type != NOT_REFERENCE) {
+        int offset = class_get_referent_offset(ch);
+        if (global_referent_offset == 0) {
+            global_referent_offset = offset;
+        } else {
+            assert(global_referent_offset == offset);
+        }
+
+        skip = global_referent_offset; // skip global referent offset
+        num_ref_fields--;
+    }
+
+    // We need room for the terminating 0 so add 1.
+    unsigned int size = (num_ref_fields+1) * sizeof (unsigned int) + sizeof(GC_VTable_Info);
+
+    // malloc up the array if we need one.
+    result = (GC_VTable_Info*) gcvt_pool->alloc(size);
+
+    int *new_ref_array = (int *) (result + 1);
+    int *refs = new_ref_array;
+
+    for(idx = 0; idx < num_fields; idx++) {
+        Field_Handle fh = class_get_instance_field_recursive(ch, idx);
+        if(field_is_reference(fh)) {
+            int offset = field_get_offset(fh);
+            if (offset == skip) continue;
+            *refs = offset;
+            refs++;
+        }
+    }
+
+    // It is 0 delimited.
+    *refs = 0;
+
+    // The VM doesn't necessarily report the reference fields in
+    // memory order, so we sort the slot offset array.  The sorting
+    // is required by the verify_live_heap code.
+    qsort(new_ref_array, num_ref_fields, sizeof(*new_ref_array), intcompare);
+    return result;
+}
+
+
+void gc_class_prepared(Class_Handle ch, VTable_Handle vth) {
+    TRACE2("gc.init", "gc_class_prepared");
+    assert(ch);
+    assert(vth);
+    Partial_Reveal_VTable *vt = (Partial_Reveal_VTable *)vth;
+
+
+    if (class_is_array(ch)) {
+        int el_size = class_element_size(ch);
+        int el_offset;
+        for(el_offset = -1; el_size; el_size >>= 1, el_offset++);
+
+        int first_element = (el_offset == 3) ? 16 : 12;
+
+        int flags = GC_VT_ARRAY
+            | (el_offset << GC_VT_ARRAY_ELEMENT_SHIFT)
+            | (first_element << GC_VT_ARRAY_FIRST_SHIFT);
+
+        if (!class_is_non_ref_array(ch)) {
+            flags |= GC_VT_HAS_SLOTS;
+        }
+        
+        GC_VTable_Info *info = (GC_VTable_Info*) flags;
+        vt->set_gcvt(info);
+        return;
+    }
+
+    WeakReferenceType type = class_is_reference(ch);
+    GC_VTable_Info *info = build_slot_offset_array(ch, vt, type);
+    info->size_and_ref_type = class_get_boxed_data_size(ch) | (int)type;
+
+    int flags = 0;
+    if (!ignore_finalizers && class_is_finalizable(ch)) {
+        flags |= GC_VT_FINALIZIBLE;
+    }
+
+    int *offset_array = (int*)(info + 1);
+    if (type != NOT_REFERENCE || (*offset_array != 0)) {
+        flags |= GC_VT_HAS_SLOTS;
+    }
+
+    int addr = (int) info;
+    assert((addr & 7) == 0); // required alignment
+
+    flags |= addr;
+    vt->set_gcvt((GC_VTable_Info*) flags);
+}
+

Propchange: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp
------------------------------------------------------------------------------
    svn:eol-style = native
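For illustration, the zero-terminated slot offset array that build_slot_offset_array() lays out right after the GC_VTable_Info header is meant to be walked when an object is scanned. The sketch below is not code from this commit: visit_reference_slots() and process_slot() are assumed names, and gcvt is assumed to be the info pointer with the low flag bits already stripped.

    // Assumed helper, not part of this commit: processes one reference slot.
    extern void process_slot(Partial_Reveal_Object **slot);

    // Sketch only: walk the 0-delimited offset array stored right after the
    // GC_VTable_Info header and visit every reference slot of the object.
    static void visit_reference_slots(Partial_Reveal_Object *obj, GC_VTable_Info *gcvt)
    {
        int *offsets = (int *) (gcvt + 1);   // array laid out by build_slot_offset_array()
        for (; *offsets != 0; offsets++) {
            process_slot((Partial_Reveal_Object **) ((char *) obj + *offsets));
        }
    }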

Added: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h?view=auto&rev=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h (added)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h Sun Sep 10 21:31:36 2006
@@ -0,0 +1,21 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+/**
+ * @author Ivan Volosyuk
+ */
+
+typedef fast_list<Partial_Reveal_Object**,65536> roots_vector;
+extern roots_vector root_set;

Propchange: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h
------------------------------------------------------------------------------
    svn:eol-style = native
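As a usage illustration only (not part of this commit), the cached root set can be filled while the VM enumerates roots and drained afterwards using the fast_list operations this patch relies on elsewhere (push_back/pop_back); process_root() is a hypothetical helper and empty() is assumed to be provided by fast_list.

    // Assumed helper, not part of this commit: processes one root slot.
    extern void process_root(Partial_Reveal_Object **slot);

    // Sketch: cache a reported root slot during enumeration ...
    void cache_root(Partial_Reveal_Object **slot) {
        root_set.push_back(slot);
    }

    // ... and drain the cache once enumeration is complete.
    void drain_root_cache() {
        while (!root_set.empty()) {
            process_root(root_set.pop_back());
        }
    }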

Added: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp?view=auto&rev=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp (added)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp Sun Sep 10 21:31:36 2006
@@ -0,0 +1,323 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+/**
+ * @author Ivan Volosyuk
+ */
+
+#include "gc_types.h"
+#include "collect.h"
+#include <math.h>
+
+void reserve_old_object_space(int size) {
+    size &= ~3;
+
+    int free = heap.old_objects.end - heap.old_objects.pos;
+    if (size < 0) {
+        TRACE2("gc.select", "Reserve old object space: can't shrink old object space");
+        return;
+    }
+
+    assert(heap.old_objects.end == heap.pos);
+    if (heap.old_objects.end + size > heap.ceiling) {
+        size = heap.ceiling - heap.old_objects.end;
+    }
+
+    heap.old_objects.end += size;
+    TRACE2("gc.select", "Reserved space = " << mb(heap.old_objects.end - heap.old_objects.pos));
+
+    // push the heap ceiling as a temporary sentinel for the area-balancing loops below.
+    pinned_areas.push_back(heap.ceiling);
+
+    // update heap.old_objects.pos_limit
+    if (heap.old_objects.pos_limit == heap.pos) {
+        assert(old_pinned_areas_pos > old_pinned_areas.size());
+        if (heap.pos_limit < heap.old_objects.end) {
+            // area entirely in old objects
+            heap.old_objects.pos_limit = heap.pos_limit;
+            heap.pos = pinned_areas[pinned_areas_pos];
+            old_pinned_areas.push_back(heap.pos_limit);
+            old_pinned_areas.push_back(heap.pos);
+            heap.pos_limit = pinned_areas[pinned_areas_pos + 1];
+            pinned_areas_pos += 2;
+        } else {
+            heap.old_objects.pos_limit = heap.old_objects.end;
+            heap.pos = heap.old_objects.end;
+        }
+    }
+
+    while (heap.pos_limit < heap.old_objects.end) {
+        // area entirely in old objects
+        heap.pos = pinned_areas[pinned_areas_pos];
+        old_pinned_areas.push_back(heap.pos_limit);
+        old_pinned_areas.push_back(heap.pos);
+        heap.pos_limit = pinned_areas[pinned_areas_pos + 1];
+        pinned_areas_pos += 2;
+    }
+
+    if (heap.pos < heap.old_objects.end) {
+        heap.pos = heap.old_objects.end;
+    }
+    heap.old_objects.end = heap.pos;
+
+    // remove the temporary sentinel pushed above.
+    pinned_areas.pop_back();
+}
+
+unsigned char *select_gc(int size) {
+    // FIXME: missing information about finalizable objects added to the evacuation area during allocation
+    heap.old_objects.prev_pos = heap.old_objects.pos;
+
+    unsigned char *res;
+    int alg = gc_algorithm % 10;
+
+    switch (alg) {
+        case 0: break;
+        case 1: return full_gc(size);
+        case 2: return slide_gc(size);
+        default: abort();
+    }
+
+    GC_TYPE gc = heap.next_gc;
+    TRACE2("gc.select", "starting gc = " << gc_name(gc));
+    switch(gc) {
+        case GC_COPY: res = copy_gc(size); break;
+        case GC_FULL: res = full_gc(size); break;
+        case GC_SLIDE_COMPACT: res = slide_gc(size); break;
+        default: abort();
+    }
+
+    GC_TYPE gc_out = gc_type;
+
+    if (gc_out != gc) {
+        // the reserved space was too small or application behaviour changed, so the collector switched type
+    }
+
+    if (!res) {
+        TRACE2("gc.mem", "Not enough free memory after collection to allocate " << size << " bytes");
+    }
+    
+    TRACE2("gc.mem", "select_gc = " << res);
+    if ((!res) && gc != GC_FULL) {
+        TRACE2("gc.select", "no free mem after gc, trying full gc");
+        heap.next_gc = GC_FULL;
+        res = full_gc(size);
+    }
+
+    TRACE2("gc.mem", "select_gc2 = " << res);
+
+    if (res == 0 && heap.size != heap.max_size) {
+        assert(heap.pos_limit == heap.ceiling);
+        heap_extend(round_up(heap.size + size, 65536));
+        if (heap.pos + size <= heap.pos_limit) {
+            res = heap.pos;
+            heap.pos += size;
+        }
+    }
+
+    return res;
+}
+
+float Smin(float Smax, float Tslow, float Tfast, float dS) {
+    /* The function finds the maximum of the performance function below:
+     * Smax - maximum free size = heap.size - working set size
+     * Tslow - time of a full compaction
+     * Tfast - time of a copying GC
+     * dS - space consumed by each copying GC
+     * Smin - free space threshold below which compaction is better than copying
+     *
+    float perf(float Smax, float Tslow, float Tfast, float dS, float Smin) {
+        float avg_free = (Smax + Smin) / 2;
+        float n_iter = (Smax - Smin) / dS;
+        float total_time = Tslow + Tfast * n_iter;
+        float total_free = avg_free * (n_iter + 1);
+        return total_free / total_time;
+    }*/
+
+    // TODO: simplify expression
+    float k = Tslow / Tfast;
+    float m = dS / Smax;
+    float a = 1;
+    float b = - (2 + 2 * k * m);
+    float c = k * m * m + 2 * m + 1;
+    float D = b * b - 4 * a * c;
+    if (D <= 0) {
+        return Smax;
+    }
+    float pm = sqrt (D) / 2 / a;
+    float base = - b / 2 / a;
+    float res = base - pm;
+    if (res > 1.f) res = 1.f;
+    return res * Smax;
+}
+
+bool need_compaction_next_gc() {
+    if (heap.working_set_size == 0 || !gc_adaptive) {
+        TRACE2("gc.adaptive", "static Smin analysis");
+        return heap.ceiling - heap.pos < heap.size * 0.7f;
+    } else {
+        float smin = Smin(heap.size - heap.working_set_size,
+                heap.Tcompact, heap.Tcopy, heap.dS_copy);
+        float free = (float) (heap.ceiling - heap.pos);
+        //INFO2("gc.logic", "Smin = " << (int) mb((int)smin) << "mb, free = " << mb((int)free) << " mb");
+        return free < smin;
+            
+    }
+}
+
+static void check_heap_extend() {
+    int free_space = heap.allocation_region_end() - heap.allocation_region_start();
+    int used_space = heap.size - free_space;
+
+    if (free_space < used_space) {
+        size_t new_heap_size = used_space * 8;
+        if (new_heap_size / 8 != used_space) {
+            // overflow!
+            new_heap_size = heap.max_size;
+        } else if (new_heap_size > heap.max_size) {
+            new_heap_size = heap.max_size;
+        }
+
+        if (new_heap_size != heap.size) {
+            heap_extend(new_heap_size);
+        }
+    }
+}
+
+size_t correction;
+
+static void update_evacuation_area() {
+    POINTER_SIZE_SINT free = heap.allocation_region_end() - heap.allocation_region_start();
+    POINTER_SIZE_SINT incr = heap.allocation_region_start() - heap.old_objects.prev_pos;
+    //INFO2("gc.logic", "free = " << free / 1024 / 1024 << " incr = " << incr / 1024 / 1024);
+
+    if (incr > 0 && incr > free) {
+        //INFO2("gc.logic", "increment too large, switching to compaction");
+        heap.next_gc = GC_FULL;
+        return;
+    }
+
+    if (need_compaction_next_gc()) {
+        //INFO2("gc.logic", "compaction triggered by Smin");
+        heap.next_gc = GC_FULL;
+        heap.dS_copy = 0;
+        return;
+    }
+
+    // original gc type
+    GC_TYPE gc = heap.next_gc;
+    POINTER_SIZE_SINT overflow = heap.old_objects.pos - heap.predicted_pos;
+
+    // heuristics down here
+    
+    if (gc != GC_COPY) {
+        heap.next_gc = GC_COPY;
+        float reserve = (heap.incr_abs + heap.incr_rel * free);
+        heap.predicted_pos = heap.old_objects.pos + (POINTER_SIZE_SINT) reserve;
+        //INFO2("gc.logic", "1.incr_abs = " << heap.incr_abs / 1024 / 1024 << " mb incr_rel = " << (double) heap.incr_rel);
+        reserve_old_object_space(heap.predicted_pos - heap.old_objects.end);
+        return;
+    }
+    assert(incr > 0);
+    heap.dS_copy = (float)incr;
+
+    /*INFO2("gc.logic", 
+            "mb overflow = " << overflow / 1024 / 1024
+            << "mb rest = " << mb(heap.old_object_region_end - heap.old_object_region_pos) << " mb");*/
+
+    // correct heap.incr_abs, heap.incr_rel
+    if (correction == 0) {
+        correction = heap.size / 30;
+    }
+    overflow += correction;
+    float fullness = (float) (free + incr) / heap.size;
+    float overflow_rel = fullness * overflow;
+    float overflow_abs = (1.f - fullness) * overflow;
+    heap.incr_abs += (size_t) overflow_abs;
+    if (heap.incr_abs < 0) {
+        heap.incr_rel += (overflow_rel + heap.incr_abs) / (free + incr);
+        heap.incr_abs = 0;
+    } else {
+        heap.incr_rel += overflow_rel / (free + incr);
+    }
+
+
+    float reserve = (heap.incr_abs + heap.incr_rel * free);
+    heap.predicted_pos = heap.old_objects.pos + (POINTER_SIZE_SINT) reserve;
+
+    //INFO2("gc.logic", "2.incr_abs = " << heap.incr_abs / 1024 / 1024 << " mb incr_rel = " << heap.incr_rel);
+    reserve_old_object_space(heap.predicted_pos - heap.old_objects.end);
+    heap.next_gc = GC_COPY;
+}
+
+
+void after_copy_gc() {
+    update_evacuation_area();
+}
+
+void after_slide_gc() {
+
+    check_heap_extend();
+
+    /* FIXME: shrink disabled for safety
+        else if (free_space / 9 > used_space) {
+        heap_shrink(free_space * 10);
+    */
+
+    if (gc_algorithm % 10 != 0) return;
+
+    update_evacuation_area();
+}
+
+void select_force_gc() {
+    if (gc_algorithm < 10) {
+        vm_gc_lock_enum();
+        force_gc();
+        vm_gc_unlock_enum();
+        vm_hint_finalize();
+    } else if ((gc_algorithm / 10) == 2) {
+        vm_gc_lock_enum();
+        full_gc(0);
+        vm_gc_unlock_enum();
+        vm_hint_finalize();
+    }
+}
+
+void init_select_gc() {
+    heap.old_objects.end = heap.old_objects.pos = heap.old_objects.pos_limit = heap.base;
+
+    heap.pos = heap.base;
+    heap.pos_limit = heap.ceiling;
+
+    heap.incr_abs = 0;
+    heap.incr_rel = 0.2f;
+
+    old_pinned_areas_pos = 1;
+    heap_mark_phase = 1;
+    pinned_areas_pos = 1;
+
+    if (gc_algorithm % 10 == 0) {
+        int reserve = heap.size / 5;
+        reserve_old_object_space(reserve);
+        heap.predicted_pos = heap.base + reserve;
+    }
+    if (gc_algorithm % 10 == 3) {
+        int reserve = heap.size / 3;
+        reserve_old_object_space(reserve);
+        heap.predicted_pos = heap.base + reserve;
+    }
+    heap.next_gc = GC_COPY;
+
+}

Propchange: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp
------------------------------------------------------------------------------
    svn:eol-style = native
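For the record, the quadratic solved in Smin() can be recovered from the commented-out perf() model; the derivation below is an editorial sketch, not text from the commit. Writing x = Smin/Smax, m = dS/Smax and k = Tslow/Tfast, the quantity being maximized is proportional to

    f(x) = (1 + x) * (1 + m - x) / (1 + k*m - x)

and setting f'(x) = 0 gives

    x^2 - (2 + 2*k*m)*x + (k*m^2 + 2*m + 1) = 0

which is exactly the equation solved with a = 1, b = -(2 + 2*k*m), c = k*m*m + 2*m + 1. The smaller root, clamped to 1 and scaled back by Smax, is the returned threshold; when the discriminant is not positive the function falls back to Smax.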

Added: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h?view=auto&rev=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h (added)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h Sun Sep 10 21:31:36 2006
@@ -0,0 +1,33 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+/**
+ * @author Ivan Volosyuk
+ */
+
+struct InteriorPointer {
+    Partial_Reveal_Object *obj;
+    int offset;
+    Partial_Reveal_Object **interior_ref;
+};
+
+extern fast_list<Partial_Reveal_Object*, 65536> objects;
+extern fast_list<InteriorPointer,256> interior_pointers;
+
+
+inline bool is_left_object(Partial_Reveal_Object *refobj, Partial_Reveal_Object **ref) {
+    return (void*)refobj <= (void*) ref;
+}
+

Propchange: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h
------------------------------------------------------------------------------
    svn:eol-style = native
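For illustration only: after compaction moves objects, each cached InteriorPointer can be re-materialized from its base object's new address plus the recorded offset. The sketch below is not this commit's update code; new_location() is a hypothetical stand-in for however the compactor exposes forwarding addresses.

    // Assumed helper, not part of this commit: post-compaction address of a moved object.
    extern Partial_Reveal_Object *new_location(Partial_Reveal_Object *obj);

    // Sketch: rewrite one saved interior reference after its base object has moved.
    static void update_interior_reference(const InteriorPointer &ip) {
        char *new_base = (char *) new_location(ip.obj);
        *ip.interior_ref = (Partial_Reveal_Object *) (new_base + ip.offset);
    }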

Added: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/timer.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/timer.h?view=auto&rev=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/timer.h (added)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/timer.h Sun Sep 10 21:31:36 2006
@@ -0,0 +1,95 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+/**
+ * @author Ivan Volosyuk
+ */
+
+#define STATISTICS
+#ifdef STATISTICS
+#  define TIME(action_name, action_params) { Timer timer_##action_name(#action_name); \
+                                           action_name action_params;               \
+                                           timer_##action_name.finish(); }
+
+#ifdef _WIN32
+static __declspec(naked) __int64
+ticks(void) {
+    __asm       {
+        rdtsc
+        ret
+    }
+}
+#else
+static int64
+ticks(void) {
+    int64 val;
+    __asm__ __volatile__ ("rdtsc" : "=A" (val));
+    return val;
+}
+#endif
+
+extern int64 timer_start;
+extern int64 timer_dt;
+
+class Timer {
+    const char *action;
+    const char *category;
+    apr_time_t start;
+    bool finished;
+    public:
+    
+    Timer(const char *str) {
+        action = str;
+        finished = false;
+        category = "gc.time";
+        start = ticks();
+    }
+
+    Timer(const char *str, const char *_category) {
+        action = str;
+        category = _category;
+        finished = false;
+        start = ticks();
+    }
+
+    void finish() {
+        finished = true;
+        apr_time_t end = ticks();
+        INFO2(category, action << " " << (end - start) / timer_dt / 1000 << " ms");
+    }
+
+    ~Timer() {
+        if (!finished) finish();
+    }
+
+    apr_time_t dt() { return ticks() - start; }
+};
+
+inline void timer_init() {
+    timer_start = ticks();
+}
+
+inline void timer_calibrate(apr_time_t time_from_start) {
+    int64 ticks_from_start = ticks() - timer_start;
+    int64 dt = ticks_from_start / time_from_start;
+    timer_dt = dt;
+    
+}
+
+#else
+#  define TIME(action_name, action_params) action_name action_params
+#  define timer_init()
+#  define timer_calibrate()
+#endif

Propchange: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/timer.h
------------------------------------------------------------------------------
    svn:eol-style = native
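As a usage note, the TIME macro wraps a call in a scoped Timer and reports the elapsed time under the "gc.time" category. A hedged example follows; copy_gc is one of the collection phases invoked from selector.cpp, and this particular call site is illustrative only.

    TIME(copy_gc, (size));
    // which expands to roughly:
    // { Timer timer_copy_gc("copy_gc"); copy_gc (size); timer_copy_gc.finish(); }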

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/include/open/gc.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/include/open/gc.h?view=diff&rev=442092&r1=442091&r2=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/include/open/gc.h (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/include/open/gc.h Sun Sep 10 21:31:36 2006
@@ -330,7 +330,6 @@
 
 
 
-
 /*
  * *****
  * *
@@ -584,6 +583,7 @@
 
 extern void (*gc_pin_object)(Managed_Object_Handle* p_object);
 extern void (*gc_unpin_object)(Managed_Object_Handle* p_object);
+extern int32 (*gc_get_hashcode)(Managed_Object_Handle);
 extern Managed_Object_Handle (*gc_get_next_live_object)(void *iterator);
 
 extern void (*gc_finalize_on_exit)();
@@ -738,6 +738,11 @@
  * Unpin object.
  */
 GCExport void gc_unpin_object (Managed_Object_Handle* p_object);
+
+/**
+ * Get identity hashcode.
+ */
+GCExport int32 gc_get_hashcode (Managed_Object_Handle object);
 
 /**
  * Iterate all live objects in heap.

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/interpreter/src/interp_stack_trace.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/interpreter/src/interp_stack_trace.cpp?view=diff&rev=442092&r1=442091&r2=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/interpreter/src/interp_stack_trace.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/interpreter/src/interp_stack_trace.cpp Sun Sep 10 21:31:36 2006
@@ -220,7 +220,6 @@
         method = method;
 
         if (si->This) {
-            ASSERT_OBJECT(si->This);
             vm_enumerate_root_reference((void**)&si->This, FALSE);
             DEBUG_GC("  [THIS]: " << si->This->vt()->clss->name->bytes << endl);
         }
@@ -255,7 +254,6 @@
                         DEBUG_GC("NULL");
                     } else {
                         DEBUG_GC(obj->vt()->clss->name->bytes << endl);
-                        ASSERT_OBJECT(UNCOMPRESS_REF(*cref));
                         vm_enumerate(cref, FALSE);
                     }
                 }
@@ -272,14 +270,12 @@
                         DEBUG_GC("NULL\n");
                     } else {
                         DEBUG_GC(obj->vt()->clss->name->bytes << endl);
-                        ASSERT_OBJECT(UNCOMPRESS_REF(*cref));
                         vm_enumerate(cref, FALSE);
                     }
                 }
             }
         MonitorList *ml = si->locked_monitors;
         while(ml) {
-            ASSERT_OBJECT(ml->monitor);
             vm_enumerate_root_reference((void**)&ml->monitor, FALSE);
             ml = ml->next;
         }

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/tests/smoke/gc/LOS.java
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/tests/smoke/gc/LOS.java?view=diff&rev=442092&r1=442091&r2=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/tests/smoke/gc/LOS.java (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/tests/smoke/gc/LOS.java Sun Sep 10 21:31:36 2006
@@ -77,7 +77,7 @@
         long los_space = allocate_max(large_object_size);
         System.out.println("" + (los_space/1048576) + " Mb available in LOS");
 
-        if (los_space < smos_space - 1048576) {
+        if (los_space * 1.0 < smos_space * 0.8) {
             System.out.println("FAILED, LOS space is too small");
         } else {
             System.out.println("PASSED, LOS available space is on par with SmOS");

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/include/object_generic.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/include/object_generic.h?view=diff&rev=442092&r1=442091&r2=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/include/object_generic.h (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/include/object_generic.h Sun Sep 10 21:31:36 2006
@@ -41,6 +41,9 @@
 long
 generic_hashcode(ManagedObject*);
 
+int32
+default_hashcode(ManagedObject*);
+
 #ifdef __cplusplus
 }
 #endif

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/include/version_svn_tag.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/include/version_svn_tag.h?view=diff&rev=442092&r1=442091&r2=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/include/version_svn_tag.h (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/include/version_svn_tag.h Sun Sep 10 21:31:36 2006
@@ -17,6 +17,6 @@
 #ifndef _VERSION_SVN_TAG_
 #define _VERSION_SVN_TAG_
 
-#define VERSION_SVN_TAG  "433609"
+#define VERSION_SVN_TAG  "441745"
 
 #endif // _VERSION_SVN_TAG_

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/class_support/Class.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/class_support/Class.cpp?view=diff&rev=442092&r1=442091&r2=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/class_support/Class.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/class_support/Class.cpp Sun Sep 10 21:31:36 2006
@@ -403,7 +403,7 @@
     assert(!hythread_is_suspend_enabled());
     assert(jlc != NULL);
     assert(jlc->vt());
-    assert(jlc->vt()->clss == VM_Global_State::loader_env->JavaLangClass_Class);
+    //assert(jlc->vt()->clss == VM_Global_State::loader_env->JavaLangClass_Class);
 
     assert(VM_Global_State::loader_env->vm_class_offset != 0);
     Class **vm_class_ptr = (Class **)(((Byte *)jlc) + VM_Global_State::loader_env->vm_class_offset);

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/gc/dll_gc.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/gc/dll_gc.cpp?view=diff&rev=442092&r1=442091&r2=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/gc/dll_gc.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/gc/dll_gc.cpp Sun Sep 10 21:31:36 2006
@@ -32,10 +32,12 @@
 #include "open/vm_util.h"
 #include "environment.h"
 #include "properties.h"
+#include "object_generic.h"
 
 static void default_gc_write_barrier(Managed_Object_Handle);
 static void default_gc_pin_object(Managed_Object_Handle*);
 static void default_gc_unpin_object(Managed_Object_Handle*);
+static int32 default_gc_get_hashcode(Managed_Object_Handle);
 static Managed_Object_Handle default_gc_get_next_live_object(void*);
 static void default_gc_finalize_on_exit();
 static int64 default_gc_max_memory();
@@ -114,6 +116,7 @@
 
 void (*gc_pin_object)(Managed_Object_Handle* p_object) = 0;
 void (*gc_unpin_object)(Managed_Object_Handle* p_object) = 0;
+int32 (*gc_get_hashcode)(Managed_Object_Handle obj) = 0;
 Managed_Object_Handle (*gc_get_next_live_object)(void *iterator) = 0;
 
 void (*gc_finalize_on_exit)() = 0;
@@ -233,6 +236,10 @@
         getFunctionOptional(handle, "gc_unpin_object", dllName,
             (apr_dso_handle_sym_t)default_gc_unpin_object);
 
+    gc_get_hashcode = (int32 (*)(Managed_Object_Handle))
+        getFunctionOptional(handle, "gc_get_hashcode", dllName,
+            (apr_dso_handle_sym_t)default_gc_get_hashcode);
+
     gc_get_next_live_object = (Managed_Object_Handle (*)(void*))
         getFunctionOptional(handle, "gc_get_next_live_object", dllName,
             (apr_dso_handle_sym_t)default_gc_get_next_live_object);
@@ -444,6 +451,11 @@
 static void default_gc_unpin_object(Managed_Object_Handle*)
 {
     WARN_ONCE("The GC did not provide gc_unpin_object()");
+}
+
+static int32 default_gc_get_hashcode(Managed_Object_Handle obj)
+{
+    return default_hashcode((ManagedObject*) obj);
 }
 
 static Managed_Object_Handle default_gc_get_next_live_object(void*)
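For completeness, a GC library that wants to provide its own identity hashcodes only needs to export the gc_get_hashcode symbol; getFunctionOptional() above then binds the function pointer to it instead of default_gc_get_hashcode. The body below is an illustrative sketch, not the strategy used by this GC.

    // Sketch only: derive the identity hashcode from the object's current address.
    // A moving GC would have to remember the value once the object is relocated;
    // that bookkeeping is omitted here.
    GCExport int32 gc_get_hashcode(Managed_Object_Handle object)
    {
        return (int32) (POINTER_SIZE_SINT) object;
    }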

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/thread/object_generic.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/thread/object_generic.cpp?view=diff&rev=442092&r1=442091&r2=442092
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/thread/object_generic.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/thread/object_generic.cpp Sun Sep 10 21:31:36 2006
@@ -66,7 +66,12 @@
     port_atomic_cas8(P_HASH_CONTENTION_BYTE(p_obj),hb, 0);
 }
 
-long generic_hashcode(ManagedObject * p_obj)
+long generic_hashcode(ManagedObject *obj) {
+    return (long) gc_get_hashcode(obj);
+}
+
+
+int32 default_hashcode(ManagedObject * p_obj)
 {
     if (!p_obj) return 0L;
     if ( *P_HASH_CONTENTION_BYTE(p_obj) & HASH_MASK)


