harmony-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ge...@apache.org
Subject svn commit: r450941 - in /incubator/harmony/enhanced/drlvm/trunk/vm: gc/src/ vmcore/include/ vmcore/src/util/
Date Thu, 28 Sep 2006 17:53:55 GMT
Author: geirm
Date: Thu Sep 28 10:53:54 2006
New Revision: 450941

URL: http://svn.apache.org/viewvc?view=rev&rev=450941
Log:
Rollback of r450918, r450919 (apparently HARMONY-1372) via:

$ svn merge -r 450919:450917 https://svn.apache.org/repos/asf/incubator/harmony/enhanced/drlvm/trunk
U    vm/gc/src/prepare.cpp
U    vm/gc/src/gc_for_vm.cpp
U    vm/gc/src/init.cpp
U    vm/gc/src/collect_forced.cpp
U    vm/gc/src/collect_cache.cpp
U    vm/gc/src/slide_compact.h
U    vm/gc/src/collect_slide_compact.cpp
U    vm/gc/src/collect_copy.cpp
U    vm/gc/src/selector.cpp
U    vm/gc/src/gc_types.h
U    vm/gc/src/collect.cpp
U    vm/gc/src/timer.h
U    vm/gc/src/root_set_cache.h
U    vm/gc/src/collect.h
U    vm/vmcore/src/util/mem_alloc.cpp

because it doesn't build, even after a clean build, on this box (Ubuntu 6). We'll
investigate after the snapshot is done.




Modified:
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.h
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_cache.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_copy.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_forced.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_slide_compact.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/timer.h
    incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/include/version_svn_tag.h
    incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/util/mem_alloc.cpp

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.cpp?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.cpp Thu Sep 28 10:53:54 2006
@@ -25,7 +25,7 @@
 #include "timer.h"
 #include <stdio.h>
 
-fast_list<Slot,65536> slots;
+fast_list<Partial_Reveal_Object**,65536> slots;
 reference_vector soft_references;
 reference_vector weak_references;
 reference_vector phantom_references;
@@ -149,7 +149,7 @@
             i != finalizible_objects.end();) {
 
         Partial_Reveal_Object *obj = *i;
-        assert (obj);
+        if (!obj) { ++i; continue; }
 
         int info = obj->obj_info();
         if (info & heap_mark_phase) {
@@ -192,28 +192,28 @@
             i != array.end(); ++i) {
         Partial_Reveal_Object *ref = *i;
 
-        Slot referent( (Reference*) ((Ptr)ref + global_referent_offset) );
-        Partial_Reveal_Object* obj = referent.read();
+        Partial_Reveal_Object **referent = (Partial_Reveal_Object**) ((Ptr)ref + global_referent_offset);
+        Partial_Reveal_Object* obj = *referent;
 
-        if (obj == heap_null) {
+        if (obj == 0) {
             // reference already cleared
             continue;
         }
 
-        unsigned info = obj->obj_info();
+        int info = obj->obj_info();
         if (info & heap_mark_phase) {
             // object marked, is it moved?
-            unsigned vt = obj->vt();
+            int vt = obj->vt();
             if (!(vt & FORWARDING_BIT)) continue;
             // moved, updating referent field
-            referent.write( fw_to_pointer(vt & ~FORWARDING_BIT) );
+            *referent = (Partial_Reveal_Object*)(vt & ~FORWARDING_BIT);
             continue;
         }
 
         // object not marked
-        referent.write(heap_null);
+        *referent = 0;
         TRACE2("gc.ref", "process_special_references: reference enquequed");
-        vm_enqueue_reference((Managed_Object_Handle)ref);
+        vm_enqueue_reference((Managed_Object_Handle*)ref);
     }
 }
 
@@ -260,11 +260,11 @@
 
 unsigned char *full_gc(int size) {
     Timer gc_time("FULL_GC", "gc.time.total");
-    heap.old_objects.end = heap.old_objects.pos = heap.old_objects.pos_limit = heap.base + RESERVED_FOR_HEAP_NULL;
+    heap.old_objects.end = heap.old_objects.pos = heap.old_objects.pos_limit = heap.base;
     unsigned char *res = slide_gc(size);
 
     heap.Tcompact = (float) gc_time.dt();
-    heap.working_set_size = (float) (heap.old_objects.pos - heap.base);
+    heap.working_set_size = (float) (heap.old_objects.end - heap.base);
     return res;
 }
 
@@ -278,12 +278,12 @@
     gc_slide_process_special_references(phantom_references);
 
     TIME(gc_slide_move_all,());
-    roots_update();
     gc_slide_postprocess_special_references(soft_references);
     gc_slide_postprocess_special_references(weak_references);
+    finalize_objects();
     gc_slide_postprocess_special_references(phantom_references);
+    gc_process_interior_pointers();
     gc_deallocate_mark_bits();
-    finalize_objects();
 
     heap_mark_phase ^= 3;
     // reset thread-local allocation areas
@@ -304,18 +304,19 @@
 
     pinned_areas.clear();
     pinned_areas_unsorted.clear();
-    roots_clear();
     gc_type = GC_SLIDE_COMPACT;
     gc_allocate_mark_bits();
+    gc_reset_interior_pointers();
 
     TIME(enumerate_universe,());
     return finish_slide_gc(size, 0);
 }
 
-void transition_copy_to_sliding_compaction(fast_list<Slot,65536>& slots) {
+void transition_copy_to_sliding_compaction(fast_list<Partial_Reveal_Object**,65536>& slots) {
     INFO2("gc.verbose", "COPY -> COMP on go transition");
     gc_type = GC_SLIDE_COMPACT;
     gc_allocate_mark_bits();
+    gc_reset_interior_pointers();
     gc_slide_process_transitional_slots(slots);
 }
 
@@ -326,7 +327,6 @@
 
     pinned_areas.clear();
     pinned_areas_unsorted.clear();
-    roots_clear();
 
     gc_type = GC_COPY;
     TIME(enumerate_universe,());
@@ -344,13 +344,11 @@
         heap.Tcopy = (float) gc_time.dt();
         return res;
     }
-    process_special_references(phantom_references);
-    roots_update();
     finalize_objects();
+    process_special_references(phantom_references);
 
     heap_mark_phase ^= 3;
     gc_copy_update_regions();
-    heap.Tcopy = (float) gc_time.dt();
     after_copy_gc();
     // reset thread-local allocation areas
     clear_thread_local_buffers();
@@ -359,6 +357,7 @@
     vm_resume_threads_after();
     notify_gc_end();
     TRACE2("gc.mem", "copy_gc = " << res);
+    heap.Tcopy = (float) gc_time.dt();
     return res;
 }
 
@@ -366,15 +365,12 @@
     Timer gc_time("FORCE_GC", "gc.time.total");
     prepare_gc();
 
-    roots_clear();
-
     gc_type = GC_FORCED;
     TIME(enumerate_universe,());
     TIME(process_special_references,(soft_references));
     TIME(process_special_references,(weak_references));
     TIME(process_finalizable_objects,());
     TIME(process_special_references,(phantom_references));
-    roots_update();
     TIME(finalize_objects,());
 
     heap_mark_phase ^= 3;
@@ -385,3 +381,28 @@
     notify_gc_end();
 }
 
+void gc_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned) {
+    //Partial_Reveal_Object **ref1 = (Partial_Reveal_Object**)ref;
+    switch(gc_type) {
+        case GC_COPY: gc_copy_add_root_set_entry(ref, is_pinned); break;
+        case GC_FORCED: gc_forced_add_root_set_entry(ref, is_pinned); break;
+        case GC_SLIDE_COMPACT: gc_slide_add_root_set_entry(ref, is_pinned); break;
+        case GC_CACHE: gc_cache_add_root_set_entry(ref, is_pinned); break;
+                      
+        case GC_FULL:
+        default: abort();
+    }
+}
+
+void gc_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned)
+{
+    switch (gc_type) {
+        case GC_COPY: gc_copy_add_root_set_entry_interior_pointer(slot, offset, is_pinned); break;
+        case GC_FORCED: gc_forced_add_root_set_entry_interior_pointer(slot, offset, is_pinned); break;
+        case GC_SLIDE_COMPACT: gc_slide_add_root_set_entry_interior_pointer(slot, offset, is_pinned); break;
+        case GC_CACHE: gc_cache_add_root_set_entry_interior_pointer(slot, offset, is_pinned); break;
+
+        case GC_FULL:
+        default: abort();
+    }
+}

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.h?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.h (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.h Thu Sep 28 10:53:54 2006
@@ -23,9 +23,8 @@
 #include <assert.h>
 #include <open/gc.h>
 #include <open/types.h>
-#include "slot.h"
 
-extern fast_list<Slot,65536> slots;
+extern fast_list<Partial_Reveal_Object**,65536> slots;
 typedef fast_list<Partial_Reveal_Object*,1024> reference_vector;
 extern reference_vector finalizible_objects;
 extern reference_vector soft_references;
@@ -65,24 +64,27 @@
     assert(!(obj->vt() & (FORWARDING_BIT|RESCAN_BIT)));
 }
 
-void roots_clear();
-void roots_update();
-
-void gc_copy_add_root_set_entry(Slot slot);
+void gc_copy_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned);
+void gc_copy_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned);
 void gc_copy_update_regions();
 
-void gc_forced_add_root_set_entry(Slot slot);
+void gc_forced_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned);
+void gc_forced_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned);
 
-void gc_slide_add_root_set_entry(Slot slot);
+void gc_reset_interior_pointers();
+void gc_process_interior_pointers();
+void gc_slide_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned);
+void gc_slide_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned);
 void gc_slide_move_all();
 void gc_slide_process_special_references(reference_vector& array);
 void gc_slide_postprocess_special_references(reference_vector& array);
 
-void transition_copy_to_sliding_compaction(fast_list<Slot,65536>& slots);
-void gc_slide_process_transitional_slots(fast_list<Slot,65536>& slots);
-void gc_slide_process_transitional_slots(Reference *refs, int pos, int length);
+void transition_copy_to_sliding_compaction(fast_list<Partial_Reveal_Object**,65536>& slots);
+void gc_slide_process_transitional_slots(fast_list<Partial_Reveal_Object**,65536>& slots);
+void gc_slide_process_transitional_slots(Partial_Reveal_Object **refs, int pos, int length);
 
-void gc_cache_add_root_set_entry(Slot slot);
+void gc_cache_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned);
+void gc_cache_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned);
 void gc_cache_retrieve_root_set();
 void gc_cache_emit_root_set();
 

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_cache.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_cache.cpp?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_cache.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_cache.cpp Thu Sep 28 10:53:54 2006
@@ -29,22 +29,43 @@
 #include "root_set_cache.h"
 
 roots_vector root_set;
+fast_list<InteriorPointer,256> interior_pointers;
 
-void gc_cache_add_root_set_entry(Slot slot) {
-    root_set.push_back(slot);
+void gc_cache_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned) {
+    assert(!is_pinned);
+    assert(ref != NULL);
+    assert(*ref == NULL || ((unsigned char*)*ref >= heap.base && (unsigned char*)*ref < heap.ceiling));
+    root_set.push_back((Partial_Reveal_Object**)ref);
+}
+
+void gc_cache_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned)
+{
+    assert(!is_pinned);
+    InteriorPointer ip;
+    ip.obj = (Partial_Reveal_Object*) (*(unsigned char**)slot - offset);
+    ip.interior_ref = (Partial_Reveal_Object**)slot;
+    ip.offset = offset;
+    interior_pointers.push_back(ip);
 }
 
 void gc_cache_emit_root_set() {
     for(roots_vector::iterator r = root_set.begin(); r != root_set.end(); ++r) {
-        gc_add_root_set_entry_slot(*r);
+        gc_add_root_set_entry((Managed_Object_Handle*)*r, false);
+    }
+
+    for(fast_list<InteriorPointer,256>::iterator ip = interior_pointers.begin();
+            ip != interior_pointers.end(); ++ip) {
+        gc_add_root_set_entry_interior_pointer ((void**)(*ip).interior_ref, (*ip).offset, false);
     }
 }
 
 void gc_cache_retrieve_root_set() {
     root_set.clear();
+    interior_pointers.clear();
     GC_TYPE orig_gc_type = gc_type;
     gc_type = GC_CACHE;
     vm_enumerate_root_set_all_threads();
     gc_type = orig_gc_type;
     INFO2("gc.verbose", root_set.count() << " roots collected");
+    INFO2("gc.verbose", interior_pointers.count() << " interior pointers collected");
 }

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_copy.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_copy.cpp?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_copy.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_copy.cpp Thu Sep 28 10:53:54 2006
@@ -25,8 +25,6 @@
 #include <jni_types.h>
 #include "gc_types.h"
 #include "collect.h"
-#include "slot.h"
-
 
 void gc_copy_update_regions() {
     int n = 0;
@@ -54,7 +52,7 @@
     cleaning_needed = true;
 }
 
-static bool gc_copy_process_reference(Slot slot, int phase);
+static bool gc_copy_process_reference(Partial_Reveal_Object **ref, Boolean is_pinned, int phase);
 
 static inline bool 
 gc_copy_scan_array_object(Partial_Reveal_Object *array, int vector_length, int phase)
@@ -63,13 +61,13 @@
 
     int32 array_length = vector_length; //vector_get_length((Vector_Handle) array);
 
-    Reference *refs = (Reference*)
+    Partial_Reveal_Object **refs = (Partial_Reveal_Object**)
         vector_get_element_address_ref ((Vector_Handle) array, 0);
 
     for(int i = 0; i < array_length; i++) {
-        Slot slot(refs + i);
+        Partial_Reveal_Object **ref = &refs[i];
 
-        bool success = gc_copy_process_reference(slot, phase);
+        bool success = gc_copy_process_reference(ref, false, phase);
 
         if (!success) {
             // overflow in old objects
@@ -92,7 +90,6 @@
     if (endpos <= heap.old_objects.pos_limit) {
         heap.old_objects.pos = endpos;
         assert(endpos <= heap.old_objects.end);
-        assert(((POINTER_SIZE_INT) endpos & (GC_OBJECT_ALIGNMENT - 1)) == 0);
         return true;
     }
     TRACE2("gc.pin.gc", "old area: reached heap.old_objects.pos_limit =" << heap.old_objects.pos_limit);
@@ -130,54 +127,53 @@
         endpos = newpos + size;
         if (endpos <= heap.old_objects.pos_limit) {
             heap.old_objects.pos = endpos;
-            assert(((POINTER_SIZE_INT) endpos & (GC_OBJECT_ALIGNMENT - 1)) == 0);
             return true;
         }
     }
     return false;
 }
 
-static bool gc_copy_process_reference(Slot slot, int phase) {
-    Partial_Reveal_Object* obj = slot.read();
+static bool gc_copy_process_reference(Partial_Reveal_Object **ref, Boolean is_pinned, int phase) {
+    assert(ref);
+ 
+    Partial_Reveal_Object* obj = *ref;
 
-    if (obj == heap_null) return true;
-    assert(obj);
+    if (!obj) return true;
     assert(obj->vt() & ~(FORWARDING_BIT|RESCAN_BIT));
     TRACE2("gc.debug", "0x" << obj << " info = " << obj->obj_info());
 
-    unsigned info = obj->obj_info();
-    unsigned vt = obj->vt();
+    int info = obj->obj_info();
+    int vt = obj->vt();
 
     if (info & phase) {
         // object already marked, need to check if it is forwared still
         
         if (vt & FORWARDING_BIT) {
-            Partial_Reveal_Object *newpos = fw_to_pointer(vt & ~FORWARDING_BIT);
+            Partial_Reveal_Object *newpos = (Partial_Reveal_Object*) (vt & ~FORWARDING_BIT);
             assert_vt(newpos);
-            slot.write(newpos);
-        } else obj->valid();
+            *ref = newpos;
+        }
         return true;
     }
-    obj->valid();
 
     VMEXPORT Class_Handle vtable_get_class(VTable_Handle vh);
-    assert(class_get_vtable(vtable_get_class((VTable_Handle)obj->vtable())) == (VTable_Handle)obj->vtable());
-    TRACE2("gc.debug", "0x" << obj << " is " << class_get_name(vtable_get_class((VTable_Handle)obj->vtable())));
+    assert(class_get_vtable(vtable_get_class((VTable_Handle)obj->vt())) == (VTable_Handle)obj->vt());
+    TRACE2("gc.debug", "0x" << obj << " is " << class_get_name(vtable_get_class((VTable_Handle)obj->vt())));
 
     obj->obj_info() = (info & ~MARK_BITS) | phase;
 
     // move the object?
 #define pos ((unsigned char*) obj)
-    Partial_Reveal_VTable *vtable = ah_to_vtable(vt);
+    Partial_Reveal_VTable *vtable = (Partial_Reveal_VTable*) vt;
     GC_VTable_Info *gcvt = vtable->get_gcvt();
 
     if (pos >= heap.compaction_region_start() && pos < heap.compaction_region_end()) {
         int size = get_object_size(obj, gcvt);
 
         // is it not pinned?
-        if (size < 5000 && ((info & OBJECT_IS_PINNED_BITS) == 0)) {
+        if (size < 5000 &&  (!is_pinned) && ((info & OBJECT_IS_PINNED_BITS) == 0)) {
             if (info & HASHCODE_IS_SET_BIT) {
-                size += GC_OBJECT_ALIGNMENT;
+                size += 4;
             }
 
             // move the object
@@ -188,8 +184,8 @@
 
                 Partial_Reveal_Object *newobj = (Partial_Reveal_Object*) newpos;
                 if ((info & HASHCODE_IS_SET_BIT) && !(info & HASHCODE_IS_ALLOCATED_BIT)) {
-                    memcpy(newobj, obj, size-GC_OBJECT_ALIGNMENT);
-                    *(int*)(newpos + size-GC_OBJECT_ALIGNMENT) = gen_hashcode(obj);
+                    memcpy(newobj, obj, size-4);
+                    *(int*)(newpos + size-4) = gen_hashcode(obj);
                     newobj->obj_info() |= HASHCODE_IS_ALLOCATED_BIT;
                 } else {
                     memcpy(newobj, obj, size);
@@ -197,21 +193,21 @@
                 //TRACE2("gc.copy", "obj " << obj << " -> " << newobj << " + " << size);
                 assert(newobj->vt() == obj->vt());
                 assert(newobj->obj_info() & phase);
-                obj->vt() = pointer_to_fw(newobj);
+                obj->vt() = (POINTER_SIZE_INT)newobj | FORWARDING_BIT;
                 assert_vt(newobj);
-                slot.write(newobj);
+                *ref = newobj;
                 obj = newobj;
             } else {
                 // overflow! no more space in old objects area
                 // pinning the overflow object
                 pinned_areas_unsorted.push_back(pos);
                 pinned_areas_unsorted.push_back(pos + size
-                        + ((obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT) ? GC_OBJECT_ALIGNMENT : 0));
+                        + ((obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT) ? 4 : 0));
                 TRACE2("gc.pin", "add failed pinned area = " << pos << " " << pinned_areas_unsorted.back());
                 TRACE2("gc.pin", "failed object = " << pos);
                 // arange transition to slide compaction
                 obj->obj_info() &= ~MARK_BITS;
-                slots.push_back(slot);
+                slots.push_back(ref);
                 transition_copy_to_sliding_compaction(slots);
                 return false;
             }
@@ -220,9 +216,9 @@
             assert(gc_num != 1 || !(obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT));
             pinned_areas_unsorted.push_back(pos);
             pinned_areas_unsorted.push_back(pos + size
-                    + ((obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT) ? GC_OBJECT_ALIGNMENT : 0));
+                    + ((obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT) ? 4 : 0));
             TRACE2("gc.pin", "add pinned area = " << pos << " " << pinned_areas_unsorted.back() << " hash = " 
-                    << ((obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT) ? GC_OBJECT_ALIGNMENT : 0));
+                    << ((obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT) ? 4 : 0));
         }
     }
 
@@ -240,12 +236,15 @@
     if (type != NOT_REFERENCE) {
         switch (type) {
             case SOFT_REFERENCE:
+                TRACE2("gc.debug", "soft reference 0x" << obj);
                 add_soft_reference(obj);
                 break;
             case WEAK_REFERENCE:
+                TRACE2("gc.debug", "weak reference 0x" << obj);
                 add_weak_reference(obj);
                 break;
             case PHANTOM_REFERENCE:
+                TRACE2("gc.debug", "phantom reference 0x" << obj);
                 add_phantom_reference(obj);
                 break;
             default:
@@ -256,10 +255,10 @@
 
     int offset;
     while ((offset = *offset_list) != 0) {
-        Slot inner_slot((Reference*)(pos + offset));
+        Partial_Reveal_Object **slot = (Partial_Reveal_Object**)(pos + offset);
         //if (*slot) { looks like without check is better
-            TRACE2("gc.debug", "0x" << inner_slot.read() << " referenced from object = 0x" << obj);
-            slots.push_back(inner_slot);
+            TRACE2("gc.debug", "0x" << *slot << " referenced from object = 0x" << obj);
+            slots.push_back(slot);
         //}
 
         offset_list++;
@@ -269,19 +268,44 @@
 #undef pos
 }
 
-void gc_copy_add_root_set_entry(Slot root) {
+static void gc_copy_add_root_set_entry_internal(Partial_Reveal_Object **ref, Boolean is_pinned) {
     // FIXME: check for zero here, how it reflect perfomance, should be better!
     // and possibly remove check in gc_copy_process_reference
     // while added check in array handling
 
+#ifdef _DEBUG
+    if (*ref) {
+        TRACE2("gc.debug", "0x" << *ref << " referenced from root = 0x" << ref << " info = " << (*ref)->obj_info());
+    }
+#endif
+
     int phase = heap_mark_phase;
-    gc_copy_process_reference(root, phase);
+    gc_copy_process_reference(ref, is_pinned, phase);
 
     while (true) {
         if (slots.empty()) break;
-        Slot slot = slots.pop_back();
-        slot.read();
-        gc_copy_process_reference(slot, phase);
+        Partial_Reveal_Object **ref = slots.pop_back();
+        *ref;
+        gc_copy_process_reference(ref, false, phase);
+    }
+}
+
+void gc_copy_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned) {
+    assert(!is_pinned);
+    //TRACE2("gc.enum", "gc_add_root_set_entry");
+    gc_copy_add_root_set_entry_internal((Partial_Reveal_Object**)ref, is_pinned);
+}
+
+void gc_copy_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned)
+{
+    assert(!is_pinned);
+    int *ref = (int*)slot;
+    int oldobj = *ref - offset;
+    int newobj = oldobj;
+
+    gc_copy_add_root_set_entry_internal((Partial_Reveal_Object**)&newobj, is_pinned);
+    if (newobj != oldobj) {
+        *ref = newobj + offset;
     }
 }
 

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_forced.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_forced.cpp?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_forced.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_forced.cpp Thu Sep 28 10:53:54 2006
@@ -28,7 +28,7 @@
 extern fast_list<Partial_Reveal_Object*, 65536> objects; // FIXME: duplication of memory slots and objects
                                                   // FIXME: move to header file
 
-static void forced_process_reference(Partial_Reveal_Object *obj);
+static void forced_process_reference(Partial_Reveal_Object *obj, Boolean is_pinned);
 
 static inline void 
 forced_scan_array_object(Partial_Reveal_Object *array, int vector_length)
@@ -38,27 +38,27 @@
 
     int32 array_length = vector_length; //vector_get_length((Vector_Handle) array);
 
-    Reference *refs = (Reference*)
+    Partial_Reveal_Object **refs = (Partial_Reveal_Object**)
         vector_get_element_address_ref ((Vector_Handle) array, 0);
 
     for(int i = 0; i < array_length; i++) {
-        Slot slot(refs + i);
-        Partial_Reveal_Object *obj = slot.read();
-        if (obj != heap_null) {
-            forced_process_reference(obj);
+        Partial_Reveal_Object **ref = &refs[i];
+        Partial_Reveal_Object *obj = *ref;
+        if (obj != 0) {
+            forced_process_reference(obj, false);
         }
     }
 }
 
-static void forced_process_reference(Partial_Reveal_Object *obj) {
+static void forced_process_reference(Partial_Reveal_Object *obj, Boolean is_pinned) {
+    assert(!is_pinned);
 
     assert(obj->vt() & ~FORWARDING_BIT);
 
-    unsigned info = obj->obj_info();
+    int info = obj->obj_info();
     if (info & heap_mark_phase) {
         return;
     }
-    obj->valid();
 
     obj->obj_info() = (info & ~MARK_BITS) | heap_mark_phase;
 
@@ -99,23 +99,35 @@
 
     int offset;
     while ((offset = *offset_list) != 0) {
-        Slot slot( (Reference*)(((char*)obj) + offset) );
+        Partial_Reveal_Object **slot = (Partial_Reveal_Object**)(((char*)obj) + offset);
         offset_list++;
-        Partial_Reveal_Object *object = slot.read();
-        if (object != heap_null) {
+        Partial_Reveal_Object *object = *slot;
+        if (object != 0) {
             objects.push_back(object);
         }
     }
 }
 
-void gc_forced_add_root_set_entry(Slot slot) {
-    Partial_Reveal_Object *obj = slot.read();
-    if (obj == heap_null) return;
-    forced_process_reference(obj);
+static void gc_forced_add_root_set_entry_internal(Partial_Reveal_Object *obj, Boolean is_pinned) {
+    forced_process_reference(obj, is_pinned);
 
     while (!objects.empty()) {
         Partial_Reveal_Object *obj = objects.pop_back();
-        forced_process_reference(obj);
+        forced_process_reference(obj, false);
     }
 }
 
+void gc_forced_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned) {
+    Partial_Reveal_Object *obj = *(Partial_Reveal_Object**)ref;
+    if (obj == 0) return;
+    gc_forced_add_root_set_entry_internal(obj, is_pinned);
+}
+
+void gc_forced_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned)
+{
+    int *ref = (int*)slot;
+    int obj = *ref - offset;
+    if (obj == 0) return;
+
+    gc_forced_add_root_set_entry_internal((Partial_Reveal_Object*)obj, is_pinned);
+}

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_slide_compact.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_slide_compact.cpp?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_slide_compact.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_slide_compact.cpp Thu Sep 28 10:53:54 2006
@@ -31,6 +31,7 @@
 unsigned char *mark_bits;
 int mark_bits_size;
 fast_list<Partial_Reveal_Object*, 65536> objects;
+static fast_list<InteriorPointer,256> comp_interior_pointers;
 
 static inline bool
 is_compaction_object(Partial_Reveal_Object *refobj) {
@@ -45,37 +46,29 @@
 }
 
 static inline void
-update_forwarded_reference(Partial_Reveal_Object *obj, Slot slot) {
+update_forwarded_reference(Partial_Reveal_Object *obj, Partial_Reveal_Object **ref) {
     assert(!(obj->vt() & RESCAN_BIT));
     assert(obj->vt() & FORWARDING_BIT);
-    slot.write(fw_to_pointer(obj->vt() & ~FORWARDING_BIT));
+    *(int*)ref = obj->vt() & ~FORWARDING_BIT;
 }
 
-#if GC_OBJECT_ALIGNMENT == 8
-#define GC_OBJECT_ALIGNMENT_SHIFT 3
-#elif GC_OBJECT_ALIGNMENT == 4
-#define GC_OBJECT_ALIGNMENT_SHIFT 2
-#else
-#error not detected GC_OBJECT_ALIGNMENT
-#endif
-
 static inline bool mark_bit_is_set(Partial_Reveal_Object *obj) {
-    size_t addr = (POINTER_SIZE_INT)obj - (POINTER_SIZE_INT) heap_base;
-    addr >>= GC_OBJECT_ALIGNMENT_SHIFT;
-    size_t bit = addr & 7; // FIXME: use defines
-    size_t byte = addr >> 3;
+    int addr = (POINTER_SIZE_INT)obj - (POINTER_SIZE_INT) heap_base;
+    addr >>= 2;
+    int bit = addr & 7; // FIXME: use defines
+    int byte = addr >> 3;
     return mark_bits[byte] & ((unsigned char)1 << bit);
 }
 
-static inline void enqueue_reference(Partial_Reveal_Object *refobj, Slot slot) {
+static inline void enqueue_reference(Partial_Reveal_Object *refobj, Partial_Reveal_Object **ref) {
     assert(is_compaction_object(refobj));
     assert(!is_forwarded_object(refobj));
     //assert(*ref == refobj);
     assert(refobj->obj_info());
 
-    unsigned &info = refobj->obj_info();
-    slot.write_raw(info);
-    info = slot.addr() | heap_mark_phase; //(int)ref
+    int &info = refobj->obj_info();
+    *(int*)ref = info;
+    info = (int)ref | heap_mark_phase;
 }
 
 static inline bool is_object_marked(Partial_Reveal_Object *obj) {
@@ -83,17 +76,17 @@
 }
 
 static inline void set_mark_bit(Partial_Reveal_Object *obj) {
-    size_t addr = (POINTER_SIZE_INT)obj - (POINTER_SIZE_INT) heap_base;
-    addr >>= GC_OBJECT_ALIGNMENT_SHIFT;
-    size_t bit = addr & 7; // FIXME: use defines
-    size_t byte = addr >> 3;
+    int addr = (POINTER_SIZE_INT)obj - (POINTER_SIZE_INT) heap_base;
+    addr >>= 2;
+    int bit = addr & 7; // FIXME: use defines
+    int byte = addr >> 3;
     mark_bits[byte] |=  ((unsigned char) 1 << bit);
 }
 
 static inline bool mark_object(Partial_Reveal_Object *obj) {
     int phase = heap_mark_phase;
 
-    assert((unsigned char*) obj >= heap.base && (unsigned char*) obj < heap.ceiling);
+    assert((unsigned char*) obj >= heap_base && (unsigned char*) obj < heap_ceiling);
     assert(obj->vt() != 0);
 
     // is object already marked
@@ -101,10 +94,9 @@
         return false;
     }
 
-    obj->valid();
     assert(!is_forwarded_object(obj));
 
-    unsigned info = obj->obj_info();
+    int info = obj->obj_info();
 
     if (is_compaction_object(obj)) {
         set_mark_bit(obj);
@@ -113,7 +105,7 @@
             pinned_areas_unsorted.push_back((unsigned char*)obj);
             int size = get_object_size(obj, obj->vtable()->get_gcvt());
             pinned_areas_unsorted.push_back((unsigned char*)obj + size
-                    + ((info & HASHCODE_IS_ALLOCATED_BIT) ? GC_OBJECT_ALIGNMENT : 0));
+                    + ((info & HASHCODE_IS_ALLOCATED_BIT) ? 4 : 0));
             TRACE2("gc.pin", "add pinned area = " << (unsigned char*)obj << " " << pinned_areas_unsorted.back());
         }
 
@@ -132,37 +124,41 @@
 }
 
 static inline void process_reference_queue(Partial_Reveal_Object *newobj, Partial_Reveal_Object *obj) {
-    unsigned info = obj->obj_info();
+    int info = obj->obj_info();
     assert(info);
     assert(info & heap_mark_phase); assert(is_compaction_object(obj));
 
     while (!(info & prev_mark_phase)) {
         assert(info);
         assert(info & heap_mark_phase);
-        Slot slot((Reference*) fw_to_pointer(info & ~MARK_BITS));
-        info = slot.read_raw(); //(int)*ref;
-        slot.write(newobj);
+        Partial_Reveal_Object **ref = (Partial_Reveal_Object**) (info & ~MARK_BITS);
+        info = (int)*ref;
+        *ref = newobj;
     }
     obj->obj_info() = info & ~MARK_BITS;
 }
 
+void gc_reset_interior_pointers() { // FIXME: rename
+    comp_interior_pointers.clear();
+}
+
 static void postprocess_array(Partial_Reveal_Object *array, int vector_length, Partial_Reveal_Object *oldobj) {
     // No primitive arrays allowed
     assert(!is_array_of_primitives(array));
     assert(is_compaction_object(array));
     assert(!is_forwarded_object(array));
 
-    int array_length = vector_length; //vector_get_length((Vector_Handle) array);
+    int32 array_length = vector_length; //vector_get_length((Vector_Handle) array);
 
-    Reference *refs = (Reference*) vector_get_element_address_ref ((Vector_Handle) array, 0);
+    Partial_Reveal_Object **refs = (Partial_Reveal_Object**) vector_get_element_address_ref ((Vector_Handle) array, 0);
 
     for(int i = 0; i < array_length; i++) {
-        Slot slot(refs + i);
-        POINTER_SIZE_INT refobj_int = (POINTER_SIZE_INT)slot.read();
+        Partial_Reveal_Object **ref = &refs[i];
+        POINTER_SIZE_INT refobj_int = (POINTER_SIZE_INT)*ref;
         POINTER_SIZE_INT refobj_unmarked = refobj_int & ~1;
         if (refobj_int == refobj_unmarked) continue; // not specially marked reference
         Partial_Reveal_Object *refobj = (Partial_Reveal_Object*) refobj_unmarked;
-        enqueue_reference(refobj, slot);
+        enqueue_reference(refobj, ref);
     }
 }
 
@@ -177,12 +173,10 @@
     assert(is_compaction_object(obj));
     assert(!is_forwarded_object(obj));
  
-    assert((unsigned char*) obj >= heap.base && (unsigned char*) obj < heap.ceiling);
+    assert((unsigned char*) obj >= heap_base && (unsigned char*) obj < heap_ceiling);
     assert(obj->vt() & RESCAN_BIT);
-
-    VT32 vt = obj->vt() & ~RESCAN_BIT;
-    obj->vt() = vt;
-    Partial_Reveal_VTable *vtable = ah_to_vtable(vt);
+    Partial_Reveal_VTable *vtable = (Partial_Reveal_VTable*) (obj->vt() & ~RESCAN_BIT);
+    obj->vt() = (int) vtable;
     GC_VTable_Info *gcvt = vtable->get_gcvt();
 
     // process slots
@@ -195,34 +189,34 @@
     }
 
     if (gcvt->reference_type() != NOT_REFERENCE) {
-        Slot slot((Reference*)((char*)obj + global_referent_offset));
+        Partial_Reveal_Object **ref = (Partial_Reveal_Object**)((char*)obj + global_referent_offset);
 
-        POINTER_SIZE_INT refobj_int = (POINTER_SIZE_INT)slot.read();
+        POINTER_SIZE_INT refobj_int = (POINTER_SIZE_INT)*ref;
         POINTER_SIZE_INT refobj_unmarked = refobj_int & ~1;
         if (refobj_int != refobj_unmarked) {
             Partial_Reveal_Object *refobj = (Partial_Reveal_Object*) refobj_unmarked;
-            enqueue_reference(refobj, slot);
+            enqueue_reference(refobj, ref);
         }
     }
 
     int *offset_list = gcvt->offset_array();
     int offset;
     while ((offset = *offset_list) != 0) {
-        Slot slot( (Reference*)((char*)obj + offset));
+        Partial_Reveal_Object **ref = (Partial_Reveal_Object**)((char*)obj + offset);
         offset_list++;
 
-        POINTER_SIZE_INT refobj_int = (POINTER_SIZE_INT)slot.read();
+        POINTER_SIZE_INT refobj_int = (POINTER_SIZE_INT)*ref;
         POINTER_SIZE_INT refobj_unmarked = refobj_int & ~1;
         if (refobj_int == refobj_unmarked) continue; // not specially marked reference
         Partial_Reveal_Object *refobj = (Partial_Reveal_Object*) refobj_unmarked;
-        enqueue_reference(refobj, slot);
+        enqueue_reference(refobj, ref);
     }
 }
 
 void gc_slide_move_all() {
     unsigned char *compact_pos = heap.compaction_region_start();
     unsigned char *compact_pos_limit = heap.compaction_region_end();
-    unsigned char *next_pinned_object = heap.compaction_region_end();
+    unsigned char *next_pinned_object = heap.ceiling;
     unsigned next_pinned_object_pos = 0;
 
     prev_mark_phase = heap_mark_phase ^ 3;
@@ -266,14 +260,14 @@
         break;
     }
 
-    pinned_areas.push_back(heap.compaction_region_end());
+    pinned_areas.push_back(heap.ceiling);
 
     int *mark_words = (int*) mark_bits;
     // Searching marked bits
-    unsigned start = (unsigned)(heap.compaction_region_start() - heap_base) / GC_OBJECT_ALIGNMENT / sizeof(int) / 8;
-    unsigned end = (unsigned)(heap.compaction_region_end() - heap_base + GC_OBJECT_ALIGNMENT * sizeof(int) * 8 - 1) / GC_OBJECT_ALIGNMENT / sizeof(int) / 8;
-    if (end > mark_bits_size/sizeof(int)) end = mark_bits_size/sizeof(int);
-    for(unsigned i = start; i < end; i++) {
+    int start = (heap.compaction_region_start() - heap_base) / sizeof(void*) / sizeof(int) / 8;
+    int end = (heap.compaction_region_end() - heap_base + sizeof(void*) + sizeof(int) * 8 - 1) / sizeof(void*) / sizeof(int) / 8;
+    if (end > mark_bits_size/4) end = mark_bits_size/4;
+    for(int i = start; i < end; i++) {
         // no marked bits in word - skip
 
         int word = mark_words[i];
@@ -281,12 +275,12 @@
 
         for(int bit = 0; bit < 32; bit++) {
             if (word & 1) {
-                unsigned char *pos = heap_base + i * 8 * GC_OBJECT_ALIGNMENT * sizeof(int) + bit * GC_OBJECT_ALIGNMENT;
+                unsigned char *pos = heap_base + i * 32 * 4 + bit * 4;
                 Partial_Reveal_Object *obj = (Partial_Reveal_Object*) pos;
 
-                VT32 vt = obj->vt();
+                int vt = obj->vt();
                 bool post_processing = vt & RESCAN_BIT;
-                Partial_Reveal_VTable *vtable = ah_to_vtable(vt & ~RESCAN_BIT);
+                Partial_Reveal_VTable *vtable = (Partial_Reveal_VTable*)(vt & ~RESCAN_BIT);
                 int size = get_object_size(obj, vtable->get_gcvt());
 
                 assert(is_object_marked(obj));
@@ -294,7 +288,7 @@
 
                 if ((unsigned char*)obj != next_pinned_object) {
 
-                    // 4/8 bytes reserved for hash
+                    // 4 bytes reserved for hash
                     while (compact_pos + size > compact_pos_limit) {
                         assert(pinned_areas_pos < pinned_areas.size());
                         compact_pos = pinned_areas[pinned_areas_pos];
@@ -307,10 +301,10 @@
                     if (compact_pos >= pos) {
                         newobj = obj;
                         process_reference_queue(obj, obj);
-                        unsigned info = obj->obj_info();
+                        int info = obj->obj_info();
                         if (compact_pos == pos) {
-                            compact_pos += size +
-                                (((info & HASHCODE_IS_ALLOCATED_BIT) != 0) ? GC_OBJECT_ALIGNMENT : 0);
+                            assert(HASHCODE_IS_ALLOCATED_BIT == 4);
+                            compact_pos += size + (info & HASHCODE_IS_ALLOCATED_BIT);
                         } else {
                             assert(compact_pos >= pos + size);
                         }
@@ -320,11 +314,11 @@
 
                         newobj = (Partial_Reveal_Object*) newpos;
                         process_reference_queue(newobj, obj);
-                        unsigned info = obj->obj_info();
+                        int info = obj->obj_info();
 
                         if (info & HASHCODE_IS_SET_BIT) {
-                            size += GC_OBJECT_ALIGNMENT;
-                            compact_pos += GC_OBJECT_ALIGNMENT;
+                            size += 4;
+                            compact_pos += 4;
                         }
 
                         if (newpos + size <= pos) {
@@ -333,7 +327,7 @@
                             memmove(newpos, pos, size);
                         }
                         if (info & HASHCODE_IS_SET_BIT && !(info & HASHCODE_IS_ALLOCATED_BIT)) {
-                            *(int*)(newpos + size - GC_OBJECT_ALIGNMENT) = gen_hashcode(pos);
+                            *(int*)(newpos + size - 4) = gen_hashcode(pos);
                             newobj->obj_info() |= HASHCODE_IS_ALLOCATED_BIT;
                         }
                     }
@@ -357,12 +351,12 @@
         }
     }
     assert(next_pinned_object >= heap.compaction_region_end());
-    pinned_areas.pop_back(); //heap.compaction_region_end()
+    pinned_areas.pop_back(); //heap.ceiling
 
     TRACE2("gc.mem", "compaction: region size = "
             << (heap.compaction_region_end() - heap.compaction_region_start()) / 1024 / 1024 << " mb");
     TRACE2("gc.mem", "compaction: free_space = "
-            << (heap.compaction_region_end() - compact_pos) / 1024 / 1024 << " mb");
+            << (heap.ceiling - compact_pos) / 1024 / 1024 << " mb");
 
     cleaning_needed = true;
     heap.pos = compact_pos;
@@ -376,7 +370,7 @@
     old_pinned_areas_pos = 1;
 }
 
-static void slide_process_object(Partial_Reveal_Object *obj);
+static void slide_process_object(Partial_Reveal_Object *obj, Boolean is_pinned);
 
 static inline void 
 slide_scan_array_object(Partial_Reveal_Object *array, Partial_Reveal_VTable *vtable, int vector_length)
@@ -387,28 +381,28 @@
 
     int32 array_length = vector_length; //vector_get_length((Vector_Handle) array);
 
-    Reference *refs = (Reference*) vector_get_element_address_ref ((Vector_Handle) array, 0);
+    Partial_Reveal_Object **refs = (Partial_Reveal_Object**) vector_get_element_address_ref ((Vector_Handle) array, 0);
 
     if (is_compaction_object(array)) {
         bool rescan = false;
         for(int i = 0; i < array_length; i++) {
-            Slot slot(refs + i);
-            Partial_Reveal_Object *refobj = slot.read();
-            if (refobj == heap_null) continue;
+            Partial_Reveal_Object **ref = &refs[i];
+            Partial_Reveal_Object *refobj = *ref;
+            if (!refobj) continue;
 
             if (mark_object(refobj)) {
-                slide_process_object(refobj);
+                slide_process_object(refobj, false);
             } else if (is_forwarded_object(refobj)) {
-                update_forwarded_reference(refobj, slot);
+                update_forwarded_reference(refobj, ref);
                 continue;
             }
 
             if (is_compaction_object(refobj)) {
-                if (is_left_object(refobj, slot)) {
-                    enqueue_reference(refobj, slot);
+                if (is_left_object(refobj, ref)) {
+                    enqueue_reference(refobj, ref);
                 } else {
                     // mark_rescan_reference
-                    slot.write( (Partial_Reveal_Object*) ((size_t)refobj | 1) );
+                    *ref = (Partial_Reveal_Object*) ((size_t)refobj | 1);
                     rescan = true;
                 }
             }
@@ -416,35 +410,36 @@
         if (rescan) set_rescan_bit(array);
     } else {
         for(int i = 0; i < array_length; i++) {
-            Slot slot(refs + i);
-            Partial_Reveal_Object *refobj = slot.read();
-            if (refobj == heap_null) continue;
+            Partial_Reveal_Object **ref = &refs[i];
+            Partial_Reveal_Object *refobj = *ref;
+            if (!refobj) continue;
 
             if (mark_object(refobj)) {
-                slide_process_object(refobj);
+                slide_process_object(refobj, false);
             } else if (is_forwarded_object(refobj)) {
-                update_forwarded_reference(refobj, slot);
+                update_forwarded_reference(refobj, ref);
                 continue;
             }
 
             if (is_compaction_object(refobj)) {
-                enqueue_reference(refobj, slot);
+                enqueue_reference(refobj, ref);
             }
         }
     }
 }
 
-static void slide_process_object(Partial_Reveal_Object *obj) {
+static void slide_process_object(Partial_Reveal_Object *obj, Boolean is_pinned) {
 
+    assert(!is_pinned);
     assert(obj);
-    assert((unsigned char*) obj >= heap.base && (unsigned char*) obj < heap.ceiling);
+    assert((unsigned char*) obj >= heap_base && (unsigned char*) obj < heap_ceiling);
     assert(is_object_marked(obj));
     //assert(mark_bit_is_set(obj) || !is_compaction_object(obj));
 
-    unsigned vt = obj->vt();
+    int vt = obj->vt();
     assert(obj->vt() & ~RESCAN_BIT); // has vt
 
-    Partial_Reveal_VTable *vtable = ah_to_vtable(vt & ~RESCAN_BIT);
+    Partial_Reveal_VTable *vtable = (Partial_Reveal_VTable*) (vt & ~RESCAN_BIT);
     GC_VTable_Info *gcvt = vtable->get_gcvt();
 
     // process slots
@@ -485,25 +480,25 @@
         bool rescan = false;
         int offset;
         while ((offset = *offset_list) != 0) {
-            Slot slot((Reference*)((char*)obj + offset));
-            Partial_Reveal_Object *refobj = slot.read();
+            Partial_Reveal_Object **ref = (Partial_Reveal_Object**)((char*)obj + offset);
+            Partial_Reveal_Object *refobj = *ref;
             offset_list++;
 
-            if (refobj == heap_null) continue;
+            if (!refobj) continue;
 
             if (mark_object(refobj)) {
                 objects.push_back(refobj);
             } else if (is_forwarded_object(refobj)) {
-                update_forwarded_reference(refobj, slot);
+                update_forwarded_reference(refobj, ref);
                 continue;
             }
 
             if (is_compaction_object(refobj)) {
-                if (is_left_object(refobj, slot)) {
-                    enqueue_reference(refobj, slot);
+                if (is_left_object(refobj, ref)) {
+                    enqueue_reference(refobj, ref);
                 } else {
                     // mark_rescan_reference
-                    slot.write( (Partial_Reveal_Object*) ((size_t)refobj | 1) );
+                    *ref = (Partial_Reveal_Object*) ((size_t)refobj | 1);
                     rescan = true;
                 }
             }
@@ -512,54 +507,79 @@
     } else {
         int offset;
         while ((offset = *offset_list) != 0) {
-            Slot slot((Reference*)((char*)obj + offset));
-            Partial_Reveal_Object *refobj = slot.read();
+            Partial_Reveal_Object **ref = (Partial_Reveal_Object**)((char*)obj + offset);
+            Partial_Reveal_Object *refobj = *ref;
             offset_list++;
 
-            if (refobj == heap_null) continue;
+            if (!refobj) continue;
 
             if (mark_object(refobj)) {
                 objects.push_back(refobj);
             } else if (is_forwarded_object(refobj)) {
-                update_forwarded_reference(refobj, slot);
+                update_forwarded_reference(refobj, ref);
                 continue;
             }
 
             if (is_compaction_object(refobj)) {
-                enqueue_reference(refobj, slot);
+                enqueue_reference(refobj, ref);
             }
         }
     }
 
 }
 
-void gc_slide_add_root_set_entry(Slot slot) {
+static void gc_slide_add_root_set_entry_internal(Partial_Reveal_Object **ref, Boolean is_pinned) {
     // get object
-    Partial_Reveal_Object *refobj = slot.read();
+    Partial_Reveal_Object *refobj = *ref;
 
     // check no garbage
-    assert(((POINTER_SIZE_INT)refobj & 3) == 0);
+    assert(((int)refobj & 3) == 0);
 
     // empty references is not interesting
-    if (refobj == heap_null) return;
+    if (!refobj) return;
+    assert(!is_pinned); // no pinning allowed for now
 
     if (mark_object(refobj)) {
         // object wasn't marked yet
-        slide_process_object(refobj);
+        slide_process_object(refobj, is_pinned);
     } else if (is_forwarded_object(refobj)) {
-        update_forwarded_reference(refobj, slot);
+        update_forwarded_reference(refobj, ref);
         goto skip;
     }
 
     if (is_compaction_object(refobj)) {
-        enqueue_reference(refobj, slot);
+        enqueue_reference(refobj, ref);
     }
 skip:
 
     while (true) {
         if (objects.empty()) break;
         Partial_Reveal_Object *obj = objects.pop_back();
-        slide_process_object(obj);
+        slide_process_object(obj, false);
+    }
+}
+
+void gc_slide_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned) {
+    //TRACE2("gc.enum", "gc_add_root_set_entry");
+    gc_slide_add_root_set_entry_internal((Partial_Reveal_Object**)ref, is_pinned);
+}
+
+void gc_slide_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned)
+{
+    InteriorPointer ip;
+    ip.obj = (Partial_Reveal_Object*) (*(unsigned char**)slot - offset);
+    ip.interior_ref = (Partial_Reveal_Object**)slot;
+    ip.offset = offset;
+    InteriorPointer& ips = comp_interior_pointers.push_back(ip);
+    gc_slide_add_root_set_entry_internal((Partial_Reveal_Object**)&ips.obj, is_pinned);
+}
+
+void gc_process_interior_pointers() {
+    fast_list<InteriorPointer,256>::iterator begin = comp_interior_pointers.begin();
+    fast_list<InteriorPointer,256>::iterator end = comp_interior_pointers.end();
+
+    for(fast_list<InteriorPointer,256>::iterator i = begin; i != end; ++i) {
+        *(*i).interior_ref = (Partial_Reveal_Object*)((unsigned char*)(*i).obj + (*i).offset);
     }
 }
 
@@ -568,13 +588,13 @@
             i != array.end(); ++i) {
         Partial_Reveal_Object *obj = *i;
 
-        Slot slot(
-            (Reference*) ((unsigned char *)obj + global_referent_offset));
-        Partial_Reveal_Object* refobj = slot.read();
+        Partial_Reveal_Object **ref = 
+            (Partial_Reveal_Object**) ((unsigned char *)obj + global_referent_offset);
+        Partial_Reveal_Object* refobj = *ref;
 
         if (refobj == 0) {
             // reference already cleared, no post processing needed
-            *i = heap_null;
+            *i = 0;
             continue;
         }
 
@@ -582,32 +602,31 @@
             //assert(mark_bit_is_set(refobj) || !is_compaction_object(refobj) || is_forwarded_object(refobj));
 
             if (is_forwarded_object(refobj)) {
-                update_forwarded_reference(refobj, slot);
+                update_forwarded_reference(refobj, ref);
             } else if (is_compaction_object(refobj)) {
-                if (is_left_object(refobj, slot) || !is_compaction_object(obj)) {
-                    enqueue_reference(refobj, slot);
+                if (is_left_object(refobj, ref) || !is_compaction_object(obj)) {
+                    enqueue_reference(refobj, ref);
                 } else {
                     // mark_rescan_reference
-                    slot.write( (Partial_Reveal_Object*) ((size_t)refobj | 1) );
+                    *ref = (Partial_Reveal_Object*) ((size_t)refobj | 1);
                     set_rescan_bit(obj);
                 }
             }
 
             // no post processing needed
-            *i = heap_null;
+            *i = 0;
             continue;
         } else {
             //assert(!mark_bit_is_set(refobj));
         }
 
         // object not marked, clear reference
-        slot.write((Partial_Reveal_Object*) heap_null);
-        Slot root = make_direct_root(&*i);
+        *ref = (Partial_Reveal_Object*)0;
 
         if (is_forwarded_object(obj)) {
-            update_forwarded_reference(obj, root);
+            update_forwarded_reference(obj, &*i);
         } else if (is_compaction_object(obj)) {
-            enqueue_reference(obj, root);
+            enqueue_reference(obj, &*i);
         }
     }
 }
@@ -617,8 +636,8 @@
             i != array.end(); ++i) {
         Partial_Reveal_Object *obj = *i;
 
-        if (obj == heap_null) continue;
-        vm_enqueue_reference((Managed_Object_Handle)obj);
+        if (!obj) continue;
+        vm_enqueue_reference((Managed_Object_Handle*)obj);
     }
 }
 
@@ -626,7 +645,7 @@
 // all previous references are processed in copying collector
 // so will not move, they can be considered as root references here
 
-void gc_slide_process_transitional_slots(fast_list<Slot,65536>& slots) {
+void gc_slide_process_transitional_slots(fast_list<Partial_Reveal_Object**,65536>& slots) {
     // also process pinned objects all but last
     pinned_areas_unsorted_t::iterator end = --(--pinned_areas_unsorted.end());
     for(pinned_areas_unsorted_t::iterator i = pinned_areas_unsorted.begin();
@@ -640,14 +659,13 @@
 
     while (true) {
         if (slots.empty()) break;
-        Slot slot = slots.pop_back();
-        gc_slide_add_root_set_entry(slot);
+        Partial_Reveal_Object **ref = slots.pop_back();
+        gc_slide_add_root_set_entry_internal(ref, false);
     }
 }
-
-void gc_slide_process_transitional_slots(Reference *refs, int pos, int length) {
+void gc_slide_process_transitional_slots(Partial_Reveal_Object **refs, int pos, int length) {
     for(int i = pos; i < length; i++) {
-        Slot slot(refs + i);
-        gc_slide_add_root_set_entry(slot);
+        Partial_Reveal_Object **ref = &refs[i];
+        gc_slide_add_root_set_entry_internal(ref, false);
     }
 }

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp Thu Sep 28 10:53:54 2006
@@ -29,17 +29,11 @@
 GC_Thread_Info *thread_list;
 volatile int thread_list_lock;
 int num_threads = 0;
-Ptr vtable_base;
 
 fast_list<Partial_Reveal_Object*, 1024> finalizible_objects;
 
-#ifdef POINTER64
-GCExport Boolean gc_supports_compressed_references() {
-    vtable_base = (Ptr) vm_get_vtable_base();
-    return true;
-}
-#endif
 
+// GCExport Boolean gc_supports_compressed_references(); optional
 GCExport void gc_write_barrier(Managed_Object_Handle p_base_of_obj_with_slot) {
     TRACE2("gc.wb", "gc_write_barrier");
 }
@@ -85,6 +79,8 @@
     TRACE2("gc.init2", "gc_vm_initialized called (" << count++ << ")");
 }
 
+//GCExport void gc_add_compressed_root_set_entry(uint32 *ref, Boolean is_pinned); optional
+
 void gc_add_weak_root_set_entry(Managed_Object_Handle *slot, 
     Boolean is_pinned, Boolean is_short_weak) {
     TRACE2("gc.enum", "gc_add_weak_root_set_entry - EMPTY");
@@ -162,7 +158,7 @@
     unsigned char *next;
 
     GC_Thread_Info *info = (GC_Thread_Info *) thread_pointer;
-    Partial_Reveal_VTable *vtable = ah_to_vtable(ah);
+    Partial_Reveal_VTable *vtable = (Partial_Reveal_VTable*) ah;
     GC_VTable_Info *gcvt = vtable->get_gcvt();
     unsigned char *cleaned = info->tls_current_cleaned;
     unsigned char *res = info->tls_current_free;
@@ -171,9 +167,8 @@
         if (gcvt->is_finalizible()) return 0;
 
         info->tls_current_free =  res + in_size;
-        *(VT32*)res = ah;
+        *(int*)res = ah;
 
-        assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
         return res;
     }
 
@@ -191,9 +186,8 @@
         if (cleaned_new > ceiling) cleaned_new = ceiling;
         info->tls_current_cleaned = cleaned_new;
         memset(cleaned, 0, cleaned_new - cleaned);
-        *(VT32*)res = ah;
+        *(int*)res = ah;
 
-        assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
         return res;
     }
 
@@ -208,7 +202,7 @@
     assert (ah);
 
     GC_Thread_Info *info = (GC_Thread_Info *) thread_pointer;
-    Partial_Reveal_VTable *vtable = ah_to_vtable(ah);
+    Partial_Reveal_VTable *vtable = (Partial_Reveal_VTable*) ah;
     GC_VTable_Info *gcvt = vtable->get_gcvt();
     unsigned char *res = info->tls_current_free;
     unsigned char *cleaned = info->tls_current_cleaned;
@@ -217,9 +211,8 @@
 
         if (res + in_size <= cleaned) {
             info->tls_current_free =  res + in_size;
-            *(VT32*)res = ah;
+            *(int*)res = ah;
 
-            assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
             return res;
         }
 
@@ -236,8 +229,7 @@
             info->tls_current_cleaned = cleaned_new;
             memset(cleaned, 0, cleaned_new - cleaned);
 
-            *(VT32*)res = ah;
-            assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
+            *(int*)res = ah;
             return (Managed_Object_Handle)res;
         }
     }
@@ -263,8 +255,7 @@
             memset(obj, 0, size);
             finalizible_objects.push_back((Partial_Reveal_Object*) obj);
             vm_gc_unlock_enum();
-            *(VT32*)obj = ah;
-            assert(((POINTER_SIZE_INT)obj & (GC_OBJECT_ALIGNMENT - 1)) == 0);
+            *(int*)obj = ah;
             return (Managed_Object_Handle)obj;
         }
 
@@ -275,6 +266,7 @@
         if (res + size <= info->tls_current_ceiling) {
             unsigned char *next;
             info->tls_current_free = next = info->tls_current_free + in_size;
+            assert(!((POINTER_SIZE_INT)res & 3));
             finalizible_objects.push_back((Partial_Reveal_Object*) res);
 
             if (cleaned < next) {
@@ -282,8 +274,7 @@
                 info->tls_current_cleaned = next;
             }
             vm_gc_unlock_enum();
-            *(VT32*)res = ah;
-            assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
+            *(int*)res = ah;
             return (Managed_Object_Handle)res;
         }
     }
@@ -309,10 +300,9 @@
         }
         vm_gc_unlock_enum();
         if (cleaning_needed) memset(res, 0, size);
-        *(VT32*)res = ah; // NOTE: object partially initialized, should not be moved!!
+        *(int*)res = ah; // NOTE: object partially initialized, should not be moved!!
                          //       problems with arrays
                          //       no way to call vm_hint_finalize() here
-        assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
         return res;
     }
 
@@ -326,8 +316,7 @@
         // chunk is not expired yet, reuse it
         vm_gc_unlock_enum();
         if (cleaning_needed) memset(res, 0, size);
-        *(VT32*)res = ah;
-        assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
+        *(int*)res = ah;
         return (Managed_Object_Handle)res;
     }
 
@@ -342,8 +331,7 @@
     vm_gc_unlock_enum();
     if (cleaning_needed) memset(res, 0, size);
 
-    *(VT32*)res = ah;
-    assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
+    *(int*)res = ah;
     return (Managed_Object_Handle)res;
 }
 
@@ -418,7 +406,6 @@
 }
 
 void gc_pin_object (Managed_Object_Handle* p_object) {
-#if 0
     // FIXME: overflow check and handling
     Partial_Reveal_Object *obj = *(Partial_Reveal_Object**) p_object;
 
@@ -433,11 +420,9 @@
         if (old_value == value) return;
         value = old_value;
     }
-#endif
 }
 
 void gc_unpin_object (Managed_Object_Handle* p_object) {
-#if 0
     Partial_Reveal_Object *obj = *(Partial_Reveal_Object**) p_object;
     assert((obj->obj_info_byte() & OBJECT_IS_PINNED_BITS) != 0);
 
@@ -448,23 +433,17 @@
         if (old_value == value) return;
         value = old_value;
     }
-#endif
 }
 
 Boolean gc_is_object_pinned (Managed_Object_Handle p_object) {
     Partial_Reveal_Object *obj = (Partial_Reveal_Object*) p_object;
-    assert ((obj->obj_info_byte() & OBJECT_IS_PINNED_INCR) == 0);
-    return false;
-#if 0
-    Partial_Reveal_Object *obj = (Partial_Reveal_Object*) p_object;
     return (obj->obj_info_byte() & OBJECT_IS_PINNED_INCR) != 0;
-#endif
 }
 
 int32 gc_get_hashcode(Managed_Object_Handle p_object) {
     Partial_Reveal_Object *obj = (Partial_Reveal_Object*) p_object;
     if (!obj) return 0;
-    assert((unsigned char*)obj >= heap.base && (unsigned char*)obj < heap.ceiling);
+    assert((unsigned char*)obj >= heap_base && (unsigned char*)obj < heap_ceiling);
     assert(obj->vtable());
     unsigned char info = obj->obj_info_byte();
     // FIXME: atomic ops need to keep pinning work?
@@ -485,6 +464,7 @@
     return hash;
 }
 
+
 Managed_Object_Handle gc_get_next_live_object(void *iterator) {
     TRACE2("gc.iter", "gc_get_next_live_object - NOT IMPLEMENTED");
     abort();
@@ -496,10 +476,10 @@
 }
 
 void *gc_heap_base_address() {
-    return (void*) heap.base;
+    return (void*) heap_base;
 }
 void *gc_heap_ceiling_address() {
-    return (void*) (heap.base + heap.max_size);
+    return (void*) heap_ceiling;
 }
 
 void gc_finalize_on_exit() {

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h Thu Sep 28 10:53:54 2006
@@ -27,25 +27,10 @@
 #include <list>
 #include <open/vm.h>
 #include <open/vm_gc.h>
-#include <open/gc.h>
 #include <port_vmem.h>
 #include <apr_time.h>
 #include <apr_atomic.h>
 #include <cxxlog.h>
-#include "slot.h"
-
-static char* gc_version_string() {
-#if (defined _DEBUG) || ! (defined NDEBUG)
-#define BUILD_MODE "debug"
-#else
-#define BUILD_MODE "release"
-#endif
-#ifndef __TIMESTAMP__
-#define __TIMESTAMP__
-#endif /* TIMESTAMP */
-//    return "GC v4.1 " __TIMESTAMP__ " (" BUILD_MODE ")";
-    return "GC v4.1 " __TIMESTAMP__ " (" BUILD_MODE ")";
-}
 
 /// obtains a spinlock.
 inline void spin_lock(volatile int* lock) {
@@ -93,8 +78,15 @@
     GC_Thread_Info **prev;
 } GC_Thread_Info;
 
-// Heap layout
-#define RESERVED_FOR_HEAP_NULL (4 * 32)
+#define FORWARDING_BIT 1
+#define RESCAN_BIT 2
+#define GC_OBJECT_MARK_BIT_MASK 0x00000080
+#define MARK_BITS 3
+
+#define HASHCODE_IS_ALLOCATED_BIT 4
+#define HASHCODE_IS_SET_BIT 8
+#define OBJECT_IS_PINNED_BITS (7 << 4)
+#define OBJECT_IS_PINNED_INCR (1 << 4)
 
 // FLAGS
 extern const char *lp_hint; // Use large pages
@@ -118,7 +110,7 @@
     unsigned size_and_ref_type;
 
     // Methods
-    POINTER_SIZE_INT flags() { return (POINTER_SIZE_INT)this; }
+    unsigned flags() { return (int)this; }
     GC_VTable_Info *ptr() {
         assert(!is_array());
         return (GC_VTable_Info*) ((POINTER_SIZE_INT)this & ~GC_VT_FLAGS);
@@ -136,7 +128,6 @@
 };
 
 typedef POINTER_SIZE_INT GC_VT;
-typedef uint32 VT32;
 
 typedef struct Partial_Reveal_VTable {
 private:
@@ -148,27 +139,21 @@
 
 } Partial_Reveal_VTable;
 
-
 class Partial_Reveal_Object {
     private:
     Partial_Reveal_Object();
-    VT32 vt_raw;
-    unsigned info;
+    int vt_raw;
+    int info;
     int array_len;
 
     public:
-    VT32 &vt() { assert(/* alignment check */ !((POINTER_SIZE_INT)this & (GC_OBJECT_ALIGNMENT - 1))); return vt_raw; }
-    unsigned &obj_info() { assert(/* alignment check */ !((POINTER_SIZE_INT)this & (GC_OBJECT_ALIGNMENT - 1))); return info; }
+    int &vt() { assert(/* alignment check */ !((int)this & 3)); return vt_raw; }
+    int &obj_info() { assert(/* alignment check */ !((int)this & 3)); return info; }
     unsigned char &obj_info_byte() { return *(unsigned char*)&obj_info(); }
 
     Partial_Reveal_VTable *vtable() {
-#ifdef POINTER64
-        assert(!(vt() & FORWARDING_BIT));
-        return ah_to_vtable(vt());
-#else
         assert(!(vt() & FORWARDING_BIT));
         return (Partial_Reveal_VTable*) vt();
-#endif
     }
 
     int array_length() { return array_len; }
@@ -179,16 +164,6 @@
         return (Partial_Reveal_Object**)
             ((unsigned char*) this + (gcvt->flags() >> GC_VT_ARRAY_FIRST_SHIFT));
     }
-
-#if _DEBUG
-    void valid() {
-        assert((vt() & FORWARDING_BIT) == 0);
-        Class_Handle c = allocation_handle_get_class(vt());
-        assert(class_get_allocation_handle(c) == vt());
-    }
-#else
-    void valid() {}
-#endif
 };
 
 
@@ -198,7 +173,7 @@
     unsigned f = flags();
     unsigned element_shift = f >> GC_VT_ARRAY_ELEMENT_SHIFT;
     unsigned first_element = element_shift >> (GC_VT_ARRAY_FIRST_SHIFT - GC_VT_ARRAY_ELEMENT_SHIFT);
-    return (first_element + (length << (element_shift & GC_VT_ARRAY_ELEMENT_MASK)) + (GC_OBJECT_ALIGNMENT - 1)) & ~(GC_OBJECT_ALIGNMENT - 1);
+    return (first_element + (length << (element_shift & GC_VT_ARRAY_ELEMENT_MASK)) + 3) & ~3;
 }
 
 static inline int get_object_size(Partial_Reveal_Object *obj, GC_VTable_Info *gcvt) {
@@ -233,6 +208,8 @@
     return (size + m/2-1)/m;
 }
 
+typedef unsigned char* Ptr;
+
 struct OldObjects {
     Ptr end;
     Ptr pos;
@@ -250,10 +227,6 @@
     Ptr pos; // current allocation position
     Ptr pos_limit; // end of continuous allocation region
 
-    Ptr roots_start;
-    Ptr roots_pos;
-    Ptr roots_end;
-
     Ptr compaction_region_start() { return old_objects.end; }  // compaction region
     Ptr compaction_region_end() { return ceiling; }
 
@@ -280,9 +253,10 @@
 
 // GLOBALS
 extern Ptr heap_base;
+extern Ptr heap_ceiling;
 
 extern int pending_finalizers;
-extern uint32 chunk_size;
+extern int chunk_size;
 extern bool cleaning_needed;
 extern std::vector<unsigned char*> pinned_areas;
 extern unsigned pinned_areas_pos;
@@ -349,7 +323,7 @@
     assert((hash & ~0x7e) == 0x3a00);
 }
 #else /* DEBUG_HASHCODE */
-inline int gen_hashcode(void *addr) { return (int)(POINTER_SIZE_INT)addr; }
+inline int gen_hashcode(void *addr) { return (int)addr; }
 inline void check_hashcode(int hash) {}
 #endif /* DEBUG_HASHCODE */
 

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp Thu Sep 28 10:53:54 2006
@@ -31,7 +31,6 @@
 #include "gc_types.h"
 #include "cxxlog.h"
 #include "timer.h"
-#include "apr_time.h"
 #ifndef _WIN32
 #include <sys/mman.h>
 #endif
@@ -41,14 +40,14 @@
 unsigned int heap_mark_phase;
 
 HeapSegment heap;
-uint32 chunk_size;
+int chunk_size;
 
 int pending_finalizers = false;
 
 #define RESERVED_FOR_LAST_HASH 4
 
 #define MB * (1024 * 1024)
-size_t HEAP_SIZE_DEFAULT = 256 MB;
+int HEAP_SIZE_DEFAULT = 256 MB;
 
 unsigned int prev_mark_phase;
 bool cleaning_needed = false;
@@ -57,15 +56,16 @@
 int64 timer_start;
 int64 timer_dt;
 Ptr heap_base;
+Ptr heap_ceiling;
 size_t max_heap_size;
 size_t min_heap_size;
 bool ignore_finalizers = false;
 bool remember_root_set = false;
 const char *lp_hint = NULL;
 
-static size_t parse_size_string(const char* size_string) {
+static long parse_size_string(const char* size_string) {
     size_t len = strlen(size_string);
-    size_t unit = 1;
+    int unit = 1;
     if (tolower(size_string[len - 1]) == 'k') {
         unit = 1024;
     } else if (tolower(size_string[len - 1]) == 'm') {
@@ -73,8 +73,8 @@
     } else if (tolower(size_string[len - 1]) == 'g') {
         unit = 1024 * 1024 * 1024;
     }
-    size_t size = atol(size_string);
-    size_t res = size * unit;
+    long size = atol(size_string);
+    long res = size * unit;
     if (res / unit != size) {
         // overflow happened
         return 0;
@@ -104,13 +104,13 @@
 
 static void parse_configuration_properties() {
     max_heap_size = HEAP_SIZE_DEFAULT;
-    min_heap_size = 16 MB;
+    min_heap_size = 8 MB;
     if (is_property_set("gc.mx")) {
         max_heap_size = parse_size_string(vm_get_property_value("gc.mx"));
 
-        if (max_heap_size < 16 MB) {
+        if (max_heap_size < 8 MB) {
             INFO("max heap size is too small: " << max_heap_size);
-            max_heap_size = 16 MB;
+            max_heap_size = 8 MB;
         }
         if (0 == max_heap_size) {
             INFO("wrong max heap size");
@@ -118,15 +118,15 @@
         }
 
         min_heap_size = max_heap_size / 10;
-        if (min_heap_size < 16 MB) min_heap_size = 16 MB;
+        if (min_heap_size < 8 MB) min_heap_size = 8 MB;
     }
 
     if (is_property_set("gc.ms")) {
         min_heap_size = parse_size_string(vm_get_property_value("gc.ms"));
 
-        if (min_heap_size < 16 MB) {
+        if (min_heap_size < 1 MB) {
             INFO("min heap size is too small: " << min_heap_size);
-            min_heap_size = 16 MB;
+            min_heap_size = 1 MB;
         }
 
         if (0 == min_heap_size)
@@ -138,17 +138,6 @@
         max_heap_size = min_heap_size;
     }
 
-#ifdef POINTER64
-        size_t max_compressed = (4096 * (size_t) 1024 * 1024);
-        if (max_heap_size > max_compressed) {
-            INFO("maximum heap size is limited"
-                    " to 4 Gb due to pointer compression");
-            max_heap_size = max_compressed;
-            if (min_heap_size > max_heap_size)
-                min_heap_size = max_heap_size;
-        }
-#endif
-
 
     if (is_property_set("gc.lp")) {
         lp_hint = vm_get_property_value("gc.lp");
@@ -157,8 +146,12 @@
     if (is_property_set("gc.type"))
         gc_algorithm = get_property_value_int("gc.type");
 
-    // version
-    INFO(gc_version_string());
+#if (defined _DEBUG) || ! (defined NDEBUG)
+    char *build_mode = " (debug)";
+#else
+    char *build_mode = " (release)";
+#endif
+    INFO("gc 4.1" << build_mode);
     INFO("GC type = " << gc_algorithm);
 
     if (get_property_value_boolean("gc.ignore_finalizers", false)) {
@@ -180,29 +173,13 @@
 }
 
 #ifdef _WIN32
-static inline void *reserve_mem(size_t size) {
+static inline void *reserve_mem(long size) {
     return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_READWRITE);
 }
 static const void* RESERVE_FAILURE = 0;
 #else
-static inline void *reserve_mem(size_t size) {
-#ifdef POINTER64
-    /* We have planty of address space, let's protect unaccessible part of heap
-     * to find some of bad pointers. */
-    size_t four_gig = 4 * 1024 * (size_t) 1024 * 1024;
-    size_t padding = 4 * 1024 * (size_t) 1024 * 1024;
-    void *addr = mmap(0, padding + four_gig, PROT_READ | PROT_WRITE,
-            MAP_NORESERVE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-    assert(addr != MAP_FAILED);
-    UNUSED int err = mprotect((Ptr)addr, padding, PROT_NONE);
-    assert(!err);
-    err = mprotect((Ptr)addr + padding + max_heap_size,
-                    four_gig - max_heap_size, PROT_NONE);
-    assert(!err);
-    return (Ptr)addr + padding;
-#else
+static inline void *reserve_mem(long size) {
     return mmap(0, max_heap_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-#endif
 }
 static const void* RESERVE_FAILURE = MAP_FAILED;
 #endif
@@ -235,7 +212,7 @@
     if (heap_base == NULL) {
         heap_base = (unsigned char*) reserve_mem(max_heap_size);
         if (heap_base == RESERVE_FAILURE) {
-            size_t dec = 100 * 1024 * 1024;
+            long dec = 100 * 1024 * 1024;
             max_heap_size = max_heap_size / dec * dec;
 
             while(true) {
@@ -253,13 +230,12 @@
         ECHO("WARNING: min heap size reduced to " << mb(min_heap_size) << " Mb");
     }
 
-    heap.ceiling = heap_base + min_heap_size - RESERVED_FOR_LAST_HASH;
+    heap_ceiling = heap_base + max_heap_size;
 
     heap.base = heap_base;
     heap.size = min_heap_size;
+    heap.ceiling = heap.base + heap.size - RESERVED_FOR_LAST_HASH;
     heap.max_size = max_heap_size;
-    heap.roots_start = heap.roots_pos = heap.roots_end =
-        heap.base + heap.max_size - RESERVED_FOR_LAST_HASH;
 
 #ifdef _WIN32
     void *res;
@@ -276,7 +252,6 @@
 void gc_init() {
     INFO2("gc.init", "GC init called\n");
     init_mem();
-    init_slots();
     init_select_gc();
     gc_end = apr_time_now();
     timer_init();
@@ -335,8 +310,8 @@
     unsigned char *start = mark_bits + (heap.compaction_region_start() - heap_base) / sizeof(void*) / 8;
     unsigned char *end = mark_bits + (heap.compaction_region_end() - heap_base + sizeof(void*) * 8 - 1) / sizeof(void*) / 8;
     int page = 4096; // FIXME
-    mark_bits_allocated_start = (unsigned char*)((POINTER_SIZE_INT)start & ~(page - 1));
-    mark_bits_allocated_end = (unsigned char*)(((POINTER_SIZE_INT)end + page - 1) & ~(page - 1));
+    mark_bits_allocated_start = (unsigned char*)((int)start & ~(page - 1));
+    mark_bits_allocated_end = (unsigned char*)(((int)end + page - 1) & ~(page - 1));
 #ifdef _WIN32
     unsigned char *res = (unsigned char*) VirtualAlloc(mark_bits_allocated_start,
             mark_bits_allocated_end - mark_bits_allocated_start, MEM_COMMIT, PAGE_READWRITE);
@@ -358,8 +333,7 @@
 
 void heap_extend(size_t size) {
     size = (size + 65535) & ~65535;
-    size_t max_size = heap.max_size - (heap.roots_end - heap.roots_start);
-    if (size > max_size) size = max_size;
+    if (size > max_heap_size) size = max_heap_size;
     if (size <= heap.size) return;
 
 #ifdef _WIN32
@@ -370,7 +344,7 @@
     unsigned char *old_ceiling = heap.ceiling;
     heap.ceiling = heap.base + heap.size - RESERVED_FOR_LAST_HASH;
 
-    if (heap.pos_limit == old_ceiling) {
+    if (old_ceiling == heap.pos_limit) {
         heap.pos_limit = heap.ceiling;
     }
     chunk_size = round_down(heap.size / (10 * num_threads),128);

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp Thu Sep 28 10:53:54 2006
@@ -123,14 +123,9 @@
         int el_offset;
         for(el_offset = -1; el_size; el_size >>= 1, el_offset++);
 
-        // FIXME: use data from VM
-#ifdef _EM64T_
-        int first_element = 16;
-#else
         int first_element = (el_offset == 3) ? 16 : 12;
-#endif
 
-        POINTER_SIZE_INT flags = GC_VT_ARRAY
+        int flags = GC_VT_ARRAY
             | (el_offset << GC_VT_ARRAY_ELEMENT_SHIFT)
             | (first_element << GC_VT_ARRAY_FIRST_SHIFT);
 
@@ -147,7 +142,7 @@
     GC_VTable_Info *info = build_slot_offset_array(ch, vt, type);
     info->size_and_ref_type = class_get_boxed_data_size(ch) | (int)type;
 
-    POINTER_SIZE_INT flags = 0;
+    int flags = 0;
     if (!ignore_finalizers && class_is_finalizable(ch)) {
         flags |= GC_VT_FINALIZIBLE;
     }
@@ -157,7 +152,7 @@
         flags |= GC_VT_HAS_SLOTS;
     }
 
-    POINTER_SIZE_INT addr = (POINTER_SIZE_INT) info;
+    int addr = (int) info;
     assert((addr & 7) == 0); // required alignment
 
     flags |= addr;

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h Thu Sep 28 10:53:54 2006
@@ -17,5 +17,5 @@
  * @author Ivan Volosyuk
  */
 
-typedef fast_list<Slot,65536> roots_vector;
+typedef fast_list<Partial_Reveal_Object**,65536> roots_vector;
 extern roots_vector root_set;

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp Thu Sep 28 10:53:54 2006
@@ -21,24 +21,25 @@
 #include "collect.h"
 #include <math.h>
 
-void reserve_old_object_space(POINTER_SIZE_SINT size) {
-    size &= ~(GC_OBJECT_ALIGNMENT - 1);
+void reserve_old_object_space(int size) {
+    size &= ~3;
 
+    int free = heap.old_objects.end - heap.old_objects.pos;
     if (size < 0) {
         TRACE2("gc.select", "Reserve old object space: can't shrink old object space");
         return;
     }
 
     assert(heap.old_objects.end == heap.pos);
-    if (heap.old_objects.end + size > heap.allocation_region_end()) {
-        size = heap.allocation_region_end() - heap.old_objects.end;
+    if (heap.old_objects.end + size > heap.ceiling) {
+        size = heap.ceiling - heap.old_objects.end;
     }
 
     heap.old_objects.end += size;
     TRACE2("gc.select", "Reserved space = " << mb(heap.old_objects.end - heap.old_objects.pos));
 
     // balancing free areas.
-    pinned_areas.push_back(heap.allocation_region_end());
+    pinned_areas.push_back(heap.ceiling);
 
     // update heap.old_objects.pos_limit
     if (heap.old_objects.pos_limit == heap.pos) {
@@ -72,7 +73,7 @@
     heap.old_objects.end = heap.pos;
 
     // restore pinned areas.
-    pinned_areas.pop_back(); // heap.allocation_region_end()
+    pinned_areas.pop_back();
 }
 
 unsigned char *select_gc(int size) {
@@ -118,7 +119,7 @@
     TRACE2("gc.mem", "select_gc2 = " << res);
 
     if (res == 0 && heap.size != heap.max_size) {
-        assert(heap.pos_limit == heap.allocation_region_end());
+        assert(heap.pos_limit == heap.ceiling);
         heap_extend(round_up(heap.size + size, 65536));
         if (heap.pos + size <= heap.pos_limit) {
             res = heap.pos;
@@ -165,25 +166,20 @@
 bool need_compaction_next_gc() {
     if (heap.working_set_size == 0 || !gc_adaptive) {
         TRACE2("gc.adaptive", "static Smin analisis");
-        return heap.allocation_region_end() - heap.pos < heap.size * 0.7f;
+        return heap.ceiling - heap.pos < heap.size * 0.7f;
     } else {
-        float smin = Smin(heap.roots_start - heap.base - RESERVED_FOR_HEAP_NULL - heap.working_set_size,
+        float smin = Smin(heap.size - heap.working_set_size,
                 heap.Tcompact, heap.Tcopy, heap.dS_copy);
-        float free = (float) (heap.allocation_region_end() - heap.old_objects.pos);
-        INFO2("gc.smin", "smin = " << mb((size_t)smin)
-                << " (working set " << mb((size_t)heap.working_set_size)
-                << " Tfast " << (int)(heap.Tcopy / 1000.)
-                << " Tslow " << (int)(heap.Tcompact / 1000.)
-                << " dS " << mb((size_t)heap.dS_copy)
-                << "), free = " << mb((int)free));
+        float free = (float) (heap.ceiling - heap.pos);
         //INFO2("gc.logic", "Smin = " << (int) mb((int)smin) << "mb, free = " << mb((int)free) << " mb");
         return free < smin;
+            
     }
 }
 
 static void check_heap_extend() {
-    size_t free_space = heap.allocation_region_end() - heap.allocation_region_start();
-    size_t used_space = heap.size - free_space;
+    int free_space = heap.allocation_region_end() - heap.allocation_region_start();
+    int used_space = heap.size - free_space;
 
     if (free_space < used_space) {
         size_t new_heap_size = used_space * 8;
@@ -201,7 +197,6 @@
 }
 
 size_t correction;
-Ptr prev_alloc_start;
 
 static void update_evacuation_area() {
     POINTER_SIZE_SINT free = heap.allocation_region_end() - heap.allocation_region_start();
@@ -214,16 +209,10 @@
         return;
     }
 
-    POINTER_SIZE_SINT dS = heap.old_objects.pos - prev_alloc_start;
-    if (prev_alloc_start != 0) {
-        heap.dS_copy = (float)dS;
-    }
-    prev_alloc_start = heap.old_objects.pos;
-
     if (need_compaction_next_gc()) {
         //INFO2("gc.logic", "compaction triggered by Smin");
         heap.next_gc = GC_FULL;
-        prev_alloc_start = 0;
+        heap.dS_copy = 0;
         return;
     }
 
@@ -242,6 +231,7 @@
         return;
     }
     assert(incr > 0);
+    heap.dS_copy = (float)incr;
 
     /*INFO2("gc.logic", 
             "mb overflow = " << overflow / 1024 / 1024
@@ -292,25 +282,29 @@
 }
 
 void select_force_gc() {
-    vm_gc_lock_enum();
     if (gc_algorithm < 10) {
+        vm_gc_lock_enum();
         force_gc();
+        vm_gc_unlock_enum();
+        vm_hint_finalize();
     } else if ((gc_algorithm / 10) == 2) {
+        vm_gc_lock_enum();
         full_gc(0);
+        vm_gc_unlock_enum();
+        vm_hint_finalize();
     } else if ((gc_algorithm / 10) == 3) {
-        heap.old_objects.prev_pos = heap.old_objects.pos;
+        vm_gc_lock_enum();
         copy_gc(0);
+        vm_gc_unlock_enum();
+        vm_hint_finalize();
     }
-    vm_gc_unlock_enum();
-    vm_hint_finalize();
 }
 
 void init_select_gc() {
-    heap.old_objects.end = heap.old_objects.pos = heap.old_objects.pos_limit
-        = heap.base + RESERVED_FOR_HEAP_NULL;
+    heap.old_objects.end = heap.old_objects.pos = heap.old_objects.pos_limit = heap.base;
 
-    heap.pos = heap.base + RESERVED_FOR_HEAP_NULL;
-    heap.pos_limit = heap.allocation_region_end();
+    heap.pos = heap.base;
+    heap.pos_limit = heap.ceiling;
 
     heap.incr_abs = 0;
     heap.incr_rel = 0.2f;
@@ -320,14 +314,15 @@
     pinned_areas_pos = 1;
 
     if (gc_algorithm % 10 == 0) {
-        size_t reserve = heap.size / 5;
+        int reserve = heap.size / 5;
         reserve_old_object_space(reserve);
-        heap.predicted_pos = heap.base + reserve + RESERVED_FOR_HEAP_NULL;
+        heap.predicted_pos = heap.base + reserve;
     }
     if (gc_algorithm % 10 == 3) {
-        size_t reserve = heap.size / 3;
+        int reserve = heap.size / 3;
         reserve_old_object_space(reserve);
-        heap.predicted_pos = heap.base + reserve + RESERVED_FOR_HEAP_NULL;
+        heap.predicted_pos = heap.base + reserve;
     }
     heap.next_gc = GC_COPY;
+
 }

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h Thu Sep 28 10:53:54 2006
@@ -17,10 +17,17 @@
  * @author Ivan Volosyuk
  */
 
+struct InteriorPointer {
+    Partial_Reveal_Object *obj;
+    int offset;
+    Partial_Reveal_Object **interior_ref;
+};
+
 extern fast_list<Partial_Reveal_Object*, 65536> objects;
+extern fast_list<InteriorPointer,256> interior_pointers;
 
 
-inline bool is_left_object(Partial_Reveal_Object *refobj, Slot slot) {
-    return (void*)refobj <= slot.ptr();
+inline bool is_left_object(Partial_Reveal_Object *refobj, Partial_Reveal_Object **ref) {
+    return (void*)refobj <= (void*) ref;
 }
 

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/timer.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/timer.h?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/timer.h (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/timer.h Thu Sep 28 10:53:54 2006
@@ -23,6 +23,26 @@
                                            action_name action_params;               \
                                            timer_##action_name.finish(); }
 
+#ifdef _WIN32
+static __declspec(naked) __int64
+ticks(void) {
+    __asm       {
+        rdtsc
+        ret
+    }
+}
+#else
+static int64
+ticks(void) {
+    int64 val;
+    __asm__ __volatile__ ("rdtsc" : "=A" (val));
+    return val;
+}
+#endif
+
+extern int64 timer_start;
+extern int64 timer_dt;
+
 class Timer {
     const char *action;
     const char *category;
@@ -34,31 +54,39 @@
         action = str;
         finished = false;
         category = "gc.time";
-        start = apr_time_now();
+        start = ticks();
     }
 
     Timer(const char *str, const char *_category) {
         action = str;
         category = _category;
         finished = false;
-        start = apr_time_now();
+        start = ticks();
     }
 
     void finish() {
         finished = true;
-        apr_time_t end = apr_time_now();
-        INFO2(category, action << " " << (end - start + 500) / 1000 << " ms");
+        apr_time_t end = ticks();
+        INFO2(category, action << " " << (end - start) / timer_dt / 1000 << " ms");
     }
 
     ~Timer() {
         if (!finished) finish();
     }
 
-    apr_time_t dt() { return apr_time_now() - start; }
+    apr_time_t dt() { return ticks() - start; }
 };
 
-inline void timer_init() {}
-inline void timer_calibrate(apr_time_t time_from_start) {}
+inline void timer_init() {
+    timer_start = ticks();
+}
+
+inline void timer_calibrate(apr_time_t time_from_start) {
+    int64 ticks_from_start = ticks() - timer_start;
+    int64 dt = ticks_from_start / time_from_start;
+    timer_dt = dt;
+    
+}
 
 #else
 #  define TIME(action_name, action_params) action_name action_params

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/include/version_svn_tag.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/include/version_svn_tag.h?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/include/version_svn_tag.h (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/include/version_svn_tag.h Thu Sep 28 10:53:54 2006
@@ -17,6 +17,6 @@
 #ifndef _VERSION_SVN_TAG_
 #define _VERSION_SVN_TAG_
 
-#define VERSION_SVN_TAG  "450693"
+#define VERSION_SVN_TAG  "450932"
 
 #endif // _VERSION_SVN_TAG_

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/util/mem_alloc.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/util/mem_alloc.cpp?view=diff&rev=450941&r1=450940&r2=450941
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/util/mem_alloc.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/util/mem_alloc.cpp Thu Sep 28 10:53:54 2006
@@ -308,5 +308,5 @@
     assert (base);
     // Subtract a small number (like 1) from the real base so that
     // no valid vtable offsets will ever be 0.
-    return (POINTER_SIZE_INT) (base - 8);
+    return (POINTER_SIZE_INT) (base - 1);
 } //vm_get_vtable_base



Mime
View raw message