Subject: svn commit: r505384 [2/2] - /harmony/enhanced/drlvm/trunk/vm/thread/src/ Date: Fri, 09 Feb 2007 17:25:03 -0000 To: commits@harmony.apache.org From: wjwashburn@apache.org Message-Id: <20070209172506.72F701A981D@eris.apache.org> Modified: harmony/enhanced/drlvm/trunk/vm/thread/src/thread_private.h URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/thread/src/thread_private.h?view=diff&rev=505384&r1=505383&r2=505384 ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/thread/src/thread_private.h (original) +++ harmony/enhanced/drlvm/trunk/vm/thread/src/thread_private.h Fri Feb 9 09:25:01 2007 @@ -14,12 +14,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -/** - * @author Andrey Chernyshev - * @version $Revision: 1.1.2.14 $ - */ - - #ifndef THREAD_PRIVATE_H #define THREAD_PRIVATE_H @@ -44,7 +38,7 @@ //#include "clog.h" // FIXME move to the global header, add error converter -#define RET_ON_ERROR(stat) if(stat) { return -1; } +#define RET_ON_ERROR(stat) if (stat) { return -1; } #define CONVERT_ERROR(stat) (stat) #define MAX_OWNED_MONITOR_NUMBER 200 //FIXME: switch to dynamic resize @@ -108,27 +102,27 @@ #endif /** - * get_local_pool() function return apr pool associated with the current thread. - * the memory could be allocated without lock using this pool - * deallocation should be done in the same thread, otherwise - * local_pool_cleanup_register() should be called - */ - apr_pool_t* get_local_pool(); + * get_local_pool() function return apr pool associated with the current thread. + * the memory could be allocated without lock using this pool + * deallocation should be done in the same thread, otherwise + * local_pool_cleanup_register() should be called + */ +apr_pool_t* get_local_pool(); /** - * local_pool_cleanup_register() synchronously register the cleanup function.
- * It should be called to request cleanup in thread local pool, from other thread - * Usage scenario: - * IDATA hymutex_destroy (tm_mutex_t *mutex) { - * apr_pool_t *pool = apr_thread_mutex_pool_get ((apr_thread_mutex_t*)mutex); - * if(pool != get_local_pool()) { - * return local_pool_cleanup_register(hymutex_destroy, mutex); - * } - * apr_thread_mutex_destroy(mutex); - * return TM_ERROR_NONE; - * } - * - */ + * local_pool_cleanup_register() synchronously register the cleanup function. + * It should be called to request cleanup in thread local pool, from other thread + * Usage scenario: + * IDATA hymutex_destroy (tm_mutex_t *mutex) { + * apr_pool_t *pool = apr_thread_mutex_pool_get ((apr_thread_mutex_t*)mutex); + * if (pool != get_local_pool()) { + * return local_pool_cleanup_register(hymutex_destroy, mutex); + * } + * apr_thread_mutex_destroy(mutex); + * return TM_ERROR_NONE; + * } + * + */ IDATA local_pool_cleanup_register(void* func, void* data); @@ -208,23 +202,22 @@ // Basic manipulation fields - /** - * Group for this thread. Different groups are needed in order - * to be able to quickly iterate over the specific group. - * Examples are: Java threads, GC private threads. - * Equal to the address of the head of the list of threads for this group. - */ + /** + * Group for this thread. Different groups are needed in order + * to be able to quickly iterate over the specific group. + * Examples are: Java threads, GC private threads. + * Equal to the address of the head of the list of threads for this group. + */ hythread_group_t group; - /** - * Points to the next thread within the group. - */ + /** + * Points to the next thread within the group. + */ hythread_t next; - /** + /** * Points to the last thread within the group. */ - hythread_t prev; /** @@ -457,39 +450,41 @@ */ typedef struct HyThreadMonitor { - /** - * Mutex - */ - hymutex_t mutex; - - /** - * Condition variable - */ - hycond_t condition; - - /** - * Recursion count - */ - IDATA recursion_count; - hythread_t owner; - hythread_t inflate_owner; - hythread_t last_wait; - int inflate_count; - int wait_count; - int notify_flag; - /** - * monitor sub pool - * will be destroyed by monitor_destroy() - */ - apr_pool_t *pool; - /** - * Owner thread ID. - */ - IDATA thread_id; + /** + * Mutex + */ + hymutex_t mutex; + + /** + * Condition variable + */ + hycond_t condition; + + /** + * Recursion count + */ + IDATA recursion_count; + hythread_t owner; + hythread_t inflate_owner; + hythread_t last_wait; + int inflate_count; + int wait_count; + int notify_flag; + + /** + * monitor sub pool + * will be destroyed by monitor_destroy() + */ + apr_pool_t *pool; + + /** + * Owner thread ID. + */ + IDATA thread_id; - UDATA flags; + UDATA flags; - char *name; + char *name; } HyThreadMonitor; @@ -498,25 +493,25 @@ */ typedef struct HyLatch { - /** - * Latch count - */ - int count; - - /** - * Condition event used to signal threads which are waiting on the latch. - */ - hycond_t condition; - - /** - * Mutex associated with the latch data. - */ - hymutex_t mutex; - /** - * latch sub pool - * will be destroyed by latch_destroy() - */ - apr_pool_t *pool; + /** + * Latch count + */ + int count; + + /** + * Condition event used to signal threads which are waiting on the latch. + */ + hycond_t condition; + + /** + * Mutex associated with the latch data. 
+ */ + hymutex_t mutex; + /** + * latch sub pool + * will be destroyed by latch_destroy() + */ + apr_pool_t *pool; } HyLatch; @@ -526,30 +521,31 @@ */ typedef struct HySemaphore { - /** - * Semaphore count - */ - int count; - - /** - * Semaphore max count - */ - int max_count; - - /** - * Condition event used to signal threads which are waiting on the semaphore. - */ - hycond_t condition; - - /** - * Mutex associated with the semaphore data. - */ - hymutex_t mutex; - /** - * semaphore sub pool - * will be destroyed by sem_destroy() - */ - apr_pool_t *pool; + /** + * Semaphore count + */ + int count; + + /** + * Semaphore max count + */ + int max_count; + + /** + * Condition event used to signal threads which are waiting on the semaphore. + */ + hycond_t condition; + + /** + * Mutex associated with the semaphore data. + */ + hymutex_t mutex; + + /** + * semaphore sub pool + * will be destroyed by sem_destroy() + */ + apr_pool_t *pool; } HySemaphore; // Global variables Modified: harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_instr.c URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_instr.c?view=diff&rev=505384&r1=505383&r2=505384 ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_instr.c (original) +++ harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_instr.c Fri Feb 9 09:25:01 2007 @@ -15,11 +15,6 @@ * limitations under the License. */ -/** - * @author Sergey Petrovsky - * @version $Revision: 1.1.2.10 $ - */ - /** * @file thread_ti_instr.c * @brief JVMTI basic related functions @@ -39,81 +34,80 @@ IDATA VMCALL jthread_get_all_threads(jthread** threads, jint *count_ptr) { hythread_group_t java_thread_group = get_java_thread_group(); - hythread_iterator_t iterator; - hythread_t tm_native_thread; - jvmti_thread_t tm_java_thread; - jthread* java_threads; - int i; - int count = 0; - int java_thread_count = 0; - IDATA status; + hythread_iterator_t iterator; + hythread_t tm_native_thread; + jvmti_thread_t tm_java_thread; + jthread* java_threads; + int i; + int count = 0; + int java_thread_count = 0; + IDATA status; //apr_status_t apr_status; //apr_pool_t *pool; assert(java_thread_group); - iterator = hythread_iterator_create(java_thread_group); - count = hythread_iterator_size (iterator); - for (i = 0; i < count; i++){ + iterator = hythread_iterator_create(java_thread_group); + count = hythread_iterator_size (iterator); + for (i = 0; i < count; i++) { tm_native_thread = hythread_iterator_next(&iterator); - tm_java_thread = hythread_get_private_data(tm_native_thread); - if (tm_java_thread){ - java_thread_count++; - } + tm_java_thread = hythread_get_private_data(tm_native_thread); + if (tm_java_thread) { + java_thread_count++; } + } /*apr_status = apr_pool_create(&pool, 0); - if (apr_status != APR_SUCCESS){ + if (apr_status != APR_SUCCESS) { hythread_iterator_release(&iterator); return CONVERT_ERROR(apr_status); } java_threads = apr_palloc(pool, sizeof(jthread)* java_thread_count);*/ java_threads = (jthread*)malloc(sizeof(jthread)* java_thread_count); - if (!java_threads){ - hythread_iterator_release(&iterator); - return TM_ERROR_OUT_OF_MEMORY; - } - hythread_iterator_reset(&iterator); - java_thread_count = 0; - for (i = 0; i < count; i++){ + if (!java_threads) { + hythread_iterator_release(&iterator); + return TM_ERROR_OUT_OF_MEMORY; + } + hythread_iterator_reset(&iterator); + java_thread_count = 0; + for (i = 0; i < count; i++) { tm_native_thread = 
hythread_iterator_next(&iterator); - tm_java_thread = hythread_get_private_data(tm_native_thread); - if (tm_java_thread){ - java_threads[java_thread_count] = tm_java_thread->thread_object; - java_thread_count++; - } + tm_java_thread = hythread_get_private_data(tm_native_thread); + if (tm_java_thread) { + java_threads[java_thread_count] = tm_java_thread->thread_object; + java_thread_count++; } + } *threads = java_threads; - *count_ptr = java_thread_count; + *count_ptr = java_thread_count; status = hythread_iterator_release(&iterator); return status; } /* */ -IDATA deads_expand(jthread **deads, int deads_size){ +IDATA deads_expand(jthread **deads, int deads_size) { jthread *new_deads; int i; new_deads = (jthread *)malloc(sizeof(jthread) * deads_size * 2); - if (!new_deads) return TM_ERROR_OUT_OF_MEMORY; + if (!new_deads) return TM_ERROR_OUT_OF_MEMORY; - for (i = 0; i < deads_size; i++){ + for (i = 0; i < deads_size; i++) { new_deads[i] = (*deads)[i]; - } + } *deads = new_deads; - return TM_ERROR_NONE; + return TM_ERROR_NONE; } /* */ -int deads_find(jobject thread, jobject *deads, int base, int top, int deads_size){ - - int i; +int deads_find(jobject thread, jobject *deads, int base, int top, int deads_size) { + int i; - for (i = 0; i < top; i++){ - if (vm_objects_are_equal(thread, deads[i])){ - return 1; - } + for (i = 0; i < top; i++) { + if (vm_objects_are_equal(thread, deads[i])) { + return 1; } - return 0; + } + return 0; } // FIXME: synchronization and maybe thread suspension needed /** @@ -125,74 +119,73 @@ * @param[out] dead_count number of deadlocked threads */ IDATA VMCALL jthread_get_deadlocked_threads(jthread *thread_list, jint thread_count, jthread **dead_list, jint *dead_count) { - - jthread *deads; - int deads_size = 1; - int deads_base = 0; - int deads_top = 0; + jthread *deads; + int deads_size = 1; + int deads_base = 0; + int deads_top = 0; jthread *output; - int output_top = 0; - jobject monitor; - jthread thread; + int output_top = 0; + jobject monitor; + jthread thread; //apr_pool_t *pool; /*apr_pool_t *pool_out; apr_status_t apr_status;*/ - IDATA status; - int i; + IDATA status; + int i; /*apr_status = apr_pool_create(&pool, NULL); - if (apr_status != APR_SUCCESS) return CONVERT_ERROR(apr_status); + if (apr_status != APR_SUCCESS) return CONVERT_ERROR(apr_status); - deads = apr_palloc(pool, sizeof(jthread) * deads_size); + deads = apr_palloc(pool, sizeof(jthread) * deads_size); output = apr_palloc(pool, sizeof(jthread) * thread_count); if (!deads || !output) return TM_ERROR_OUT_OF_MEMORY;*/ deads = (jthread *)malloc(sizeof(jthread) * deads_size); output = (jthread *)malloc(sizeof(jthread) * thread_count); - if ((deads==NULL)||(output==NULL)) - { + if ((deads==NULL)||(output==NULL)) { return TM_ERROR_OUT_OF_MEMORY; } - for (i = 0; i < thread_count; i++){ - thread = thread_list[i]; - while (1){ - status=jthread_get_contended_monitor(thread, &monitor); - if (status != TM_ERROR_NONE) return status; - if (! monitor){ - deads_top = deads_base; // remove frame - break; - } - if (deads_find(thread, deads, deads_base, deads_top, deads_size)){ - output[output_top] = thread; + for (i = 0; i < thread_count; i++) { + thread = thread_list[i]; + while (1) { + status=jthread_get_contended_monitor(thread, &monitor); + if (status != TM_ERROR_NONE) return status; + if (! 
monitor) { + deads_top = deads_base; // remove frame + break; + } + if (deads_find(thread, deads, deads_base, deads_top, deads_size)) { + output[output_top] = thread; output_top++; - deads_base = deads_top; // add frame - break; - } - if (deads_top == deads_size){ - status = deads_expand(&deads, deads_size); - if (status != TM_ERROR_NONE) return status; - } - deads[deads_top] = thread; - deads_top++; - status = jthread_get_lock_owner(monitor, &thread); + deads_base = deads_top; // add frame + break; + } + if (deads_top == deads_size) { + status = deads_expand(&deads, deads_size); if (status != TM_ERROR_NONE) return status; - } + } + deads[deads_top] = thread; + deads_top++; + status = jthread_get_lock_owner(monitor, &thread); + if (status != TM_ERROR_NONE) return status; } - if (output_top > 0){ + } + + if (output_top > 0) { /* apr_status = apr_pool_create(&pool_out, NULL); if (apr_status != APR_SUCCESS) return CONVERT_ERROR(apr_status);*/ *dead_list = (jthread *)malloc(sizeof(jthread) * output_top); - if (! *dead_list) return TM_ERROR_OUT_OF_MEMORY; + if (! *dead_list) return TM_ERROR_OUT_OF_MEMORY; - for (i = 0; i < output_top; i++){ + for (i = 0; i < output_top; i++) { (*dead_list)[i] = output[i]; - } - } else { - *dead_list = NULL; } + } else { + *dead_list = NULL; + } *dead_count = output_top; - return TM_ERROR_NONE; + return TM_ERROR_NONE; } /** @@ -203,25 +196,25 @@ IDATA VMCALL jthread_get_thread_count(jint *count_ptr) { hythread_group_t java_thread_group = get_java_thread_group(); - hythread_iterator_t iterator; - hythread_t tm_native_thread; - jvmti_thread_t tm_java_thread; - int i; - int count = 0; - int java_thread_count = 0; - IDATA status; + hythread_iterator_t iterator; + hythread_t tm_native_thread; + jvmti_thread_t tm_java_thread; + int i; + int count = 0; + int java_thread_count = 0; + IDATA status; assert(java_thread_group); - iterator = hythread_iterator_create(java_thread_group); - count = hythread_iterator_size (iterator); - for (i = 0; i < count; i++){ + iterator = hythread_iterator_create(java_thread_group); + count = hythread_iterator_size (iterator); + for (i = 0; i < count; i++) { tm_native_thread = hythread_iterator_next(&iterator); - tm_java_thread = hythread_get_private_data(tm_native_thread); - if (tm_java_thread){ - java_thread_count++; - } + tm_java_thread = hythread_get_private_data(tm_native_thread); + if (tm_java_thread) { + java_thread_count++; } - *count_ptr = java_thread_count; + } + *count_ptr = java_thread_count; status = hythread_iterator_release(&iterator); return status; @@ -235,23 +228,23 @@ IDATA VMCALL jthread_get_blocked_count(jint* count_ptr) { hythread_group_t java_thread_group = get_java_thread_group(); - hythread_iterator_t iterator; - hythread_t tm_native_thread; - int nmb = 0; - int count; - IDATA status; + hythread_iterator_t iterator; + hythread_t tm_native_thread; + int nmb = 0; + int count; + IDATA status; assert(java_thread_group); - iterator = hythread_iterator_create(java_thread_group); - count = hythread_iterator_size (iterator); + iterator = hythread_iterator_create(java_thread_group); + count = hythread_iterator_size (iterator); - while(hythread_iterator_has_next(iterator)){ - tm_native_thread = hythread_iterator_next(&iterator); - if (tm_native_thread && hythread_is_blocked_on_monitor_enter(tm_native_thread)){ - nmb++; - } + while (hythread_iterator_has_next(iterator)) { + tm_native_thread = hythread_iterator_next(&iterator); + if (tm_native_thread && hythread_is_blocked_on_monitor_enter(tm_native_thread)) { + nmb++; } - 
*count_ptr = nmb; + } + *count_ptr = nmb; status = hythread_iterator_release(&iterator); return status; @@ -265,22 +258,22 @@ IDATA VMCALL jthread_get_waited_count(jint* count) { hythread_group_t java_thread_group = get_java_thread_group(); - hythread_iterator_t iterator; - hythread_t tm_native_thread; - int nmb = 0; - IDATA status; + hythread_iterator_t iterator; + hythread_t tm_native_thread; + int nmb = 0; + IDATA status; assert(java_thread_group); - iterator = hythread_iterator_create(java_thread_group); + iterator = hythread_iterator_create(java_thread_group); - while(hythread_iterator_has_next(iterator)){ - tm_native_thread = hythread_iterator_next(&iterator); - //if (hythread_is_in_monitor_wait(tm_native_thread)){ ??????????????????????? - if (hythread_is_waiting(tm_native_thread)){ - nmb++; - } + while (hythread_iterator_has_next(iterator)) { + tm_native_thread = hythread_iterator_next(&iterator); + //if (hythread_is_in_monitor_wait(tm_native_thread)) { ??????????????????????? + if (hythread_is_waiting(tm_native_thread)) { + nmb++; } - *count = nmb; + } + *count = nmb; status = hythread_iterator_release(&iterator); return status; @@ -296,29 +289,28 @@ * */ IDATA VMCALL jthread_get_state(jthread java_thread, jint *state) { - - hythread_t tm_native_thread; + hythread_t tm_native_thread; assert(java_thread); assert(state); - tm_native_thread = vm_jthread_get_tm_data(java_thread); + tm_native_thread = vm_jthread_get_tm_data(java_thread); - *state = 0; - if (! tm_native_thread) return TM_ERROR_NONE; // Not started yet + *state = 0; + if (! tm_native_thread) return TM_ERROR_NONE; // Not started yet - if (hythread_is_alive(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_ALIVE;} - if (hythread_is_runnable(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_RUNNABLE;} - if (hythread_is_blocked_on_monitor_enter(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;} - if (hythread_is_waiting(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_WAITING;} - if (hythread_is_waiting_indefinitely(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_WAITING_INDEFINITELY;} - if (hythread_is_waiting_with_timeout(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT;} - if (hythread_is_sleeping(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_SLEEPING;} - if (hythread_is_in_monitor_wait(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_IN_OBJECT_WAIT;} - if (hythread_is_parked(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_PARKED;} - if (hythread_is_suspended(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_SUSPENDED;} - if (hythread_interrupted(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_INTERRUPTED;} - if (hythread_is_in_native(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_IN_NATIVE;} - if (hythread_is_terminated(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_TERMINATED;} + if (hythread_is_alive(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_ALIVE;} + if (hythread_is_runnable(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_RUNNABLE;} + if (hythread_is_blocked_on_monitor_enter(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;} + if (hythread_is_waiting(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_WAITING;} + if (hythread_is_waiting_indefinitely(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_WAITING_INDEFINITELY;} + if (hythread_is_waiting_with_timeout(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT;} + if (hythread_is_sleeping(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_SLEEPING;} + if 
(hythread_is_in_monitor_wait(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_IN_OBJECT_WAIT;} + if (hythread_is_parked(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_PARKED;} + if (hythread_is_suspended(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_SUSPENDED;} + if (hythread_interrupted(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_INTERRUPTED;} + if (hythread_is_in_native(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_IN_NATIVE;} + if (hythread_is_terminated(tm_native_thread)) {*state |= JVMTI_THREAD_STATE_TERMINATED;} return TM_ERROR_NONE; } @@ -330,14 +322,13 @@ * @param[in] data data to be put */ IDATA VMCALL jthread_set_local_storage(jthread java_thread, const void* data) { - - hythread_t tm_native_thread; + hythread_t tm_native_thread; assert(java_thread); - tm_native_thread = vm_jthread_get_tm_data(java_thread); + tm_native_thread = vm_jthread_get_tm_data(java_thread); assert(tm_native_thread); - return hythread_set_private_data(tm_native_thread, (void *)data); + return hythread_set_private_data(tm_native_thread, (void *)data); } /** @@ -347,14 +338,13 @@ * @param[out] data_ptr pointer to the data */ IDATA VMCALL jthread_get_local_storage(jthread java_thread, void** data_ptr) { - - hythread_t tm_native_thread; + hythread_t tm_native_thread; assert(java_thread); assert(data_ptr); - tm_native_thread = vm_jthread_get_tm_data(java_thread); + tm_native_thread = vm_jthread_get_tm_data(java_thread); assert(tm_native_thread); - *data_ptr = hythread_get_private_data (tm_native_thread); + *data_ptr = hythread_get_private_data (tm_native_thread); return TM_ERROR_NONE; } @@ -391,14 +381,14 @@ */ IDATA VMCALL jthread_get_contended_monitor(jthread java_thread, jobject* monitor) { - hythread_t tm_native_thread; - jvmti_thread_t tm_java_thread; + hythread_t tm_native_thread; + jvmti_thread_t tm_java_thread; assert(java_thread); - tm_native_thread = vm_jthread_get_tm_data(java_thread); + tm_native_thread = vm_jthread_get_tm_data(java_thread); tm_java_thread = hythread_get_private_data(tm_native_thread); - assert(tm_java_thread); - *monitor = tm_java_thread->contended_monitor; + assert(tm_java_thread); + *monitor = tm_java_thread->contended_monitor; return TM_ERROR_NONE; } @@ -411,14 +401,14 @@ */ IDATA VMCALL jthread_get_wait_monitor(jthread java_thread, jobject* monitor) { - hythread_t tm_native_thread; - jvmti_thread_t tm_java_thread; + hythread_t tm_native_thread; + jvmti_thread_t tm_java_thread; assert(java_thread); - tm_native_thread = vm_jthread_get_tm_data(java_thread); + tm_native_thread = vm_jthread_get_tm_data(java_thread); tm_java_thread = hythread_get_private_data(tm_native_thread); - assert(tm_java_thread); - *monitor = tm_java_thread->wait_monitor; + assert(tm_java_thread); + *monitor = tm_java_thread->wait_monitor; return TM_ERROR_NONE; } @@ -440,7 +430,7 @@ hythread_suspend_disable(); lockword = vm_object_get_lockword_addr(monitor); tm_native_thread = hythread_thin_monitor_get_owner(lockword); - if (!tm_native_thread){ + if (!tm_native_thread) { *lock_owner = NULL; } else { tm_java_thread = hythread_get_private_data(tm_native_thread); @@ -466,19 +456,19 @@ hythread_thin_monitor_t *lockword; IDATA recursion = 0; - assert(monitor); + assert(monitor); given_thread = owner?vm_jthread_get_tm_data(owner):NULL; - hythread_suspend_disable(); + hythread_suspend_disable(); - lockword = vm_object_get_lockword_addr(monitor); + lockword = vm_object_get_lockword_addr(monitor); lock_owner = hythread_thin_monitor_get_owner(lockword); if (lock_owner && (!given_thread || 
lock_owner->thread_id == given_thread->thread_id)) - recursion = hythread_thin_monitor_get_recursion(lockword); + recursion = hythread_thin_monitor_get_recursion(lockword); - hythread_suspend_enable(); + hythread_suspend_enable(); - return recursion; + return recursion; } /** @@ -489,22 +479,22 @@ * @param[out] monitors_ptr array of owned monitors */ IDATA VMCALL jthread_get_owned_monitors(jthread java_thread, - jint* monitor_count_ptr, jobject** monitors_ptr) { + jint* monitor_count_ptr, jobject** monitors_ptr) { - hythread_t tm_native_thread; - jvmti_thread_t tm_java_thread; + hythread_t tm_native_thread; + jvmti_thread_t tm_java_thread; // apr_pool_t* pool; // apr_status_t apr_status; jobject* monitors; - int i; - IDATA status; - - status =hythread_global_lock(); - if (status != TM_ERROR_NONE) return status; + int i; + IDATA status; + + status =hythread_global_lock(); + if (status != TM_ERROR_NONE) return status; assert(java_thread); - tm_native_thread = vm_jthread_get_tm_data(java_thread); + tm_native_thread = vm_jthread_get_tm_data(java_thread); tm_java_thread = hythread_get_private_data(tm_native_thread); - assert(tm_java_thread); + assert(tm_java_thread); /* apr_status = apr_pool_create(&pool, 0); if (apr_status != APR_SUCCESS) { hythread_global_unlock(); @@ -516,12 +506,12 @@ hythread_global_unlock(); return TM_ERROR_OUT_OF_MEMORY; } - for (i = 0; i < tm_java_thread->owned_monitors_nmb; i++){ - monitors[i] = tm_java_thread->owned_monitors[i]; - } + for (i = 0; i < tm_java_thread->owned_monitors_nmb; i++) { + monitors[i] = tm_java_thread->owned_monitors[i]; + } *monitors_ptr = monitors; - *monitor_count_ptr = tm_java_thread->owned_monitors_nmb; + *monitor_count_ptr = tm_java_thread->owned_monitors_nmb; status = hythread_global_unlock(); - return status; + return status; } Modified: harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_monitors.c URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_monitors.c?view=diff&rev=505384&r1=505383&r2=505384 ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_monitors.c (original) +++ harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_monitors.c Fri Feb 9 09:25:01 2007 @@ -15,11 +15,6 @@ * limitations under the License. 
*/ -/** - * @author Artem Aliev - * @version $Revision: 1.1.2.7 $ - */ - /** * @file thread_ti_monitors.c * @brief JVMTI raw monitors related functions @@ -51,28 +46,29 @@ //// if (!jvmti_monitor_table) { status =hythread_global_lock(); - if (status != TM_ERROR_NONE) return status; + if (status != TM_ERROR_NONE) return status; if (!jvmti_monitor_table) { if (array_create(&jvmti_monitor_table)) { - hythread_global_unlock(); - return TM_ERROR_OUT_OF_MEMORY; + hythread_global_unlock(); + return TM_ERROR_OUT_OF_MEMORY; + } + status = hymutex_create(&jvmti_monitor_table_lock, TM_MUTEX_NESTED); + if (status != TM_ERROR_NONE) { + hythread_global_unlock(); + return status; } - status =hymutex_create(&jvmti_monitor_table_lock, TM_MUTEX_NESTED); - if (status != TM_ERROR_NONE){ - hythread_global_unlock(); - return status; - } } status =hythread_global_unlock(); - if (status != TM_ERROR_NONE) return status; + if (status != TM_ERROR_NONE) return status; } status =hymutex_lock(jvmti_monitor_table_lock); - if (status != TM_ERROR_NONE) return status; + if (status != TM_ERROR_NONE) return status; *mon_ptr = array_add(jvmti_monitor_table, monitor); + status =hymutex_unlock(jvmti_monitor_table_lock); - if (status != TM_ERROR_NONE) return status; + if (status != TM_ERROR_NONE) return status; if (!(*mon_ptr)) return TM_ERROR_OUT_OF_MEMORY; return TM_ERROR_NONE; @@ -92,14 +88,14 @@ return TM_ERROR_INVALID_MONITOR; } - while(hythread_monitor_destroy((hythread_monitor_t)monitor) != TM_ERROR_NONE) { + while (hythread_monitor_destroy((hythread_monitor_t)monitor) != TM_ERROR_NONE) { if ((status = hythread_monitor_exit((hythread_monitor_t)monitor)) != TM_ERROR_NONE) - return status; + return status; } status =hymutex_lock(jvmti_monitor_table_lock); - if (status != TM_ERROR_NONE) return status; - array_delete(jvmti_monitor_table, (UDATA)mon_ptr); + if (status != TM_ERROR_NONE) return status; + array_delete(jvmti_monitor_table, (UDATA)mon_ptr); status =hymutex_unlock(jvmti_monitor_table_lock); return status; } @@ -129,7 +125,7 @@ * @return 0 in case of successful attempt. */ IDATA VMCALL jthread_raw_monitor_try_enter(jrawMonitorID mon_ptr) { - hythread_monitor_t monitor; + hythread_monitor_t monitor; if (!(monitor = (hythread_monitor_t)array_get(jvmti_monitor_table, (UDATA)mon_ptr))) { return TM_ERROR_INVALID_MONITOR; } @@ -197,7 +193,7 @@ if (!(monitor = (hythread_monitor_t)array_get(jvmti_monitor_table, (UDATA)mon_ptr))) { return TM_ERROR_INVALID_MONITOR; } - return hythread_monitor_notify(monitor); + return hythread_monitor_notify(monitor); } /** @@ -213,5 +209,5 @@ if (!(monitor = (hythread_monitor_t)array_get(jvmti_monitor_table, (UDATA)mon_ptr))) { return TM_ERROR_INVALID_MONITOR; } - return hythread_monitor_notify_all(monitor); + return hythread_monitor_notify_all(monitor); } Modified: harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_others.c URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_others.c?view=diff&rev=505384&r1=505383&r2=505384 ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_others.c (original) +++ harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_others.c Fri Feb 9 09:25:01 2007 @@ -15,11 +15,6 @@ * limitations under the License. 
*/ -/** - * @author Sergey Petrovsky - * @version $Revision: 1.1.2.4 $ - */ - /** * @file thread_ti_others.c * @brief JVMTI peak related functions @@ -53,14 +48,14 @@ * * @param[in] java_thread */ -JVMTILocalStorage* jthread_get_jvmti_local_storage(jthread java_thread){ +JVMTILocalStorage* jthread_get_jvmti_local_storage(jthread java_thread) { jvmti_thread_t tm_java_thread; hythread_t tm_native_thread; - tm_native_thread = vm_jthread_get_tm_data(java_thread); - tm_java_thread = hythread_get_private_data(tm_native_thread); + tm_native_thread = vm_jthread_get_tm_data(java_thread); + tm_java_thread = hythread_get_private_data(tm_native_thread); - return &tm_java_thread->jvmti_local_storage; + return &tm_java_thread->jvmti_local_storage; } Modified: harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_timing.c URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_timing.c?view=diff&rev=505384&r1=505383&r2=505384 ============================================================================== --- harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_timing.c (original) +++ harmony/enhanced/drlvm/trunk/vm/thread/src/thread_ti_timing.c Fri Feb 9 09:25:01 2007 @@ -15,11 +15,6 @@ * limitations under the License. */ -/** - * @author Sergey Petrovsky - * @version $Revision: 1.1.2.7 $ - */ - /** * @file thread_ti_timing.c * @brief JVMTI timing related functions @@ -39,13 +34,13 @@ */ IDATA VMCALL jthread_get_thread_blocked_time(jthread java_thread, jlong *nanos_ptr) { - jvmti_thread_t tm_java_thread; + jvmti_thread_t tm_java_thread; hythread_t tm_native_thread; - assert(java_thread); - assert(nanos_ptr); - tm_native_thread = vm_jthread_get_tm_data(java_thread); - tm_java_thread = hythread_get_private_data(tm_native_thread); + assert(java_thread); + assert(nanos_ptr); + tm_native_thread = vm_jthread_get_tm_data(java_thread); + tm_java_thread = hythread_get_private_data(tm_native_thread); *nanos_ptr = tm_java_thread->blocked_time; return TM_ERROR_NONE; @@ -90,13 +85,13 @@ IDATA VMCALL jthread_get_thread_user_cpu_time(jthread java_thread, jlong *nanos_ptr) { hythread_t tm_native_thread; - apr_time_t kernel_time; - apr_time_t user_time; + apr_time_t kernel_time; + apr_time_t user_time; - assert(java_thread); - assert(nanos_ptr); - tm_native_thread = vm_jthread_get_tm_data(java_thread); - apr_thread_times(tm_native_thread->os_handle, &user_time, &kernel_time); + assert(java_thread); + assert(nanos_ptr); + tm_native_thread = vm_jthread_get_tm_data(java_thread); + apr_thread_times(tm_native_thread->os_handle, &user_time, &kernel_time); *nanos_ptr = user_time; return TM_ERROR_NONE; @@ -110,13 +105,13 @@ */ IDATA VMCALL jthread_get_thread_waited_time(jthread java_thread, jlong *nanos_ptr) { - jvmti_thread_t tm_java_thread; + jvmti_thread_t tm_java_thread; hythread_t tm_native_thread; - assert(java_thread); - assert(nanos_ptr); - tm_native_thread = vm_jthread_get_tm_data(java_thread); - tm_java_thread = hythread_get_private_data(tm_native_thread); + assert(java_thread); + assert(nanos_ptr); + tm_native_thread = vm_jthread_get_tm_data(java_thread); + tm_java_thread = hythread_get_private_data(tm_native_thread); *nanos_ptr = tm_java_thread->waited_time; return TM_ERROR_NONE;
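
The jthread_get_deadlocked_threads() hunk in thread_ti_instr.c above detects a deadlock by following, for each candidate thread, the chain "contended monitor -> monitor owner" and reporting the thread once the chain revisits a thread already on it. The standalone sketch below illustrates just that traversal; Thread, blocked_on_owner and get_blocking_owner() are hypothetical simplifications rather than the jthread/hythread API (the real code goes through jthread_get_contended_monitor() and jthread_get_lock_owner(), grows its scratch buffer with deads_expand(), and returns TM_ERROR_* codes).

/* Editorial sketch, not part of r505384: simplified wait-for-chain walk in the
 * spirit of jthread_get_deadlocked_threads(). */
#include <stddef.h>

typedef struct Thread Thread;
struct Thread {
    Thread *blocked_on_owner;  /* owner of the monitor this thread is blocked on, or NULL */
};

/* Hypothetical stand-in for jthread_get_contended_monitor() + jthread_get_lock_owner(). */
static Thread *get_blocking_owner(const Thread *t) {
    return t->blocked_on_owner;
}

/* Returns 1 if 'start' lies on a cycle in the wait-for graph, 0 otherwise. */
static int is_deadlocked(Thread *start, Thread **visited, int capacity) {
    int top = 0;
    Thread *t = start;
    while (t != NULL) {
        int i;
        for (i = 0; i < top; i++) {
            if (visited[i] == t) {
                return 1;              /* the chain came back to a thread already on it: cycle */
            }
        }
        if (top == capacity) {
            return 0;                  /* buffer full; the real code resizes via deads_expand() */
        }
        visited[top++] = t;
        t = get_blocking_owner(t);     /* follow the edge to the contended monitor's owner */
    }
    return 0;                          /* reached an unblocked thread: no cycle from 'start' */
}

A caller would run is_deadlocked() once per entry of thread_list with a visited buffer sized to thread_count, mirroring the outer loop of the real function.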
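
Similarly, the thread_ti_monitors.c hunk creates jvmti_monitor_table lazily: an unsynchronized check, a re-check under hythread_global_lock(), creation of the table plus its own nested mutex, and then per-operation locking of that mutex around array_add()/array_delete(). Below is a hedged sketch of the same shape, using pthreads instead of the hythread/hymutex primitives and a plain malloc'ed array as a stand-in for the array_* helpers; names such as raw_monitor_register() are illustrative only.

/* Editorial sketch, not part of r505384: the lazily initialized, mutex-guarded
 * monitor table pattern from jthread_raw_monitor_create(), with pthreads as a
 * stand-in for hythread_global_lock()/hymutex_*. */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t table_lock;               /* created together with the table */
static void **monitor_table = NULL;              /* stand-in for jvmti_monitor_table */
static size_t table_used = 0;
static size_t table_cap = 0;

static int raw_monitor_register(void *monitor) {
    if (monitor_table == NULL) {                 /* unsynchronized fast-path check */
        pthread_mutex_lock(&global_lock);
        if (monitor_table == NULL) {             /* re-check under the global lock */
            table_cap = 16;
            monitor_table = malloc(table_cap * sizeof(*monitor_table));
            if (monitor_table == NULL) {
                pthread_mutex_unlock(&global_lock);
                return -1;                       /* cf. TM_ERROR_OUT_OF_MEMORY */
            }
            pthread_mutex_init(&table_lock, NULL);
        }
        pthread_mutex_unlock(&global_lock);
    }

    pthread_mutex_lock(&table_lock);             /* every table update takes the table's mutex */
    if (table_used == table_cap) {
        pthread_mutex_unlock(&table_lock);
        return -1;                               /* growth elided; the real code uses array_add() */
    }
    monitor_table[table_used++] = monitor;
    pthread_mutex_unlock(&table_lock);
    return 0;
}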