Gentoo's Bugzilla – Attachment 25410 Details for Bug 32966: Valgrind doesn't work with NPTL pthread implementation
[patch] Diff to make valgrind-2.1.0 compile against nptl version of pthreadtypes.h

Description: Diff to make valgrind-2.1.0 compile against nptl version of pthreadtypes.h
Filename:    nptl.diff
MIME Type:   text/plain
Creator:     Sami Nieminen
Created:     2004-02-11 07:50:27 UTC
Size:        38.55 KB
Flags:       patch, obsolete
diff -Nru coregrind.orig/vg_include.h coregrind/vg_include.h
--- coregrind.orig/vg_include.h	2004-02-11 14:17:21.490342466 +0200
+++ coregrind/vg_include.h	2004-02-11 14:16:16.841610703 +0200
@@ -676,6 +676,41 @@
 
 
 /* ---------------------------------------------------------------------
+   Exports of vg_libpthread.c
+   ------------------------------------------------------------------ */
+
+/* Replacements for pthread types, shared between vg_libpthread.c and
+   vg_scheduler.c.  See comment in vg_libpthread.c above the other
+   vg_pthread_*_t types for a description of how these are used. */
+
+struct _vg_pthread_fastlock
+{
+   long int __vg_status;   /* "Free" or "taken" or head of waiting list */
+   int __vg_spinlock;      /* Used by compare_and_swap emulation.  Also,
+                              adaptive SMP lock stores spin count here. */
+};
+
+typedef struct
+{
+   int __vg_m_reserved;    /* Reserved for future use */
+   int __vg_m_count;       /* Depth of recursive locking */
+   /*_pthread_descr*/ void* __vg_m_owner;   /* Owner thread (if recursive or errcheck) */
+   int __vg_m_kind;        /* Mutex kind: fast, recursive or errcheck */
+   struct _vg_pthread_fastlock __vg_m_lock;  /* Underlying fast lock */
+} vg_pthread_mutex_t;
+
+typedef struct
+{
+   struct _vg_pthread_fastlock __vg_c_lock;  /* Protect against concurrent access */
+   /*_pthread_descr*/ void* __vg_c_waiting;  /* Threads waiting on this condition */
+   // Padding ensures the size is 48 bytes
+   char __vg_padding[48 - sizeof(struct _vg_pthread_fastlock)
+                        - sizeof(void*) - sizeof(long long)];
+   long long __vg_align;
+} vg_pthread_cond_t;
+
+
+/* ---------------------------------------------------------------------
    Exports of vg_scheduler.c
    ------------------------------------------------------------------ */
 
@@ -734,7 +769,7 @@
       When .status == WaitCV, points to the mutex associated with
       the condition variable indicated by the .associated_cv field.
       In all other cases, should be NULL. */
-   void* /*pthread_mutex_t* */ associated_mx;
+   vg_pthread_mutex_t* associated_mx;
 
    /* When .status == WaitCV, points to the condition variable I am
       waiting for.  In all other cases, should be NULL. */
@@ -1011,6 +1046,7 @@
    out what's happening. */
 #define VG_PTHREAD_PREHISTORY   0x80000000
 
+
 /* ---------------------------------------------------------------------
    Exports of vg_signals.c
    ------------------------------------------------------------------ */
diff -Nru coregrind.orig/vg_libpthread.c coregrind/vg_libpthread.c
--- coregrind.orig/vg_libpthread.c	2004-02-11 14:17:21.520338630 +0200
+++ coregrind/vg_libpthread.c	2004-02-11 14:16:16.858608529 +0200
@@ -77,6 +77,88 @@
 
 
 /* ---------------------------------------------------------------------
+   Our own definition of types that vary between LinuxThreads and NPTL.
+   ------------------------------------------------------------------ */
+
+/* Moving from LinuxThreads to NPTL, several crucial types (eg.
+   pthread_mutex_t, pthread_mutexattr_t, etc) were changed in
+   binary-compatible, but source-incompatible, ways.  We can similarly use
+   any layout we want, so long as it's binary-compatible.  However, we can
+   no longer use the LinuxThreads types, because they won't work on NPTL
+   systems.  Thus, we have to introduce a layer of indirection, and define
+   our own versions of these types (vg_pthread_mutex_t, etc).  NPTL does
+   pretty much the same thing, and it keeps many of its internal types
+   secret.
+
+   We can layout our types however we want, as long as we put the small
+   number of fields in the right place for binary compatibility (eg.
+   mutex->kind).  To make life easy, our versions have the exact same layout
+   as the LinuxThreads ones; only the type names and field names are
+   different.
+
+   In our implementation of the pthread operations (pthread_mutex_lock(),
+   pthread_mutexattr_settype(), etc) we always cast the standard pthread
+   types to our own types, (eg. pthread_mutex_t --> vg_pthread_mutex_t),
+   before working with them.
+
+   Note that we have various mutexes (and condvars) in this file that have the
+   type pthread_mutex_t (and pthread_cond_t).  That is fine, because they
+   are always only handled by calling the standard pthread functions (eg.
+   pthread_mutex_lock()) on them.  Phew.
+
+   WARNING: as a result of all this, we should *never* access these standard
+   pthread types as is; they *must* be converted to the vg_pthread_foo_t
+   equivalent.  XXX: how to enforce this?  pre-processor hackery?  (well,
+   it won't compile on any NPTL-only system if not followed...)
+*/
+
+#include <sched.h>   // for 'struct __sched_param'
+
+typedef struct __vg_pthread_attr_s
+{
+   int    __vg_detachstate;
+   int    __vg_schedpolicy;
+   struct __sched_param __vg_schedparam;
+   int    __vg_inheritsched;
+   int    __vg_scope;
+   size_t __vg_guardsize;
+   int    __vg_stackaddr_set;
+   void*  __vg_stackaddr;
+   size_t __vg_stacksize;
+} vg_pthread_attr_t;
+
+typedef struct
+{
+   int __vg_mutexkind;
+} vg_pthread_mutexattr_t;
+
+typedef struct _vg_pthread_rwlock_t
+{
+   struct _vg_pthread_fastlock __vg_rw_lock;       /* Lock to guarantee mutual exclusion */
+   int __vg_rw_readers;                            /* Number of readers */
+   /*_pthread_descr*/ void* __vg_rw_writer;        /* Identity of writer, or NULL if none */
+   /*_pthread_descr*/ void* __vg_rw_read_waiting;  /* Threads waiting for reading */
+   /*_pthread_descr*/ void* __vg_rw_write_waiting; /* Threads waiting for writing */
+   int __vg_rw_kind;                               /* Reader/Writer preference selection */
+   int __vg_rw_pshared;                            /* Shared between processes or not */
+} vg_pthread_rwlock_t;
+
+typedef struct
+{
+   int __vg_lockkind;
+   int __vg_pshared;
+} vg_pthread_rwlockattr_t;
+
+/* Converting pthread types to vg_pthread types.  We always check that the
+   passed-in type is as big as ours, for safety.  We also zero the pointer
+   to the original struct, to ensure we don't accidentally use it again. */
+
+#define CONVERT(foo, x, vg_x) \
+   my_assert(sizeof(*x) >= sizeof(vg_pthread_##foo##_t)); \
+   vg_x = (vg_pthread_##foo##_t*)x; \
+   x = 0;  // ensure we don't accidentally use x again!
+
+/* ---------------------------------------------------------------------
    Forwardses.
    ------------------------------------------------------------------ */
 
@@ -305,28 +387,38 @@
 
 int pthread_attr_init(pthread_attr_t *attr)
 {
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+
    /* Just initialise the fields which we might look at. */
-   attr->__detachstate = PTHREAD_CREATE_JOINABLE;
+   vg_attr->__vg_detachstate = PTHREAD_CREATE_JOINABLE;
    /* Linuxthreads sets this field to the value __getpagesize(), so I
      guess the following is OK. */
-   attr->__guardsize = VKI_BYTES_PER_PAGE;   return 0;
+   vg_attr->__vg_guardsize = VKI_BYTES_PER_PAGE;
+   return 0;
 }
 
 int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
 {
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+
    if (detachstate != PTHREAD_CREATE_JOINABLE
        && detachstate != PTHREAD_CREATE_DETACHED) {
       pthread_error("pthread_attr_setdetachstate: "
                     "detachstate is invalid");
      return EINVAL;
   }
-   attr->__detachstate = detachstate;
+   vg_attr->__vg_detachstate = detachstate;
    return 0;
 }
 
 int pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
 {
-   *detachstate = attr->__detachstate;
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+
+   *detachstate = vg_attr->__vg_detachstate;
    return 0;
 }
 
@@ -344,6 +436,7 @@
 {
    size_t limit;
    char buf[1024];
+
    ensure_valgrind("pthread_attr_setstacksize");
    limit = VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
                                  - 1000; /* paranoia */
@@ -416,25 +509,28 @@
 {
    int    detached;
    size_t limit;
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+
    ensure_valgrind("pthread_getattr_np");
    kludged("pthread_getattr_np", NULL);
    limit = VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
                                  - 1000; /* paranoia */
-   attr->__detachstate = PTHREAD_CREATE_JOINABLE;
-   attr->__schedpolicy = SCHED_OTHER;
-   attr->__schedparam.sched_priority = 0;
-   attr->__inheritsched = PTHREAD_EXPLICIT_SCHED;
-   attr->__scope = PTHREAD_SCOPE_SYSTEM;
-   attr->__guardsize = VKI_BYTES_PER_PAGE;
-   attr->__stackaddr = NULL;
-   attr->__stackaddr_set = 0;
-   attr->__stacksize = limit;
+   vg_attr->__vg_detachstate = PTHREAD_CREATE_JOINABLE;
+   vg_attr->__vg_schedpolicy = SCHED_OTHER;
+   vg_attr->__vg_schedparam.sched_priority = 0;
+   vg_attr->__vg_inheritsched = PTHREAD_EXPLICIT_SCHED;
+   vg_attr->__vg_scope = PTHREAD_SCOPE_SYSTEM;
+   vg_attr->__vg_guardsize = VKI_BYTES_PER_PAGE;
+   vg_attr->__vg_stackaddr = NULL;
+   vg_attr->__vg_stackaddr_set = 0;
+   vg_attr->__vg_stacksize = limit;
    VALGRIND_MAGIC_SEQUENCE(detached, (-1) /* default */,
                            VG_USERREQ__SET_OR_GET_DETACH,
                            2 /* get */, thread, 0, 0);
    my_assert(detached == 0 || detached == 1);
    if (detached)
-      attr->__detachstate = PTHREAD_CREATE_DETACHED;
+      vg_attr->__vg_detachstate = PTHREAD_CREATE_DETACHED;
    return 0;
 }
 
@@ -467,16 +563,20 @@
 
 int pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
 {
-  if (policy != SCHED_OTHER && policy != SCHED_FIFO && policy != SCHED_RR)
-    return EINVAL;
-  attr->__schedpolicy = policy;
-  return 0;
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+   if (policy != SCHED_OTHER && policy != SCHED_FIFO && policy != SCHED_RR)
+      return EINVAL;
+   vg_attr->__vg_schedpolicy = policy;
+   return 0;
 }
 
 int pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
 {
-  *policy = attr->__schedpolicy;
-  return 0;
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+   *policy = vg_attr->__vg_schedpolicy;
+   return 0;
 }
 
 
@@ -502,7 +602,9 @@
 WEAK
 int pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
 {
-   *guardsize = attr->__guardsize;
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+   *guardsize = vg_attr->__vg_guardsize;
    return 0;
 }
 
@@ -699,6 +801,8 @@
 {
    int            tid_child;
    NewThreadInfo* info;
+   vg_pthread_attr_t* __vg_attr;
+   CONVERT(attr, __attr, __vg_attr);
 
    ensure_valgrind("pthread_create");
 
@@ -711,8 +815,8 @@
    info = my_malloc(sizeof(NewThreadInfo));
    my_assert(info != NULL);
 
-   if (__attr)
-      info->attr__detachstate = __attr->__detachstate;
+   if (__vg_attr)
+      info->attr__detachstate = __vg_attr->__vg_detachstate;
    else
       info->attr__detachstate = PTHREAD_CREATE_JOINABLE;
 
@@ -874,12 +978,17 @@
 
 int __pthread_mutexattr_init(pthread_mutexattr_t *attr)
 {
-   attr->__mutexkind = PTHREAD_MUTEX_ERRORCHECK_NP;
+   vg_pthread_mutexattr_t* vg_attr;
+   CONVERT(mutexattr, attr, vg_attr);
+   vg_attr->__vg_mutexkind = PTHREAD_MUTEX_ERRORCHECK_NP;
    return 0;
 }
 
 int __pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
 {
+   vg_pthread_mutexattr_t* vg_attr;
+   CONVERT(mutexattr, attr, vg_attr);
+
    switch (type) {
 #     ifndef GLIBC_2_1
       case PTHREAD_MUTEX_TIMED_NP:
@@ -890,7 +999,7 @@
 #     endif
       case PTHREAD_MUTEX_RECURSIVE_NP:
       case PTHREAD_MUTEX_ERRORCHECK_NP:
-         attr->__mutexkind = type;
+         vg_attr->__vg_mutexkind = type;
          return 0;
       default:
          pthread_error("pthread_mutexattr_settype: "
@@ -924,11 +1033,16 @@
 int __pthread_mutex_init(pthread_mutex_t *mutex,
                          const pthread_mutexattr_t *mutexattr)
 {
-   mutex->__m_count = 0;
-   mutex->__m_owner = (_pthread_descr)VG_INVALID_THREADID;
-   mutex->__m_kind  = PTHREAD_MUTEX_ERRORCHECK_NP;
-   if (mutexattr)
-      mutex->__m_kind = mutexattr->__mutexkind;
+   vg_pthread_mutex_t* vg_mutex;
+   vg_pthread_mutexattr_t* vg_mutexattr;
+   CONVERT(mutex, mutex, vg_mutex);
+   CONVERT(mutexattr, mutexattr, vg_mutexattr);
+
+   vg_mutex->__vg_m_count = 0;
+   vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)VG_INVALID_THREADID;
+   vg_mutex->__vg_m_kind  = PTHREAD_MUTEX_ERRORCHECK_NP;
+   if (vg_mutexattr)
+      vg_mutex->__vg_m_kind = vg_mutexattr->__vg_mutexkind;
    return 0;
 }
 
@@ -936,19 +1050,21 @@
 int __pthread_mutex_lock(pthread_mutex_t *mutex)
 {
    int res;
-
+   vg_pthread_mutex_t* vg_mutex;
+   CONVERT(mutex, mutex, vg_mutex);
+
    if (RUNNING_ON_VALGRIND) {
       VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
                               VG_USERREQ__PTHREAD_MUTEX_LOCK,
-                              mutex, 0, 0, 0);
+                              vg_mutex, 0, 0, 0);
       return res;
    } else {
       /* Play at locking */
      if (0)
         kludged("prehistoric lock", NULL);
-      mutex->__m_owner = (_pthread_descr)1;
-      mutex->__m_count = 1;
-      mutex->__m_kind |= VG_PTHREAD_PREHISTORY;
+      vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)1;
+      vg_mutex->__vg_m_count = 1;
+      vg_mutex->__vg_m_kind |= VG_PTHREAD_PREHISTORY;
      return 0; /* success */
    }
 }
@@ -957,19 +1073,21 @@
 int __pthread_mutex_trylock(pthread_mutex_t *mutex)
 {
    int res;
-
+   vg_pthread_mutex_t* vg_mutex;
+   CONVERT(mutex, mutex, vg_mutex);
+
    if (RUNNING_ON_VALGRIND) {
      VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
                              VG_USERREQ__PTHREAD_MUTEX_TRYLOCK,
-                             mutex, 0, 0, 0);
+                             vg_mutex, 0, 0, 0);
      return res;
    } else {
      /* Play at locking */
      if (0)
         kludged("prehistoric trylock", NULL);
-      mutex->__m_owner = (_pthread_descr)1;
-      mutex->__m_count = 1;
-      mutex->__m_kind |= VG_PTHREAD_PREHISTORY;
+      vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)1;
+      vg_mutex->__vg_m_count = 1;
+      vg_mutex->__vg_m_kind |= VG_PTHREAD_PREHISTORY;
      return 0; /* success */
    }
 }
@@ -978,19 +1096,21 @@
 int __pthread_mutex_unlock(pthread_mutex_t *mutex)
 {
    int res;
-
+   vg_pthread_mutex_t* vg_mutex;
+   CONVERT(mutex, mutex, vg_mutex);
+
    if (RUNNING_ON_VALGRIND) {
      VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
                              VG_USERREQ__PTHREAD_MUTEX_UNLOCK,
-                             mutex, 0, 0, 0);
+                             vg_mutex, 0, 0, 0);
      return res;
    } else {
      /* Play at locking */
      if (0)
         kludged("prehistoric unlock", NULL);
-      mutex->__m_owner = 0;
-      mutex->__m_count = 0;
-      mutex->__m_kind &= ~VG_PTHREAD_PREHISTORY;
+      vg_mutex->__vg_m_owner = 0;
+      vg_mutex->__vg_m_count = 0;
+      vg_mutex->__vg_m_kind &= ~VG_PTHREAD_PREHISTORY;
      return 0; /* success */
    }
 }
 
@@ -998,20 +1118,22 @@
 
 int __pthread_mutex_destroy(pthread_mutex_t *mutex)
 {
+   vg_pthread_mutex_t* vg_mutex;
+   CONVERT(mutex, mutex, vg_mutex);
+
    /* Valgrind doesn't hold any resources on behalf of the mutex, so no
      need to involve it. */
-   if (mutex->__m_count > 0) {
+   if (vg_mutex->__vg_m_count > 0) {
      /* Oh, the horror.  glibc's internal use of pthreads "knows"
         that destroying a lock does an implicit unlock.  Make it
         explicit. */
-      __pthread_mutex_unlock(mutex);
-      pthread_error("pthread_mutex_destroy: "
-                    "mutex is still in use");
+      __pthread_mutex_unlock( (pthread_mutex_t*)vg_mutex );
+      pthread_error("pthread_mutex_destroy: mutex is still in use");
      return EBUSY;
   }
-   mutex->__m_count = 0;
-   mutex->__m_owner = (_pthread_descr)VG_INVALID_THREADID;
-   mutex->__m_kind  = PTHREAD_MUTEX_ERRORCHECK_NP;
+   vg_mutex->__vg_m_count = 0;
+   vg_mutex->__vg_m_owner = (/*_pthread_descr*/void*)VG_INVALID_THREADID;
+   vg_mutex->__vg_m_kind  = PTHREAD_MUTEX_ERRORCHECK_NP;
    return 0;
 }
 
@@ -1035,7 +1157,9 @@
 int pthread_cond_init( pthread_cond_t *cond,
                        const pthread_condattr_t *cond_attr)
 {
-   cond->__c_waiting = (_pthread_descr)VG_INVALID_THREADID;
+   vg_pthread_cond_t* vg_cond;
+   CONVERT(cond, cond, vg_cond);
+   vg_cond->__vg_c_waiting = (/*_pthread_descr*/void*)VG_INVALID_THREADID;
    return 0;
 }
 
@@ -1083,10 +1207,13 @@
 int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 {
    int res;
+   vg_pthread_mutex_t* vg_mutex;
+   CONVERT(mutex, mutex, vg_mutex);
+
    ensure_valgrind("pthread_cond_wait");
    VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
                            VG_USERREQ__PTHREAD_COND_WAIT,
-                           cond, mutex, 0, 0);
+                           cond, vg_mutex, 0, 0);
    return res;
 }
 
@@ -1099,6 +1226,8 @@
    struct timeval timeval_now;
    unsigned long long int ull_ms_now_after_1970;
    unsigned long long int ull_ms_end_after_1970;
+   vg_pthread_mutex_t* vg_mutex;
+   CONVERT(mutex, mutex, vg_mutex);
 
    ensure_valgrind("pthread_cond_timedwait");
    VALGRIND_MAGIC_SEQUENCE(ms_now, 0xFFFFFFFF /* default */,
@@ -1120,7 +1249,7 @@
      = ms_now + (unsigned int)(ull_ms_end_after_1970 - ull_ms_now_after_1970);
    VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
                            VG_USERREQ__PTHREAD_COND_TIMEDWAIT,
-                           cond, mutex, ms_end, 0);
+                           cond, vg_mutex, ms_end, 0);
    return res;
 }
 
@@ -2465,14 +2594,16 @@
 /* Take the address of a LinuxThreads rwlock_t and return the shadow
    address of our version.  Further, if the LinuxThreads version
    appears to have been statically initialised, do the same to the one
-   we allocate here.  The pthread_rwlock_t.__rw_readers field is set
-   to zero by PTHREAD_RWLOCK_INITIALIZER, so we take zero as meaning
-   uninitialised and non-zero meaning initialised.
+   we allocate here.  The vg_pthread_rwlock_t.__vg_rw_readers field is set
+   to zero by PTHREAD_RWLOCK_INITIALIZER (as are several other fields), so
+   we take zero as meaning uninitialised and non-zero meaning initialised.
 */
 static vg_rwlock_t* rw_remap ( pthread_rwlock_t* orig )
 {
    int          res, i;
    vg_rwlock_t* vg_rwl;
+   vg_pthread_rwlock_t* vg_orig;
+
    res = __pthread_mutex_lock(&rw_remap_mx);
    my_assert(res == 0);
 
@@ -2496,10 +2627,11 @@
    vg_rwl = &rw_remap_new[i];
 
    /* Initialise the shadow, if required. */
-   if (orig->__rw_readers == 0) {
-      orig->__rw_readers = 1;
+   CONVERT(rwlock, orig, vg_orig);
+   if (vg_orig->__vg_rw_readers == 0) {
+      vg_orig->__vg_rw_readers = 1;
      init_vg_rwlock(vg_rwl);
-      if (orig->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
+      if (vg_orig->__vg_rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
         vg_rwl->prefer_w = 0;
   }
 
@@ -2511,14 +2643,19 @@
                          const pthread_rwlockattr_t* attr )
 {
    vg_rwlock_t* rwl;
+   vg_pthread_rwlock_t* vg_orig;
+   vg_pthread_rwlockattr_t* vg_attr;
+   CONVERT(rwlock, orig, vg_orig);
+   CONVERT(rwlockattr, attr, vg_attr);
+
    if (0) printf ("pthread_rwlock_init\n");
    /* Force the remapper to initialise the shadow. */
-   orig->__rw_readers = 0;
+   vg_orig->__vg_rw_readers = 0;
    /* Install the lock preference; the remapper needs to know it. */
-   orig->__rw_kind = PTHREAD_RWLOCK_DEFAULT_NP;
-   if (attr)
-      orig->__rw_kind = attr->__lockkind;
-   rwl = rw_remap ( orig );
+   vg_orig->__vg_rw_kind = PTHREAD_RWLOCK_DEFAULT_NP;
+   if (vg_attr)
+      vg_orig->__vg_rw_kind = vg_attr->__vg_lockkind;
+   rwl = rw_remap ( (pthread_rwlock_t*)vg_orig );
    return 0;
 }
 
@@ -2536,6 +2673,7 @@
 {
    int          res;
    vg_rwlock_t* rwl;
+
    if (0) printf ("pthread_rwlock_rdlock\n");
    rwl = rw_remap ( orig );
    res = __pthread_mutex_lock(&rwl->mx);
@@ -2569,6 +2707,7 @@
 {
    int          res;
    vg_rwlock_t* rwl;
+
    if (0) printf ("pthread_rwlock_tryrdlock\n");
    rwl = rw_remap ( orig );
    res = __pthread_mutex_lock(&rwl->mx);
@@ -2606,6 +2745,7 @@
 {
    int          res;
    vg_rwlock_t* rwl;
+
    if (0) printf ("pthread_rwlock_wrlock\n");
    rwl = rw_remap ( orig );
    res = __pthread_mutex_lock(&rwl->mx);
@@ -2767,10 +2907,11 @@
 int
 pthread_rwlockattr_init (pthread_rwlockattr_t *attr)
 {
-  attr->__lockkind = 0;
-  attr->__pshared = PTHREAD_PROCESS_PRIVATE;
-
-  return 0;
+  vg_pthread_rwlockattr_t* vg_attr;
+  CONVERT(rwlockattr, attr, vg_attr);
+  vg_attr->__vg_lockkind = 0;
+  vg_attr->__vg_pshared = PTHREAD_PROCESS_PRIVATE;
+  return 0;
 }
 
 /* Copied directly from LinuxThreads. */
@@ -2784,16 +2925,19 @@
 int
 pthread_rwlockattr_setpshared (pthread_rwlockattr_t *attr, int pshared)
 {
-  if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
-    return EINVAL;
+  vg_pthread_rwlockattr_t* vg_attr;
+  CONVERT(rwlockattr, attr, vg_attr);
 
-  /* For now it is not possible to shared a conditional variable.  */
-  if (pshared != PTHREAD_PROCESS_PRIVATE)
-    return ENOSYS;
+  if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
+    return EINVAL;
 
-  attr->__pshared = pshared;
+  /* For now it is not possible to shared a conditional variable.  */
+  if (pshared != PTHREAD_PROCESS_PRIVATE)
+    return ENOSYS;
 
-  return 0;
+  vg_attr->__vg_pshared = pshared;
+
+  return 0;
 }
 
 
diff -Nru coregrind.orig/vg_scheduler.c coregrind/vg_scheduler.c
--- coregrind.orig/vg_scheduler.c	2004-02-11 14:17:21.520338630 +0200
+++ coregrind/vg_scheduler.c	2004-02-11 14:16:16.858608529 +0200
@@ -1921,39 +1921,26 @@
    MUTEXes
    -------------------------------------------------------- */
 
-/* pthread_mutex_t is a struct with at 5 words:
-   typedef struct
-   {
-      int __m_reserved;         -- Reserved for future use
-      int __m_count;            -- Depth of recursive locking
-      _pthread_descr __m_owner; -- Owner thread (if recursive or errcheck)
-      int __m_kind;             -- Mutex kind: fast, recursive or errcheck
-      struct _pthread_fastlock __m_lock;  -- Underlying fast lock
-   } pthread_mutex_t;
-
-   #define PTHREAD_MUTEX_INITIALIZER \
-     {0, 0, 0, PTHREAD_MUTEX_TIMED_NP, __LOCK_INITIALIZER}
-   # define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
-     {0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __LOCK_INITIALIZER}
-   # define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
-     {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, __LOCK_INITIALIZER}
-   # define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
-     {0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, __LOCK_INITIALIZER}
+/* vg_pthread_mutex_t is defined in vg_include.h.
+
+   The initializers zero everything, except possibly the fourth word,
+   which in vg_pthread_mutex_t is the __vg_m_kind field.  It gets set to one
+   of PTHREAD_MUTEX_{TIMED,RECURSIVE,ERRORCHECK,ADAPTIVE}_NP
 
    How we use it:
 
-   __m_kind  never changes and indicates whether or not it is recursive.
+   __vg_m_kind  never changes and indicates whether or not it is recursive.
 
-   __m_count indicates the lock count; if 0, the mutex is not owned by
+   __vg_m_count indicates the lock count; if 0, the mutex is not owned by
    anybody.
 
-   __m_owner has a ThreadId value stuffed into it.  We carefully arrange
+   __vg_m_owner has a ThreadId value stuffed into it.  We carefully arrange
    that ThreadId == 0 is invalid (VG_INVALID_THREADID), so that
    statically initialised mutexes correctly appear
    to belong to nobody.
 
-   In summary, a not-in-use mutex is distinguised by having __m_owner
-   == 0 (VG_INVALID_THREADID) and __m_count == 0 too.  If one of those
+   In summary, a not-in-use mutex is distinguised by having __vg_m_owner
+   == 0 (VG_INVALID_THREADID) and __vg_m_count == 0 too.  If one of those
    conditions holds, the other should too.
 
    There is no linked list of threads waiting for this mutex.  Instead
@@ -1967,7 +1954,7 @@
 
 /* Helper fns ... */
 static
-void release_one_thread_waiting_on_mutex ( pthread_mutex_t* mutex,
+void release_one_thread_waiting_on_mutex ( vg_pthread_mutex_t* mutex,
                                            Char* caller )
 {
    Int  i;
@@ -1983,19 +1970,19 @@
         break;
   }
 
-   VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__m_owner, mutex );
+   VG_TRACK( post_mutex_unlock, (ThreadId)mutex->__vg_m_owner, mutex );
 
    vg_assert(i <= VG_N_THREADS);
    if (i == VG_N_THREADS) {
      /* Nobody else is waiting on it. */
-      mutex->__m_count = 0;
-      mutex->__m_owner = VG_INVALID_THREADID;
+      mutex->__vg_m_count = 0;
+      mutex->__vg_m_owner = VG_INVALID_THREADID;
   } else {
      /* Notionally transfer the hold to thread i, whose
         pthread_mutex_lock() call now returns with 0 (success). */
      /* The .count is already == 1. */
      vg_assert(VG_(threads)[i].associated_mx == mutex);
-      mutex->__m_owner = (_pthread_descr)i;
+      mutex->__vg_m_owner = (/*_pthread_descr*/void*)i;
      VG_(threads)[i].status        = VgTs_Runnable;
      VG_(threads)[i].associated_mx = NULL;
      /* m_edx already holds pth_mx_lock() success (0) */
@@ -2014,7 +2001,7 @@
 static
 void do_pthread_mutex_lock( ThreadId tid,
                             Bool is_trylock,
-                            pthread_mutex_t* mutex )
+                            vg_pthread_mutex_t* mutex )
 {
    Char  msg_buf[100];
    Char* caller
@@ -2039,7 +2026,7 @@
   }
 
    /* More paranoia ... */
-   switch (mutex->__m_kind) {
+   switch (mutex->__vg_m_kind) {
 #     ifndef GLIBC_2_1
       case PTHREAD_MUTEX_TIMED_NP:
       case PTHREAD_MUTEX_ADAPTIVE_NP:
@@ -2049,7 +2036,7 @@
 #     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
-         if (mutex->__m_count >= 0) break;
+         if (mutex->__vg_m_count >= 0) break;
        /* else fall thru */
      default:
         VG_(record_pthread_error)( tid,
@@ -2058,20 +2045,20 @@
        return;
   }
 
-   if (mutex->__m_count > 0) {
+   if (mutex->__vg_m_count > 0) {
 
-      vg_assert(VG_(is_valid_tid)((ThreadId)mutex->__m_owner));
+      vg_assert(VG_(is_valid_tid)((ThreadId)mutex->__vg_m_owner));
 
      /* Someone has it already. */
-      if ((ThreadId)mutex->__m_owner == tid) {
+      if ((ThreadId)mutex->__vg_m_owner == tid) {
        /* It's locked -- by me! */
-         if (mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
+         if (mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
           /* return 0 (success). */
-            mutex->__m_count++;
+            mutex->__vg_m_count++;
           SET_PTHREQ_RETVAL(tid, 0);
           if (0)
              VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
-                           tid, mutex, mutex->__m_count);
+                           tid, mutex, mutex->__vg_m_count);
           return;
        } else {
           if (is_trylock)
@@ -2083,7 +2070,7 @@
      } else {
        /* Someone else has it; we have to wait.  Mark ourselves
           thusly. */
-         /* GUARD: __m_count > 0 && __m_owner is valid */
+         /* GUARD: __vg_m_count > 0 && __vg_m_owner is valid */
        if (is_trylock) {
           /* caller is polling; so return immediately. */
           SET_PTHREQ_RETVAL(tid, EBUSY);
@@ -2104,13 +2091,13 @@
 
   } else {
      /* Nobody owns it.  Sanity check ... */
-      vg_assert(mutex->__m_owner == VG_INVALID_THREADID);
+      vg_assert(mutex->__vg_m_owner == VG_INVALID_THREADID);
 
      VG_TRACK ( pre_mutex_lock, tid, mutex );
 
      /* We get it! [for the first time]. */
-      mutex->__m_count = 1;
-      mutex->__m_owner = (_pthread_descr)tid;
+      mutex->__vg_m_count = 1;
+      mutex->__vg_m_owner = (/*_pthread_descr*/void*)tid;
 
      /* return 0 (success). */
      SET_PTHREQ_RETVAL(tid, 0);
@@ -2122,7 +2109,7 @@
 
 static
 void do_pthread_mutex_unlock ( ThreadId tid,
-                               pthread_mutex_t* mutex )
+                               vg_pthread_mutex_t* mutex )
 {
    Char msg_buf[100];
 
@@ -2144,14 +2131,14 @@
 
    /* If this was locked before the dawn of time, pretend it was
      locked now so that it balances with unlocks */
-   if (mutex->__m_kind & VG_PTHREAD_PREHISTORY) {
-      mutex->__m_kind &= ~VG_PTHREAD_PREHISTORY;
-      VG_TRACK( pre_mutex_lock, (ThreadId)mutex->__m_owner, mutex );
-      VG_TRACK( post_mutex_lock, (ThreadId)mutex->__m_owner, mutex );
+   if (mutex->__vg_m_kind & VG_PTHREAD_PREHISTORY) {
+      mutex->__vg_m_kind &= ~VG_PTHREAD_PREHISTORY;
+      VG_TRACK( pre_mutex_lock, (ThreadId)mutex->__vg_m_owner, mutex );
+      VG_TRACK( post_mutex_lock, (ThreadId)mutex->__vg_m_owner, mutex );
   }
 
    /* More paranoia ... */
-   switch (mutex->__m_kind) {
+   switch (mutex->__vg_m_kind) {
 #     ifndef GLIBC_2_1
      case PTHREAD_MUTEX_TIMED_NP:
      case PTHREAD_MUTEX_ADAPTIVE_NP:
@@ -2161,7 +2148,7 @@
 #     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
-         if (mutex->__m_count >= 0) break;
+         if (mutex->__vg_m_count >= 0) break;
        /* else fall thru */
      default:
         VG_(record_pthread_error)( tid,
@@ -2171,7 +2158,7 @@
   }
 
    /* Barf if we don't currently hold the mutex. */
-   if (mutex->__m_count == 0) {
+   if (mutex->__vg_m_count == 0) {
      /* nobody holds it */
      VG_(record_pthread_error)( tid,
         "pthread_mutex_unlock: mutex is not locked");
@@ -2179,7 +2166,7 @@
      return;
   }
 
-   if ((ThreadId)mutex->__m_owner != tid) {
+   if ((ThreadId)mutex->__vg_m_owner != tid) {
      /* we don't hold it */
      VG_(record_pthread_error)( tid,
         "pthread_mutex_unlock: mutex is locked by a different thread");
@@ -2189,17 +2176,17 @@
 
    /* If it's a multiply-locked recursive mutex, just decrement the
      lock count and return. */
-   if (mutex->__m_count > 1) {
-      vg_assert(mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
-      mutex->__m_count --;
+   if (mutex->__vg_m_count > 1) {
+      vg_assert(mutex->__vg_m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
+      mutex->__vg_m_count --;
      SET_PTHREQ_RETVAL(tid, 0); /* success */
      return;
   }
 
    /* Now we're sure it is locked exactly once, and by the thread who
      is now doing an unlock on it. */
-   vg_assert(mutex->__m_count == 1);
-   vg_assert((ThreadId)mutex->__m_owner == tid);
+   vg_assert(mutex->__vg_m_count == 1);
+   vg_assert((ThreadId)mutex->__vg_m_owner == tid);
 
    /* Release at max one thread waiting on this mutex. */
    release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_lock" );
@@ -2213,26 +2200,11 @@
    CONDITION VARIABLES
    -------------------------------------------------------- */
 
-/* The relevant native types are as follows:
-   (copied from /usr/include/bits/pthreadtypes.h)
-
-   -- Conditions (not abstract because of PTHREAD_COND_INITIALIZER
-   typedef struct
-   {
-      struct _pthread_fastlock __c_lock; -- Protect against concurrent access
-      _pthread_descr __c_waiting;        -- Threads waiting on this condition
-   } pthread_cond_t;
-
-   -- Attribute for conditionally variables.
-   typedef struct
-   {
-      int __dummy;
-   } pthread_condattr_t;
-
-   #define PTHREAD_COND_INITIALIZER {__LOCK_INITIALIZER, 0}
+/* The relevant type (vg_pthread_cond_t) is in vg_include.h.
 
-   We don't use any fields of pthread_cond_t for anything at all.
-   Only the identity of the CVs is important.
+   We don't use any fields of vg_pthread_cond_t for anything at all.
+   Only the identity of the CVs is important.  (Actually, we initialise
+   __vg_c_waiting in pthread_cond_init() to VG_INVALID_THREADID.)
 
    Linux pthreads supports no attributes on condition variables, so we
    don't need to think too hard there. */
@@ -2242,8 +2214,8 @@
 void do_pthread_cond_timedwait_TIMEOUT ( ThreadId tid )
 {
    Char             msg_buf[100];
-   pthread_mutex_t* mx;
-   pthread_cond_t*  cv;
+   vg_pthread_mutex_t* mx;
+   vg_pthread_cond_t*  cv;
 
    vg_assert(VG_(is_valid_tid)(tid)
              && VG_(threads)[tid].status == VgTs_WaitCV
@@ -2253,27 +2225,27 @@
    cv = VG_(threads)[tid].associated_cv;
    vg_assert(cv != NULL);
 
-   if (mx->__m_owner == VG_INVALID_THREADID) {
+   if (mx->__vg_m_owner == VG_INVALID_THREADID) {
      /* Currently unheld; hand it out to thread tid. */
-      vg_assert(mx->__m_count == 0);
+      vg_assert(mx->__vg_m_count == 0);
      VG_(threads)[tid].status        = VgTs_Runnable;
      SET_PTHREQ_RETVAL(tid, ETIMEDOUT);  /* pthread_cond_wait return value */
      VG_(threads)[tid].associated_cv = NULL;
      VG_(threads)[tid].associated_mx = NULL;
-      mx->__m_owner = (_pthread_descr)tid;
-      mx->__m_count = 1;
+      mx->__vg_m_owner = (/*_pthread_descr*/void*)tid;
+      mx->__vg_m_count = 1;
 
      VG_TRACK( post_mutex_lock, tid, mx );
 
      if (VG_(clo_trace_pthread_level) >= 1) {
        VG_(sprintf)(msg_buf,
-            "pthread_cond_timedwai cv %p: TIMEOUT with mx %p",
+            "pthread_cond_timedwait cv %p: TIMEOUT with mx %p",
           cv, mx );
        print_pthread_event(tid, msg_buf);
     }
   } else {
      /* Currently held.  Make thread tid be blocked on it. */
-      vg_assert(mx->__m_count > 0);
+      vg_assert(mx->__vg_m_count > 0);
      VG_TRACK( pre_mutex_lock, tid, mx );
 
      VG_(threads)[tid].status        = VgTs_WaitMX;
@@ -2282,7 +2254,7 @@
      VG_(threads)[tid].associated_mx = mx;
      if (VG_(clo_trace_pthread_level) >= 1) {
        VG_(sprintf)(msg_buf,
-            "pthread_cond_timedwai cv %p: TIMEOUT -> BLOCK for mx %p",
+            "pthread_cond_timedwait cv %p: TIMEOUT -> BLOCK for mx %p",
           cv, mx );
        print_pthread_event(tid, msg_buf);
     }
@@ -2291,13 +2263,13 @@
 
 
 static
-void release_N_threads_waiting_on_cond ( pthread_cond_t* cond,
+void release_N_threads_waiting_on_cond ( vg_pthread_cond_t* cond,
                                          Int n_to_release,
                                          Char* caller )
 {
    Int              i;
    Char             msg_buf[100];
-   pthread_mutex_t* mx;
+   vg_pthread_mutex_t* mx;
 
    while (True) {
      if (n_to_release == 0)
@@ -2323,14 +2295,14 @@
 
      VG_TRACK( pre_mutex_lock, i, mx );
 
-      if (mx->__m_owner == VG_INVALID_THREADID) {
+      if (mx->__vg_m_owner == VG_INVALID_THREADID) {
        /* Currently unheld; hand it out to thread i. */
-         vg_assert(mx->__m_count == 0);
+         vg_assert(mx->__vg_m_count == 0);
        VG_(threads)[i].status        = VgTs_Runnable;
        VG_(threads)[i].associated_cv = NULL;
        VG_(threads)[i].associated_mx = NULL;
-         mx->__m_owner = (_pthread_descr)i;
-         mx->__m_count = 1;
+         mx->__vg_m_owner = (/*_pthread_descr*/void*)i;
+         mx->__vg_m_count = 1;
        /* .m_edx already holds pth_cond_wait success value (0) */
 
        VG_TRACK( post_mutex_lock, i, mx );
@@ -2343,7 +2315,7 @@
 
      } else {
        /* Currently held.  Make thread i be blocked on it. */
-         vg_assert(mx->__m_count > 0);
+         vg_assert(mx->__vg_m_count > 0);
        VG_(threads)[i].status        = VgTs_WaitMX;
        VG_(threads)[i].associated_cv = NULL;
        VG_(threads)[i].associated_mx = mx;
@@ -2364,8 +2336,8 @@
 
 static
 void do_pthread_cond_wait ( ThreadId tid,
-                            pthread_cond_t *cond,
-                            pthread_mutex_t *mutex,
+                            vg_pthread_cond_t *cond,
+                            vg_pthread_mutex_t *mutex,
                             UInt ms_end )
 {
    Char msg_buf[100];
@@ -2392,7 +2364,7 @@
   }
 
    /* More paranoia ... */
-   switch (mutex->__m_kind) {
+   switch (mutex->__vg_m_kind) {
 #     ifndef GLIBC_2_1
      case PTHREAD_MUTEX_TIMED_NP:
      case PTHREAD_MUTEX_ADAPTIVE_NP:
@@ -2402,7 +2374,7 @@
 #     endif
      case PTHREAD_MUTEX_RECURSIVE_NP:
      case PTHREAD_MUTEX_ERRORCHECK_NP:
-         if (mutex->__m_count >= 0) break;
+         if (mutex->__vg_m_count >= 0) break;
        /* else fall thru */
      default:
         VG_(record_pthread_error)( tid,
@@ -2412,8 +2384,8 @@
   }
 
    /* Barf if we don't currently hold the mutex. */
-   if (mutex->__m_count == 0 /* nobody holds it */
-       || (ThreadId)mutex->__m_owner != tid /* we don't hold it */) {
+   if (mutex->__vg_m_count == 0 /* nobody holds it */
+       || (ThreadId)mutex->__vg_m_owner != tid /* we don't hold it */) {
        VG_(record_pthread_error)( tid,
           "pthread_cond_wait/timedwait: mutex is unlocked "
           "or is locked but not owned by thread");
@@ -2444,7 +2416,7 @@
 static
 void do_pthread_cond_signal_or_broadcast ( ThreadId tid,
                                            Bool broadcast,
-                                           pthread_cond_t *cond )
+                                           vg_pthread_cond_t *cond )
 {
    Char  msg_buf[100];
    Char* caller
@@ -2983,15 +2955,15 @@
 
      case VG_USERREQ__PTHREAD_COND_WAIT:
         do_pthread_cond_wait( tid,
-                               (pthread_cond_t *)(arg[1]),
-                               (pthread_mutex_t *)(arg[2]),
+                               (vg_pthread_cond_t *)(arg[1]),
+                               (vg_pthread_mutex_t *)(arg[2]),
                               0xFFFFFFFF /* no timeout */ );
        break;
 
      case VG_USERREQ__PTHREAD_COND_TIMEDWAIT:
         do_pthread_cond_wait( tid,
-                               (pthread_cond_t *)(arg[1]),
-                               (pthread_mutex_t *)(arg[2]),
+                               (vg_pthread_cond_t *)(arg[1]),
+                               (vg_pthread_mutex_t *)(arg[2]),
                               arg[3] /* timeout millisecond point */ );
        break;
 
@@ -2999,14 +2971,14 @@
         do_pthread_cond_signal_or_broadcast(
            tid,
           False, /* signal, not broadcast */
-           (pthread_cond_t *)(arg[1]) );
+           (vg_pthread_cond_t *)(arg[1]) );
        break;
 
      case VG_USERREQ__PTHREAD_COND_BROADCAST:
         do_pthread_cond_signal_or_broadcast(
            tid,
           True, /* broadcast, not signal */
-           (pthread_cond_t *)(arg[1]) );
+           (vg_pthread_cond_t *)(arg[1]) );
        break;
 
      case VG_USERREQ__PTHREAD_KEY_VALIDATE:
@@ -3195,8 +3167,8 @@
 static
 void scheduler_sanity ( void )
 {
-   pthread_mutex_t* mx;
-   pthread_cond_t*  cv;
+   vg_pthread_mutex_t* mx;
+   vg_pthread_cond_t*  cv;
    Int              i;
    struct timeout*  top;
    UInt             lasttime = 0;
@@ -3235,9 +3207,9 @@
           Possibly to do with signals. */
        vg_assert(cv == NULL);
        /* 1 */ vg_assert(mx != NULL);
-         /* 2 */ vg_assert(mx->__m_count > 0);
-         /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__m_owner));
-         /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__m_owner);
+         /* 2 */ vg_assert(mx->__vg_m_count > 0);
+         /* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__vg_m_owner));
+         /* 4 */ vg_assert((UInt)i != (ThreadId)mx->__vg_m_owner);
      } else
      if (VG_(threads)[i].status == VgTs_WaitCV) {
        vg_assert(cv != NULL);
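Editor's note: the core trick in the patch above is to reinterpret each opaque pthread object as a binary-compatible shadow struct (via the CONVERT macro) before touching any field, so the file no longer depends on LinuxThreads' field names. Below is a minimal standalone sketch of that idea; it is not part of the attachment, and the names shadow_mutex_t and SHADOW, plus the exact field layout, are illustrative assumptions only.

/* Sketch of the type-shadowing pattern the patch uses.  Assumes the
   shadow layout is close enough to the platform's pthread_mutex_t that
   the size check passes; this is an illustration, not Valgrind code. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Our own field names; the layout mirrors the LinuxThreads mutex
   (reserved word, lock count, owner, kind, fast lock), which also puts
   the "kind" word where NPTL keeps its fourth-word __kind field. */
typedef struct {
   int   reserved;   /* reserved for future use */
   int   count;      /* depth of recursive locking */
   void* owner;      /* owning thread, if recursive/errcheck */
   int   kind;       /* fast, recursive or errcheck */
   long  lock[2];    /* underlying fast lock */
} shadow_mutex_t;

/* Same shape as the patch's CONVERT(): size-check, cast, then null the
   original pointer so it cannot be used by accident afterwards. */
#define SHADOW(x, sx)                             \
   do {                                           \
      assert(sizeof(*(x)) >= sizeof(*(sx)));      \
      (sx) = (shadow_mutex_t*)(x);                \
      (x)  = NULL;                                \
   } while (0)

int main(void)
{
   pthread_mutex_t  mx = PTHREAD_MUTEX_INITIALIZER;
   pthread_mutex_t* p  = &mx;
   shadow_mutex_t*  sp;

   SHADOW(p, sp);   /* from here on, use sp; p is NULL */
   /* With the default initializer this is typically 0
      (PTHREAD_MUTEX_TIMED_NP), but the value is ABI-dependent. */
   printf("kind word = %d\n", sp->kind);
   return 0;
}

Nulling the original pointer is the same defensive move CONVERT makes, and the convention is self-enforcing in the way the patch's WARNING comment notes: on an NPTL-only system, any leftover direct access to LinuxThreads field names simply fails to compile.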