author     Rich Felker <dalias@aerifal.cx>   2014-08-16 19:15:19 -0400
committer  Rich Felker <dalias@aerifal.cx>   2014-08-16 19:15:19 -0400
commit     fffc5cda10e0c5c910b40f7be0d4fa4e15bb3f48 (patch)
tree       7fdff6f18f2f03803e6fc7a8a879beafa771c40f /src
parent     25d12fc0fc51f1fae0f85b4649a6463eb805aa8f (diff)
download   musl-fffc5cda10e0c5c910b40f7be0d4fa4e15bb3f48.tar.gz
fix false ownership of mutexes due to tid reuse, using robust list
per the resolution of Austin Group issue 755, the POSIX requirement
that ownership be enforced for recursive and error-checking mutexes
does not allow a random new thread to acquire ownership of an orphaned
mutex just because it happened to be assigned the same tid as the
original owner that exited with the mutex locked.
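
to make the hazard concrete, here is a small illustrative sketch (my own toy model, not musl code; the mask mirrors the 0x7fffffff usage in the patch below) of an ownership test that is nothing more than a tid comparison against the lock word:

```c
#include <stdio.h>

/* toy model, not musl code: like the _m_lock field in the patch below,
 * the lock word records only the owner's thread id */
struct toy_mutex { int lock; };

static int appears_owned_by(const struct toy_mutex *m, int tid)
{
	return (m->lock & 0x7fffffff) == tid;
}

int main(void)
{
	/* tid 1234 locked the mutex, then exited without unlocking it */
	struct toy_mutex m = { .lock = 1234 };

	/* a brand-new thread later assigned the recycled tid 1234 now
	 * passes the ownership test for a mutex it never locked */
	printf("%d\n", appears_owned_by(&m, 1234)); /* prints 1 */
	return 0;
}
```

under the Austin Group resolution this false match must not confer ownership, which is what the change below arranges.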
one possible fix for this issue would be to prevent the kernel thread
from terminating when it exits with mutexes held, permanently reserving
the tid against reuse. however, this does not solve the problem for
process-shared mutexes, whose lifetime cannot be controlled, so this
approach was not used.
the alternate approach I've taken is to reuse the robust mutex system
for non-robust recursive and error-checking mutexes. when a thread
exits, the kernel (or the new userspace robust-list code added in
commit b092f1c5fa9c048e12d002c7b972df5ecbe96d1d) will set the
owner-died bit for these orphaned mutexes, but since the mutex type is
not robust, pthread_mutex_trylock will not allow a new owner to
acquire them. instead, they remain in a state of being permanently
locked, as desired.
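
a minimal usage sketch (my own test program under the behavior described above, not part of the commit): a non-robust error-checking mutex whose owner exits while holding it should now remain permanently locked, so pthread_mutex_trylock keeps reporting EBUSY instead of ever granting ownership to a later thread.

```c
#include <pthread.h>
#include <errno.h>
#include <stdio.h>

static pthread_mutex_t m;

/* lock the mutex and exit while still holding it */
static void *lock_and_exit(void *arg)
{
	pthread_mutex_lock(&m);
	return 0;
}

int main(void)
{
	pthread_mutexattr_t a;
	pthread_t t;
	int r;

	pthread_mutexattr_init(&a);
	pthread_mutexattr_settype(&a, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &a);

	pthread_create(&t, 0, lock_and_exit, 0);
	pthread_join(&t, 0);

	/* the owner is gone; the orphaned mutex stays locked rather than
	 * becoming available to a thread that reuses the old tid */
	r = pthread_mutex_trylock(&m);
	printf("trylock after owner exited: %s\n",
	       r == EBUSY ? "EBUSY (still locked)" : "unexpected result");
	return 0;
}
```

(compile with -pthread; since the mutex is not robust, there is intentionally no EOWNERDEAD recovery path here.)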
Diffstat (limited to 'src')
-rw-r--r--  src/thread/pthread_mutex_consistent.c |  4
-rw-r--r--  src/thread/pthread_mutex_timedlock.c  |  5
-rw-r--r--  src/thread/pthread_mutex_trylock.c    | 28
-rw-r--r--  src/thread/pthread_mutex_unlock.c     | 12
4 files changed, 26 insertions(+), 23 deletions(-)
diff --git a/src/thread/pthread_mutex_consistent.c b/src/thread/pthread_mutex_consistent.c
index baea0ff4..0db976ab 100644
--- a/src/thread/pthread_mutex_consistent.c
+++ b/src/thread/pthread_mutex_consistent.c
@@ -2,8 +2,8 @@
 
 int pthread_mutex_consistent(pthread_mutex_t *m)
 {
-	if ((m->_m_type & 15) < 8) return EINVAL;
-	if ((m->_m_lock & 0x3fffffff) != __pthread_self()->tid)
+	if (!(m->_m_type & 8)) return EINVAL;
+	if ((m->_m_lock & 0x7fffffff) != __pthread_self()->tid)
 		return EPERM;
 	m->_m_type -= 8;
 	return 0;
diff --git a/src/thread/pthread_mutex_timedlock.c b/src/thread/pthread_mutex_timedlock.c
index 849febb7..2a959d25 100644
--- a/src/thread/pthread_mutex_timedlock.c
+++ b/src/thread/pthread_mutex_timedlock.c
@@ -9,9 +9,10 @@ int pthread_mutex_timedlock(pthread_mutex_t *restrict m, const struct timespec *
 	int r, t, priv = (m->_m_type & 128) ^ 128;
 
 	while ((r=pthread_mutex_trylock(m)) == EBUSY) {
-		if (!(r=m->_m_lock) || (r&0x40000000)) continue;
+		if (!(r=m->_m_lock) || ((r&0x40000000) && (m->_m_type&4)))
+			continue;
 		if ((m->_m_type&3) == PTHREAD_MUTEX_ERRORCHECK
-		 && (r&0x1fffffff) == __pthread_self()->tid)
+		 && (r&0x7fffffff) == __pthread_self()->tid)
 			return EDEADLK;
 
 		a_inc(&m->_m_waiters);
diff --git a/src/thread/pthread_mutex_trylock.c b/src/thread/pthread_mutex_trylock.c
index 850fcb90..f871e9e0 100644
--- a/src/thread/pthread_mutex_trylock.c
+++ b/src/thread/pthread_mutex_trylock.c
@@ -7,12 +7,9 @@ int __pthread_mutex_trylock_owner(pthread_mutex_t *m)
 	pthread_t self = __pthread_self();
 	int tid = self->tid;
 
-	if (type >= 4) {
-		if (!self->robust_list.off)
-			__syscall(SYS_set_robust_list,
-				&self->robust_list, 3*sizeof(long));
+	if (!self->robust_list.off) {
+		__syscall(SYS_set_robust_list, &self->robust_list, 3*sizeof(long));
 		self->robust_list.off = (char*)&m->_m_lock-(char *)&m->_m_next;
-		self->robust_list.pending = &m->_m_next;
 	}
 
 	old = m->_m_lock;
@@ -23,21 +20,28 @@ int __pthread_mutex_trylock_owner(pthread_mutex_t *m)
 		return 0;
 	}
 
-	if ((own && !(own & 0x40000000)) || a_cas(&m->_m_lock, old, tid)!=old)
-		return EBUSY;
-
-	if (type < 4) return 0;
+	self->robust_list.pending = &m->_m_next;
 
-	if (type >= 8) {
-		m->_m_lock = 0;
-		return ENOTRECOVERABLE;
+	if ((own && (!(own & 0x40000000) || !(type & 4)))
+	    || a_cas(&m->_m_lock, old, tid) != old) {
+		self->robust_list.pending = 0;
+		return EBUSY;
 	}
+
 	m->_m_next = self->robust_list.head;
 	m->_m_prev = &self->robust_list.head;
 	if (self->robust_list.head)
 		self->robust_list.head[-1] = &m->_m_next;
 	self->robust_list.head = &m->_m_next;
 	self->robust_list.pending = 0;
+
+	if (type < 4) return 0;
+
+	if (type >= 8) {
+		m->_m_lock = 0;
+		return ENOTRECOVERABLE;
+	}
+
 	if (own) {
 		m->_m_count = 0;
 		m->_m_type += 8;
diff --git a/src/thread/pthread_mutex_unlock.c b/src/thread/pthread_mutex_unlock.c
index 769d6e56..b4ed3f87 100644
--- a/src/thread/pthread_mutex_unlock.c
+++ b/src/thread/pthread_mutex_unlock.c
@@ -8,7 +8,6 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 	pthread_t self;
 	int waiters = m->_m_waiters;
 	int cont;
-	int robust = 0;
 	int type = m->_m_type & 15;
 	int priv = (m->_m_type & 128) ^ 128;
 
@@ -16,20 +15,19 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 		if (!m->_m_lock)
 			return EPERM;
 		self = __pthread_self();
-		if ((m->_m_lock&0x1fffffff) != self->tid)
+		if ((m->_m_lock&0x7fffffff) != self->tid)
 			return EPERM;
 		if ((type&3) == PTHREAD_MUTEX_RECURSIVE && m->_m_count)
 			return m->_m_count--, 0;
-		if (type >= 4) {
-			robust = 1;
+		if (!priv) {
 			self->robust_list.pending = &m->_m_next;
-			*(void **)m->_m_prev = m->_m_next;
-			if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
 			__vm_lock_impl(+1);
 		}
+		*(void **)m->_m_prev = m->_m_next;
+		if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
 	}
 	cont = a_swap(&m->_m_lock, 0);
-	if (robust) {
+	if (type != PTHREAD_MUTEX_NORMAL && !priv) {
 		self->robust_list.pending = 0;
 		__vm_unlock_impl();
 	}
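
for orientation, my reading of the constants used in the hunks above; the identifiers are invented for this summary and do not exist in musl:

```c
/* annotation only; names are illustrative, values are the ones in the patch */
enum {
	/* _m_type bits */
	MUT_KIND_MASK = 3,   /* normal / recursive / errorcheck */
	MUT_ROBUST    = 4,   /* robust mutex */
	MUT_OWNERDEAD = 8,   /* owner died; pthread_mutex_consistent pending */
	MUT_PSHARED   = 128, /* process-shared; selects a non-private futex */

	/* _m_lock bits */
	OWNER_DIED    = 0x40000000, /* set by the robust-list machinery when the owner exits */
	OWNER_MASK    = 0x7fffffff  /* owner tid together with the owner-died bit */
};
```

as I read it, once these mutexes are enrolled in the robust list, the owner-died bit can appear in _m_lock, and the widened 0x7fffffff masks keep a dead owner's lock word from ever comparing equal to a live thread's tid.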