author    Rich Felker <dalias@aerifal.cx>    2015-04-10 02:27:52 -0400
committer Rich Felker <dalias@aerifal.cx>    2015-04-10 02:27:52 -0400
commit    f08ab9e61a147630497198fe3239149275c0a3f4 (patch)
tree      65f0898637a5306485e665ec95c753b99f4e3740
parent    4e98cce1c529a304d7b55b5455078b9532f93e9b (diff)
redesign and simplify vmlock system
this global lock allows certain unlock-type primitives to exclude mmap/munmap operations which could change the identity of virtual addresses while references to them still exist.

the original design mistakenly assumed mmap/munmap would conversely need to exclude the same operations which exclude mmap/munmap, so the vmlock was implemented as a sort of 'symmetric recursive rwlock'. this turned out to be unnecessary. commit 25d12fc0fc51f1fae0f85b4649a6463eb805aa8f already shortened the interval during which mmap/munmap held their side of the lock, but left the inappropriate lock design and some inefficiency.

the new design uses a separate function, __vm_wait, which does not hold any lock itself and only waits for lock users which were already present when it was called to release the lock. this is sufficient because of the way operations that need to be excluded are sequenced: the "unlock-type" operations using the vmlock need only block mmap/munmap operations that are precipitated by (and thus sequenced after) the atomic-unlock they perform while holding the vmlock. this allows for a spectacular lack of synchronization in the __vm_wait function itself.
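The pattern is small enough to illustrate standalone. The sketch below is a hedged illustration only: all names (holders, vm_lock, vm_wait, unlocker) are hypothetical, and C11 atomics plus a spin loop stand in for musl's futex-backed __wait/__wake:

/* vm_wait_sketch.c: build with cc -pthread vm_wait_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int holders; /* plays the role of vmlock[0] */

static void vm_lock(void)   { atomic_fetch_add(&holders, 1); }
static void vm_unlock(void) { atomic_fetch_sub(&holders, 1); }

/* Wait for the holders present on entry to drain. Holders arriving
 * later are sequenced after the atomic unlock that precipitated this
 * call, so they cannot still hold references to the mapping about to
 * change; nothing is written and no lock is taken. */
static void vm_wait(void)
{
        while (atomic_load(&holders))
                ; /* musl blocks on a futex here instead of spinning */
}

static void *unlocker(void *arg)
{
        (void)arg;
        vm_lock();
        /* ...the atomic unlock of a process-shared mutex would happen
         * here, with the mutex's address pinned by the vm lock... */
        vm_unlock();
        return 0;
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, 0, unlocker, 0);
        vm_wait(); /* what munmap() and MAP_FIXED mmap() now do */
        puts("prior vm lock holders have drained");
        pthread_join(&t, 0);
        return 0;
}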
-rw-r--r--  src/internal/pthread_impl.h            4
-rw-r--r--  src/mman/mmap.c                        9
-rw-r--r--  src/mman/munmap.c                     13
-rw-r--r--  src/thread/pthread_barrier_destroy.c   5
-rw-r--r--  src/thread/pthread_barrier_wait.c      7
-rw-r--r--  src/thread/pthread_create.c            6
-rw-r--r--  src/thread/pthread_mutex_unlock.c      7
-rw-r--r--  src/thread/vmlock.c                   23
8 files changed, 29 insertions, 45 deletions
diff --git a/src/internal/pthread_impl.h b/src/internal/pthread_impl.h
index 7c352bc4..12df9690 100644
--- a/src/internal/pthread_impl.h
+++ b/src/internal/pthread_impl.h
@@ -108,6 +108,10 @@ int __libc_sigprocmask(int, const sigset_t *, sigset_t *);
void __lock(volatile int *);
void __unmapself(void *, size_t);

+void __vm_wait(void);
+void __vm_lock(void);
+void __vm_unlock(void);
+
int __timedwait(volatile int *, int, clockid_t, const struct timespec *, int);
int __timedwait_cp(volatile int *, int, clockid_t, const struct timespec *, int);
void __wait(volatile int *, volatile int *, int, int);
diff --git a/src/mman/mmap.c b/src/mman/mmap.c
index 56e39a7a..b85f25ca 100644
--- a/src/mman/mmap.c
+++ b/src/mman/mmap.c
@@ -6,10 +6,8 @@
#include "syscall.h"
#include "libc.h"

-static void dummy1(int x) { }
-static void dummy0(void) { }
-weak_alias(dummy1, __vm_lock);
-weak_alias(dummy0, __vm_unlock);
+static void dummy(void) { }
+weak_alias(dummy, __vm_wait);

#define UNIT SYSCALL_MMAP2_UNIT
#define OFF_MASK ((-0x2000ULL << (8*sizeof(long)-1)) | (UNIT-1))
@@ -25,8 +23,7 @@ void *__mmap(void *start, size_t len, int prot, int flags, int fd, off_t off)
return MAP_FAILED;
}
if (flags & MAP_FIXED) {
- __vm_lock(-1);
- __vm_unlock();
+ __vm_wait();
}
#ifdef SYS_mmap2
return (void *)syscall(SYS_mmap2, start, len, prot, flags, fd, off/UNIT);
diff --git a/src/mman/munmap.c b/src/mman/munmap.c
index 359c691f..3f711ee5 100644
--- a/src/mman/munmap.c
+++ b/src/mman/munmap.c
@@ -2,18 +2,13 @@
#include "syscall.h"
#include "libc.h"

-static void dummy1(int x) { }
-static void dummy0(void) { }
-weak_alias(dummy1, __vm_lock);
-weak_alias(dummy0, __vm_unlock);
+static void dummy(void) { }
+weak_alias(dummy, __vm_wait);

int __munmap(void *start, size_t len)
{
- int ret;
- __vm_lock(-1);
- __vm_unlock();
- ret = syscall(SYS_munmap, start, len);
- return ret;
+ __vm_wait();
+ return syscall(SYS_munmap, start, len);
}

weak_alias(__munmap, munmap);
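An aside on the dummy/weak_alias pair kept in both mman files above: it lets mmap.c and munmap.c link without dragging in vmlock.c. musl's weak_alias macro (src/internal/libc.h) expands to roughly the sketch below, so __vm_wait binds to the local no-op unless the link also contains the strong definition from src/thread/vmlock.c, which the threading code that actually takes vm locks pulls in:

/* Approximate expansion of weak_alias(dummy, __vm_wait) in GCC/Clang
 * attribute syntax; a sketch of the mechanism, not verbatim musl. */
static void dummy(void) { }
extern __typeof(dummy) __vm_wait __attribute__((weak, alias("dummy")));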
diff --git a/src/thread/pthread_barrier_destroy.c b/src/thread/pthread_barrier_destroy.c
index e0da197a..4ce0b2e1 100644
--- a/src/thread/pthread_barrier_destroy.c
+++ b/src/thread/pthread_barrier_destroy.c
@@ -1,7 +1,5 @@
#include "pthread_impl.h"

-void __vm_lock(int), __vm_unlock(void);
-
int pthread_barrier_destroy(pthread_barrier_t *b)
{
if (b->_b_limit < 0) {
@@ -11,8 +9,7 @@ int pthread_barrier_destroy(pthread_barrier_t *b)
while ((v = b->_b_lock) & INT_MAX)
__wait(&b->_b_lock, 0, v, 0);
}
- __vm_lock(-1);
- __vm_unlock();
+ __vm_wait();
}
return 0;
}
diff --git a/src/thread/pthread_barrier_wait.c b/src/thread/pthread_barrier_wait.c
index bfeb3464..06b83db9 100644
--- a/src/thread/pthread_barrier_wait.c
+++ b/src/thread/pthread_barrier_wait.c
@@ -1,8 +1,5 @@
#include "pthread_impl.h"

-void __vm_lock_impl(int);
-void __vm_unlock_impl(void);
-
static int pshared_barrier_wait(pthread_barrier_t *b)
{
int limit = (b->_b_limit & INT_MAX) + 1;
@@ -26,7 +23,7 @@ static int pshared_barrier_wait(pthread_barrier_t *b)
__wait(&b->_b_count, &b->_b_waiters2, v, 0);
}

- __vm_lock_impl(+1);
+ __vm_lock();

/* Ensure all threads have a vm lock before proceeding */
if (a_fetch_add(&b->_b_count, -1)==1-limit) {
@@ -47,7 +44,7 @@ static int pshared_barrier_wait(pthread_barrier_t *b)
if (v==INT_MIN+1 || (v==1 && w))
__wake(&b->_b_lock, 1, 0);

- __vm_unlock_impl();
+ __vm_unlock();

return ret;
}
diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index 8b0135bc..08c5f4f8 100644
--- a/src/thread/pthread_create.c
+++ b/src/thread/pthread_create.c
@@ -9,8 +9,6 @@
void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
int __mprotect(void *, size_t, int);
-void __vm_lock_impl(int);
-void __vm_unlock_impl(void);

static void dummy_0()
{
@@ -77,7 +75,7 @@ _Noreturn void __pthread_exit(void *result)
/* Process robust list in userspace to handle non-pshared mutexes
* and the detached thread case where the robust list head will
* be invalid when the kernel would process it. */
- __vm_lock_impl(+1);
+ __vm_lock();
volatile void *volatile *rp;
while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
pthread_mutex_t *m = (void *)((char *)rp
@@ -91,7 +89,7 @@ _Noreturn void __pthread_exit(void *result)
if (cont < 0 || waiters)
__wake(&m->_m_lock, 1, priv);
}
- __vm_unlock_impl();
+ __vm_unlock();
__do_orphaned_stdio_locks();
diff --git a/src/thread/pthread_mutex_unlock.c b/src/thread/pthread_mutex_unlock.c
index a7f39c7f..02da92a9 100644
--- a/src/thread/pthread_mutex_unlock.c
+++ b/src/thread/pthread_mutex_unlock.c
@@ -1,8 +1,5 @@
#include "pthread_impl.h"

-void __vm_lock_impl(int);
-void __vm_unlock_impl(void);
-
int __pthread_mutex_unlock(pthread_mutex_t *m)
{
pthread_t self;
@@ -19,7 +16,7 @@ int __pthread_mutex_unlock(pthread_mutex_t *m)
return m->_m_count--, 0;
if (!priv) {
self->robust_list.pending = &m->_m_next;
- __vm_lock_impl(+1);
+ __vm_lock();
}
volatile void *prev = m->_m_prev;
volatile void *next = m->_m_next;
@@ -30,7 +27,7 @@ int __pthread_mutex_unlock(pthread_mutex_t *m)
cont = a_swap(&m->_m_lock, (type & 8) ? 0x40000000 : 0);
if (type != PTHREAD_MUTEX_NORMAL && !priv) {
self->robust_list.pending = 0;
- __vm_unlock_impl();
+ __vm_unlock();
}
if (waiters || cont<0)
__wake(&m->_m_lock, 1, priv);
diff --git a/src/thread/vmlock.c b/src/thread/vmlock.c
index 125c6dc9..0a69b3e3 100644
--- a/src/thread/vmlock.c
+++ b/src/thread/vmlock.c
@@ -2,21 +2,20 @@

static volatile int vmlock[2];

-void __vm_lock(int inc)
+void __vm_wait()
{
- for (;;) {
- int v = vmlock[0];
- if (inc*v < 0) __wait(vmlock, vmlock+1, v, 1);
- else if (a_cas(vmlock, v, v+inc)==v) break;
- }
+ int tmp;
+ while ((tmp=vmlock[0]))
+ __wait(vmlock, vmlock+1, tmp, 1);
}

-void __vm_unlock(void)
+void __vm_lock()
{
- int inc = vmlock[0]>0 ? -1 : 1;
- if (a_fetch_add(vmlock, inc)==-inc && vmlock[1])
- __wake(vmlock, -1, 1);
+ a_inc(vmlock);
}

-weak_alias(__vm_lock, __vm_lock_impl);
-weak_alias(__vm_unlock, __vm_unlock_impl);
+void __vm_unlock(void)
+{
+ if (a_fetch_add(vmlock, -1)==1 && vmlock[1])
+ __wake(vmlock, -1, 1);
+}
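A closing note on the gating in the new __vm_unlock: vmlock[0] counts lock holders and vmlock[1] counts sleeping waiters (maintained by __wait), so the futex wake is paid only by the holder whose decrement takes the count from 1 to 0, and only when someone is actually asleep. A hedged C11 rendering of just that test, with hypothetical names:

#include <stdatomic.h>

static atomic_int holders;  /* stands in for vmlock[0] */
static atomic_int sleepers; /* stands in for vmlock[1], maintained by __wait */

static void vm_unlock_sketch(void)
{
        /* fetch_sub returning 1 means this decrement went 1 -> 0, so we
         * are the last holder out. Only then, and only if a waiter has
         * recorded itself, issue a wake (musl: __wake(vmlock, -1, 1)). */
        if (atomic_fetch_sub(&holders, 1) == 1 && atomic_load(&sleepers))
                ; /* the futex wake of all waiters would go here */
}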