From 25d12fc0fc51f1fae0f85b4649a6463eb805aa8f Mon Sep 17 00:00:00 2001
From: Rich Felker
Date: Sat, 16 Aug 2014 02:41:45 -0400
Subject: optimize locking against vm changes for mmap/munmap

the whole point of this locking is to prevent munmap, or mmap with
MAP_FIXED, from deallocating virtual addresses, or changing the backing
a given virtual address refers to, during certain race windows involving
self-synchronized unmapping or destruction of pthread synchronization
objects. there is no need for exclusion in the other direction, so it
suffices to take the lock momentarily and release it before making the
syscall, rather than holding it across the syscall.
---
 src/mman/mmap.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/src/mman/mmap.c b/src/mman/mmap.c
index 1917a540..56e39a7a 100644
--- a/src/mman/mmap.c
+++ b/src/mman/mmap.c
@@ -16,8 +16,6 @@ weak_alias(dummy0, __vm_unlock);
 
 void *__mmap(void *start, size_t len, int prot, int flags, int fd, off_t off)
 {
-	void *ret;
-
 	if (off & OFF_MASK) {
 		errno = EINVAL;
 		return MAP_FAILED;
@@ -26,14 +24,15 @@ void *__mmap(void *start, size_t len, int prot, int flags, int fd, off_t off)
 		errno = ENOMEM;
 		return MAP_FAILED;
 	}
-	if (flags & MAP_FIXED) __vm_lock(-1);
+	if (flags & MAP_FIXED) {
+		__vm_lock(-1);
+		__vm_unlock();
+	}
 #ifdef SYS_mmap2
-	ret = (void *)syscall(SYS_mmap2, start, len, prot, flags, fd, off/UNIT);
+	return (void *)syscall(SYS_mmap2, start, len, prot, flags, fd, off/UNIT);
 #else
-	ret = (void *)syscall(SYS_mmap, start, len, prot, flags, fd, off);
+	return (void *)syscall(SYS_mmap, start, len, prot, flags, fd, off);
 #endif
-	if (flags & MAP_FIXED) __vm_unlock();
-	return ret;
 }
 
 weak_alias(__mmap, mmap);
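
Editor's note: for illustration, a minimal sketch of the idea behind the
__vm_lock(-1); __vm_unlock(); pair above, assuming a simple shared-count
design. The names (vm_users, vm_lock_shared, vm_unlock_shared, vm_drain)
are hypothetical; this is not musl's actual vm lock implementation, which
lives elsewhere in the tree.

/* Hypothetical sketch of the momentary-lock pattern, using C11 atomics.
 * Not musl's actual implementation. */
#include <stdatomic.h>

static atomic_int vm_users;

/* Taken in shared mode around the race windows in pthread code that
 * must not see their mappings deallocated or given new backing. */
static void vm_lock_shared(void)
{
	atomic_fetch_add(&vm_users, 1);
}

static void vm_unlock_shared(void)
{
	atomic_fetch_sub(&vm_users, 1);
}

/* What mmap with MAP_FIXED (or munmap) needs: wait until all current
 * shared holders have finished, then proceed. Nothing is held across
 * the syscall, since exclusion in the other direction is not needed. */
static void vm_drain(void)
{
	while (atomic_load(&vm_users))
		; /* a real implementation would futex-wait, not spin */
}

In this sketch, vm_drain plays the role of the momentary lock/unlock
pair in the patch: acquiring the lock exclusively only to release it
immediately has the sole effect of draining in-progress shared holders,
which is all the correctness argument in the commit message requires.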