summary | refs | log | tree | commit | diff
path: root/src/internal
diff options
context:
space:
mode:
author	Rich Felker <dalias@aerifal.cx>	2019-02-15 22:29:01 -0500
committer	Rich Felker <dalias@aerifal.cx>	2019-02-15 22:29:01 -0500
commit	8f11e6127fe93093f81a52b15bb1537edc3fc8af (patch)
tree	6868c949ea888b3695361bb9fa67af86dc546bfe /src/internal
parent	04335d9260c076cf4d9264bd93dd3b06c237a639 (diff)
download	musl-8f11e6127fe93093f81a52b15bb1537edc3fc8af.tar.gz
track all live threads in an AS-safe, fully-consistent linked list
the hard problem here is unlinking threads from a list when they exit without creating a window of inconsistency where the kernel task for a thread still exists and is still executing instructions in userspace, but is not reflected in the list. the magic solution here is getting rid of per-thread exit futex addresses (set_tid_address), and instead using the exit futex to unlock the global thread list. since pthread_join can no longer see the thread enter a detach_state of EXITED (which depended on the exit futex address pointing to the detach_state), it must now observe the unlocking of the thread list lock before it can unmap the joined thread and return. it doesn't actually have to take the lock. for this, a __tl_sync primitive is offered, with a signature that will allow it to be enhanced for quick return even under contention on the lock, if needed. for now, the exiting thread always performs a futex wake on its detach_state. a future change could optimize this out except when there is already a joiner waiting. initial/dynamic variants of detached state no longer need to be tracked separately, since the futex address is always set to the global list lock, not a thread-local address that could become invalid on detached thread exit. all detached threads, however, must perform a second sigprocmask syscall to block implementation-internal signals, since locking the thread list with them already blocked is not permissible. the arch-independent C version of __unmapself no longer needs to take a lock or setup its own futex address to release the lock, since it must necessarily be called with the thread list lock already held, guaranteeing exclusive access to the temporary stack. changes to libc.threads_minus_1 no longer need to be atomic, since they are guarded by the thread list lock. it is largely vestigial at this point, and can be replaced with a cheaper boolean indicating whether the process is multithreaded at some point in the future.
Diffstat (limited to 'src/internal')
-rw-r--r--	src/internal/pthread_impl.h | 12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/src/internal/pthread_impl.h b/src/internal/pthread_impl.h
index c677f7f6..508b40b5 100644
--- a/src/internal/pthread_impl.h
+++ b/src/internal/pthread_impl.h
@@ -18,7 +18,7 @@ struct pthread {
* internal (accessed via asm) ABI. Do not change. */
struct pthread *self;
uintptr_t *dtv;
- void *unused1, *unused2;
+ struct pthread *prev, *next; /* non-ABI */
uintptr_t sysinfo;
uintptr_t canary, canary2;
@@ -56,11 +56,9 @@ struct pthread {
};
enum {
- DT_EXITED = 0,
- DT_EXITING,
+ DT_EXITING = 0,
DT_JOINABLE,
DT_DETACHED,
- DT_DYNAMIC,
};
struct __timer {
@@ -173,6 +171,12 @@ hidden void __acquire_ptc(void);
hidden void __release_ptc(void);
hidden void __inhibit_ptc(void);
+hidden void __tl_lock(void);
+hidden void __tl_unlock(void);
+hidden void __tl_sync(pthread_t);
+
+extern hidden volatile int __thread_list_lock;
+
extern hidden unsigned __default_stacksize;
extern hidden unsigned __default_guardsize;