path: root/kernel/exit.c
diff options
authorRik van Riel <>2014-08-16 13:40:10 -0400
committerIngo Molnar <>2014-09-08 08:17:01 +0200
commite78c3496790ee8a36522a838b59b388e8a709e65 (patch)
tree0473b9ea676754d50b19eb1a862ac16fdffacbeb /kernel/exit.c
parent90ed9cbe765ad358b3151a12b8bf889a3cbcd573 (diff)
time, signal: Protect resource use statistics with seqlock
Both times() and clock_gettime(CLOCK_PROCESS_CPUTIME_ID) have scalability issues on large systems, due to both functions being serialized with a lock. The lock protects against reporting a wrong value, due to a thread in the task group exiting, its statistics reporting up to the signal struct, and that exited task's statistics being counted twice (or not at all). Protecting that with a lock results in times() and clock_gettime() being completely serialized on large systems. This can be fixed by using a seqlock around the events that gather and propagate statistics. As an additional benefit, the protection code can be moved into thread_group_cputime(), slightly simplifying the calling functions. In the case of posix_cpu_clock_get_task() things can be simplified a lot, because the calling function already ensures that the task sticks around, and the rest is now taken care of in thread_group_cputime(). This way the statistics reporting code can run lockless. Signed-off-by: Rik van Riel <> Signed-off-by: Peter Zijlstra (Intel) <> Cc: Alex Thorlton <> Cc: Andrew Morton <> Cc: Daeseok Youn <> Cc: David Rientjes <> Cc: Dongsheng Yang <> Cc: Geert Uytterhoeven <> Cc: Guillaume Morin <> Cc: Ionut Alexa <> Cc: Kees Cook <> Cc: Linus Torvalds <> Cc: Li Zefan <> Cc: Michal Hocko <> Cc: Michal Schmidt <> Cc: Oleg Nesterov <> Cc: Vladimir Davydov <> Cc: Cc: Cc: Cc: Cc: Link: Signed-off-by: Ingo Molnar <>
Diffstat (limited to 'kernel/exit.c')
1 file changed, 4 insertions, 0 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index b93d46dab6fc..fa09b86609db 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -127,6 +127,7 @@ static void __exit_signal(struct task_struct *tsk)
* the signal_struct.
task_cputime(tsk, &utime, &stime);
+ write_seqlock(&sig->stats_lock);
sig->utime += utime;
sig->stime += stime;
sig->gtime += task_gtime(tsk);
@@ -140,6 +141,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
__unhash_process(tsk, group_dead);
+ write_sequnlock(&sig->stats_lock);
* Do this under ->siglock, we can race with another thread
@@ -1042,6 +1044,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
psig = p->real_parent->signal;
sig = p->signal;
+ write_seqlock(&psig->stats_lock);
psig->cutime += tgutime + sig->cutime;
psig->cstime += tgstime + sig->cstime;
psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
@@ -1064,6 +1067,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
psig->cmaxrss = maxrss;
task_io_accounting_add(&psig->ioac, &p->ioac);
task_io_accounting_add(&psig->ioac, &sig->ioac);
+ write_sequnlock(&psig->stats_lock);