Skip to content

Commit

Permalink
signal: Split up __sigqueue_alloc()
Browse files Browse the repository at this point in the history
To cure the SIG_IGN handling for posix interval timers, the preallocated
sigqueue needs to be embedded into struct k_itimer to prevent lifetime
races of all sorts.

Reorganize __sigqueue_alloc() so the ucounts retrieval and the
initialization can be used independently.

No functional change.

Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Frederic Weisbecker <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lore.kernel.org/all/[email protected]
  • Loading branch information
KAGA-KOKO committed Nov 7, 2024
1 parent 5d916a0 commit 5cac427
Showing 1 changed file with 35 additions and 17 deletions.
52 changes: 35 additions & 17 deletions kernel/signal.c
Original file line number Diff line number Diff line change
Expand Up @@ -396,16 +396,9 @@ void task_join_group_stop(struct task_struct *task)
task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
* allocate a new signal queue record
* - this may be called without locks if and only if t == current, otherwise an
* appropriate lock must be held to stop the target task from exiting
*/
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
int override_rlimit, const unsigned int sigqueue_flags)
static struct ucounts *sig_get_ucounts(struct task_struct *t, int sig,
int override_rlimit)
{
struct sigqueue *q = NULL;
struct ucounts *ucounts;
long sigpending;

Expand All @@ -424,19 +417,44 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
if (!sigpending)
return NULL;

if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
} else {
if (unlikely(!override_rlimit && sigpending > task_rlimit(t, RLIMIT_SIGPENDING))) {
dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
print_dropped_signal(sig);
return NULL;
}

if (unlikely(q == NULL)) {
return ucounts;
}

/* Initialize a freshly allocated sigqueue entry: record its flags and the
 * ucounts reference charged for it, and make the list node self-contained. */
static void __sigqueue_init(struct sigqueue *q, struct ucounts *ucounts,
			    const unsigned int sigqueue_flags)
{
	q->flags = sigqueue_flags;
	q->ucounts = ucounts;
	INIT_LIST_HEAD(&q->list);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 *
 * Returns NULL if the RLIMIT_SIGPENDING accounting fails or the slab
 * allocation fails; on success the returned entry is initialized and owns
 * one ucounts reference, which __sigqueue_free() must release.
 */
static struct sigqueue *__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
					 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct ucounts *ucounts = sig_get_ucounts(t, sig, override_rlimit);
	struct sigqueue *q;

	if (!ucounts)
		return NULL;

	q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	if (!q) {
		/* Allocation failed: undo the pending-signal charge taken above. */
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
		return NULL;
	}

	__sigqueue_init(q, ucounts, sigqueue_flags);
	return q;
}

Expand Down

0 comments on commit 5cac427

Please sign in to comment.