Skip to content

Commit 2f064a5

Browse files
author
Peter Zijlstra
committed
sched: Change task_struct::state
Change the type and name of task_struct::state. Drop the volatile and shrink it to an 'unsigned int'. Rename it in order to find all uses such that we can use READ_ONCE/WRITE_ONCE as appropriate.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Daniel Bristot de Oliveira <[email protected]>
Acked-by: Will Deacon <[email protected]>
Acked-by: Daniel Thompson <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 7c3edd6 commit 2f064a5

28 files changed

+123
-111
lines changed

arch/ia64/kernel/mca.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -1788,7 +1788,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
17881788
ti->task = p;
17891789
ti->cpu = cpu;
17901790
p->stack = ti;
1791-
p->state = TASK_UNINTERRUPTIBLE;
1791+
p->__state = TASK_UNINTERRUPTIBLE;
17921792
cpumask_set_cpu(cpu, &p->cpus_mask);
17931793
INIT_LIST_HEAD(&p->tasks);
17941794
p->parent = p->real_parent = p->group_leader = p;

arch/ia64/kernel/ptrace.c

+4-4
Original file line numberDiff line numberDiff line change
@@ -641,11 +641,11 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
641641
read_lock(&tasklist_lock);
642642
if (child->sighand) {
643643
spin_lock_irq(&child->sighand->siglock);
644-
if (child->state == TASK_STOPPED &&
644+
if (READ_ONCE(child->__state) == TASK_STOPPED &&
645645
!test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
646646
set_notify_resume(child);
647647

648-
child->state = TASK_TRACED;
648+
WRITE_ONCE(child->__state, TASK_TRACED);
649649
stopped = 1;
650650
}
651651
spin_unlock_irq(&child->sighand->siglock);
@@ -665,9 +665,9 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
665665
read_lock(&tasklist_lock);
666666
if (child->sighand) {
667667
spin_lock_irq(&child->sighand->siglock);
668-
if (child->state == TASK_TRACED &&
668+
if (READ_ONCE(child->__state) == TASK_TRACED &&
669669
(child->signal->flags & SIGNAL_STOP_STOPPED)) {
670-
child->state = TASK_STOPPED;
670+
WRITE_ONCE(child->__state, TASK_STOPPED);
671671
}
672672
spin_unlock_irq(&child->sighand->siglock);
673673
}

arch/powerpc/xmon/xmon.c

+7-6
Original file line numberDiff line numberDiff line change
@@ -3162,21 +3162,22 @@ memzcan(void)
31623162

31633163
static void show_task(struct task_struct *tsk)
31643164
{
3165+
unsigned int p_state = READ_ONCE(tsk->__state);
31653166
char state;
31663167

31673168
/*
31683169
* Cloned from kdb_task_state_char(), which is not entirely
31693170
* appropriate for calling from xmon. This could be moved
31703171
* to a common, generic, routine used by both.
31713172
*/
3172-
state = (tsk->state == 0) ? 'R' :
3173-
(tsk->state < 0) ? 'U' :
3174-
(tsk->state & TASK_UNINTERRUPTIBLE) ? 'D' :
3175-
(tsk->state & TASK_STOPPED) ? 'T' :
3176-
(tsk->state & TASK_TRACED) ? 'C' :
3173+
state = (p_state == 0) ? 'R' :
3174+
(p_state < 0) ? 'U' :
3175+
(p_state & TASK_UNINTERRUPTIBLE) ? 'D' :
3176+
(p_state & TASK_STOPPED) ? 'T' :
3177+
(p_state & TASK_TRACED) ? 'C' :
31773178
(tsk->exit_state & EXIT_ZOMBIE) ? 'Z' :
31783179
(tsk->exit_state & EXIT_DEAD) ? 'E' :
3179-
(tsk->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
3180+
(p_state & TASK_INTERRUPTIBLE) ? 'S' : '?';
31803181

31813182
printf("%16px %16lx %16px %6d %6d %c %2d %s\n", tsk,
31823183
tsk->thread.ksp, tsk->thread.regs,

block/blk-mq.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -3886,7 +3886,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
38863886
int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
38873887
{
38883888
struct blk_mq_hw_ctx *hctx;
3889-
long state;
3889+
unsigned int state;
38903890

38913891
if (!blk_qc_t_valid(cookie) ||
38923892
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))

drivers/md/dm.c

+3-3
Original file line numberDiff line numberDiff line change
@@ -2328,7 +2328,7 @@ static bool md_in_flight_bios(struct mapped_device *md)
23282328
return sum != 0;
23292329
}
23302330

2331-
static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
2331+
static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
23322332
{
23332333
int r = 0;
23342334
DEFINE_WAIT(wait);
@@ -2351,7 +2351,7 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state
23512351
return r;
23522352
}
23532353

2354-
static int dm_wait_for_completion(struct mapped_device *md, long task_state)
2354+
static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
23552355
{
23562356
int r = 0;
23572357

@@ -2478,7 +2478,7 @@ static void unlock_fs(struct mapped_device *md)
24782478
* are being added to md->deferred list.
24792479
*/
24802480
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2481-
unsigned suspend_flags, long task_state,
2481+
unsigned suspend_flags, unsigned int task_state,
24822482
int dmf_suspended_flag)
24832483
{
24842484
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;

fs/binfmt_elf.c

+5-3
Original file line numberDiff line numberDiff line change
@@ -1537,7 +1537,8 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
15371537
{
15381538
const struct cred *cred;
15391539
unsigned int i, len;
1540-
1540+
unsigned int state;
1541+
15411542
/* first copy the parameters from user space */
15421543
memset(psinfo, 0, sizeof(struct elf_prpsinfo));
15431544

@@ -1559,7 +1560,8 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
15591560
psinfo->pr_pgrp = task_pgrp_vnr(p);
15601561
psinfo->pr_sid = task_session_vnr(p);
15611562

1562-
i = p->state ? ffz(~p->state) + 1 : 0;
1563+
state = READ_ONCE(p->__state);
1564+
i = state ? ffz(~state) + 1 : 0;
15631565
psinfo->pr_state = i;
15641566
psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
15651567
psinfo->pr_zomb = psinfo->pr_sname == 'Z';
@@ -1571,7 +1573,7 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
15711573
SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
15721574
rcu_read_unlock();
15731575
strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1574-
1576+
15751577
return 0;
15761578
}
15771579

fs/binfmt_elf_fdpic.c

+3-1
Original file line numberDiff line numberDiff line change
@@ -1331,6 +1331,7 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
13311331
{
13321332
const struct cred *cred;
13331333
unsigned int i, len;
1334+
unsigned int state;
13341335

13351336
/* first copy the parameters from user space */
13361337
memset(psinfo, 0, sizeof(struct elf_prpsinfo));
@@ -1353,7 +1354,8 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
13531354
psinfo->pr_pgrp = task_pgrp_vnr(p);
13541355
psinfo->pr_sid = task_session_vnr(p);
13551356

1356-
i = p->state ? ffz(~p->state) + 1 : 0;
1357+
state = READ_ONCE(p->__state);
1358+
i = state ? ffz(~state) + 1 : 0;
13571359
psinfo->pr_state = i;
13581360
psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
13591361
psinfo->pr_zomb = psinfo->pr_sname == 'Z';

fs/userfaultfd.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -337,7 +337,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
337337
return ret;
338338
}
339339

340-
static inline long userfaultfd_get_blocking_state(unsigned int flags)
340+
static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
341341
{
342342
if (flags & FAULT_FLAG_INTERRUPTIBLE)
343343
return TASK_INTERRUPTIBLE;
@@ -370,7 +370,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
370370
struct userfaultfd_wait_queue uwq;
371371
vm_fault_t ret = VM_FAULT_SIGBUS;
372372
bool must_wait;
373-
long blocking_state;
373+
unsigned int blocking_state;
374374

375375
/*
376376
* We don't do userfault handling for the final child pid update.

include/linux/sched.h

+15-16
Original file line numberDiff line numberDiff line change
@@ -113,13 +113,13 @@ struct task_group;
113113
__TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
114114
TASK_PARKED)
115115

116-
#define task_is_running(task) (READ_ONCE((task)->state) == TASK_RUNNING)
116+
#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
117117

118-
#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
118+
#define task_is_traced(task) ((READ_ONCE(task->__state) & __TASK_TRACED) != 0)
119119

120-
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
120+
#define task_is_stopped(task) ((READ_ONCE(task->__state) & __TASK_STOPPED) != 0)
121121

122-
#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
122+
#define task_is_stopped_or_traced(task) ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
123123

124124
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
125125

@@ -134,14 +134,14 @@ struct task_group;
134134
do { \
135135
WARN_ON_ONCE(is_special_task_state(state_value));\
136136
current->task_state_change = _THIS_IP_; \
137-
current->state = (state_value); \
137+
WRITE_ONCE(current->__state, (state_value)); \
138138
} while (0)
139139

140140
#define set_current_state(state_value) \
141141
do { \
142142
WARN_ON_ONCE(is_special_task_state(state_value));\
143143
current->task_state_change = _THIS_IP_; \
144-
smp_store_mb(current->state, (state_value)); \
144+
smp_store_mb(current->__state, (state_value)); \
145145
} while (0)
146146

147147
#define set_special_state(state_value) \
@@ -150,7 +150,7 @@ struct task_group;
150150
WARN_ON_ONCE(!is_special_task_state(state_value)); \
151151
raw_spin_lock_irqsave(&current->pi_lock, flags); \
152152
current->task_state_change = _THIS_IP_; \
153-
current->state = (state_value); \
153+
WRITE_ONCE(current->__state, (state_value)); \
154154
raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
155155
} while (0)
156156
#else
@@ -192,10 +192,10 @@ struct task_group;
192192
* Also see the comments of try_to_wake_up().
193193
*/
194194
#define __set_current_state(state_value) \
195-
current->state = (state_value)
195+
WRITE_ONCE(current->__state, (state_value))
196196

197197
#define set_current_state(state_value) \
198-
smp_store_mb(current->state, (state_value))
198+
smp_store_mb(current->__state, (state_value))
199199

200200
/*
201201
* set_special_state() should be used for those states when the blocking task
@@ -207,13 +207,13 @@ struct task_group;
207207
do { \
208208
unsigned long flags; /* may shadow */ \
209209
raw_spin_lock_irqsave(&current->pi_lock, flags); \
210-
current->state = (state_value); \
210+
WRITE_ONCE(current->__state, (state_value)); \
211211
raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
212212
} while (0)
213213

214214
#endif
215215

216-
#define get_current_state() READ_ONCE(current->state)
216+
#define get_current_state() READ_ONCE(current->__state)
217217

218218
/* Task command name length: */
219219
#define TASK_COMM_LEN 16
@@ -666,8 +666,7 @@ struct task_struct {
666666
*/
667667
struct thread_info thread_info;
668668
#endif
669-
/* -1 unrunnable, 0 runnable, >0 stopped: */
670-
volatile long state;
669+
unsigned int __state;
671670

672671
/*
673672
* This begins the randomizable portion of task_struct. Only
@@ -1532,7 +1531,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
15321531

15331532
static inline unsigned int task_state_index(struct task_struct *tsk)
15341533
{
1535-
unsigned int tsk_state = READ_ONCE(tsk->state);
1534+
unsigned int tsk_state = READ_ONCE(tsk->__state);
15361535
unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
15371536

15381537
BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
@@ -1840,10 +1839,10 @@ static __always_inline void scheduler_ipi(void)
18401839
*/
18411840
preempt_fold_need_resched();
18421841
}
1843-
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1842+
extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
18441843
#else
18451844
static inline void scheduler_ipi(void) { }
1846-
static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1845+
static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
18471846
{
18481847
return 1;
18491848
}

include/linux/sched/debug.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ extern void dump_cpu_task(int cpu);
1414
/*
1515
* Only dump TASK_* tasks. (0 for all tasks)
1616
*/
17-
extern void show_state_filter(unsigned long state_filter);
17+
extern void show_state_filter(unsigned int state_filter);
1818

1919
static inline void show_state(void)
2020
{

include/linux/sched/signal.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -382,7 +382,7 @@ static inline int fatal_signal_pending(struct task_struct *p)
382382
return task_sigpending(p) && __fatal_signal_pending(p);
383383
}
384384

385-
static inline int signal_pending_state(long state, struct task_struct *p)
385+
static inline int signal_pending_state(unsigned int state, struct task_struct *p)
386386
{
387387
if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
388388
return 0;

init/init_task.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@ struct task_struct init_task
7171
.thread_info = INIT_THREAD_INFO(init_task),
7272
.stack_refcount = REFCOUNT_INIT(1),
7373
#endif
74-
.state = 0,
74+
.__state = 0,
7575
.stack = init_stack,
7676
.usage = REFCOUNT_INIT(2),
7777
.flags = PF_KTHREAD,

kernel/cgroup/cgroup-v1.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -713,7 +713,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
713713

714714
css_task_iter_start(&cgrp->self, 0, &it);
715715
while ((tsk = css_task_iter_next(&it))) {
716-
switch (tsk->state) {
716+
switch (READ_ONCE(tsk->__state)) {
717717
case TASK_RUNNING:
718718
stats->nr_running++;
719719
break;

kernel/debug/kdb/kdb_support.c

+10-8
Original file line numberDiff line numberDiff line change
@@ -609,23 +609,25 @@ unsigned long kdb_task_state_string(const char *s)
609609
*/
610610
char kdb_task_state_char (const struct task_struct *p)
611611
{
612-
int cpu;
613-
char state;
612+
unsigned int p_state;
614613
unsigned long tmp;
614+
char state;
615+
int cpu;
615616

616617
if (!p ||
617618
copy_from_kernel_nofault(&tmp, (char *)p, sizeof(unsigned long)))
618619
return 'E';
619620

620621
cpu = kdb_process_cpu(p);
621-
state = (p->state == 0) ? 'R' :
622-
(p->state < 0) ? 'U' :
623-
(p->state & TASK_UNINTERRUPTIBLE) ? 'D' :
624-
(p->state & TASK_STOPPED) ? 'T' :
625-
(p->state & TASK_TRACED) ? 'C' :
622+
p_state = READ_ONCE(p->__state);
623+
state = (p_state == 0) ? 'R' :
624+
(p_state < 0) ? 'U' :
625+
(p_state & TASK_UNINTERRUPTIBLE) ? 'D' :
626+
(p_state & TASK_STOPPED) ? 'T' :
627+
(p_state & TASK_TRACED) ? 'C' :
626628
(p->exit_state & EXIT_ZOMBIE) ? 'Z' :
627629
(p->exit_state & EXIT_DEAD) ? 'E' :
628-
(p->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
630+
(p_state & TASK_INTERRUPTIBLE) ? 'S' : '?';
629631
if (is_idle_task(p)) {
630632
/* Idle task. Is it really idle, apart from the kdb
631633
* interrupt? */

kernel/fork.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -425,7 +425,7 @@ static int memcg_charge_kernel_stack(struct task_struct *tsk)
425425

426426
static void release_task_stack(struct task_struct *tsk)
427427
{
428-
if (WARN_ON(tsk->state != TASK_DEAD))
428+
if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
429429
return; /* Better to leak the stack than to free prematurely */
430430

431431
account_kernel_stack(tsk, -1);
@@ -2392,7 +2392,7 @@ static __latent_entropy struct task_struct *copy_process(
23922392
atomic_dec(&p->cred->user->processes);
23932393
exit_creds(p);
23942394
bad_fork_free:
2395-
p->state = TASK_DEAD;
2395+
WRITE_ONCE(p->__state, TASK_DEAD);
23962396
put_task_stack(p);
23972397
delayed_free_task(p);
23982398
fork_out:

kernel/hung_task.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
196196
last_break = jiffies;
197197
}
198198
/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
199-
if (t->state == TASK_UNINTERRUPTIBLE)
199+
if (READ_ONCE(t->__state) == TASK_UNINTERRUPTIBLE)
200200
check_hung_task(t, timeout);
201201
}
202202
unlock:

kernel/kthread.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -457,7 +457,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
457457
}
458458
EXPORT_SYMBOL(kthread_create_on_node);
459459

460-
static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
460+
static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
461461
{
462462
unsigned long flags;
463463

@@ -473,7 +473,7 @@ static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mas
473473
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
474474
}
475475

476-
static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
476+
static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
477477
{
478478
__kthread_bind_mask(p, cpumask_of(cpu), state);
479479
}

0 commit comments

Comments
 (0)