@@ -113,13 +113,13 @@ struct task_group;
 					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
 					 TASK_PARKED)
 
-#define task_is_running(task)		(READ_ONCE((task)->state) == TASK_RUNNING)
+#define task_is_running(task)		(READ_ONCE((task)->__state) == TASK_RUNNING)
 
-#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
+#define task_is_traced(task)		((READ_ONCE(task->__state) & __TASK_TRACED) != 0)
 
-#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
+#define task_is_stopped(task)		((READ_ONCE(task->__state) & __TASK_STOPPED) != 0)
 
-#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+#define task_is_stopped_or_traced(task)	((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
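These task_is_*() helpers are lockless readers: another CPU may flip the task's state concurrently, so every load is now wrapped in READ_ONCE() to get a single, untorn read that the compiler cannot re-load or fuse. A minimal sketch of a caller, assuming only the helpers above (the function itself is hypothetical):

	/* Hypothetical: poll another task's state without taking its pi_lock. */
	static bool example_task_runnable(struct task_struct *p)
	{
		/* READ_ONCE() inside task_is_running() marks the racy read as intentional. */
		return task_is_running(p);
	}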
@@ -134,14 +134,14 @@ struct task_group;
 	do {							\
 		WARN_ON_ONCE(is_special_task_state(state_value));\
 		current->task_state_change = _THIS_IP_;		\
-		current->state = (state_value);			\
+		WRITE_ONCE(current->__state, (state_value));	\
 	} while (0)
 
 #define set_current_state(state_value)				\
 	do {							\
 		WARN_ON_ONCE(is_special_task_state(state_value));\
 		current->task_state_change = _THIS_IP_;		\
-		smp_store_mb(current->state, (state_value));	\
+		smp_store_mb(current->__state, (state_value));	\
 	} while (0)
 
 #define set_special_state(state_value)					\
@@ -150,7 +150,7 @@ struct task_group;
 		WARN_ON_ONCE(!is_special_task_state(state_value));	\
 		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
 		current->task_state_change = _THIS_IP_;			\
-		current->state = (state_value);				\
+		WRITE_ONCE(current->__state, (state_value));		\
 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
 	} while (0)
 #else
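set_current_state() uses smp_store_mb() so the state write is ordered before the subsequent condition check, pairing with the barrier in try_to_wake_up(); set_special_state() instead serializes against pi_lock. A sketch of the canonical sleep loop these macros exist for, with wakeup_condition standing in for real shared state:

	/* Hypothetical sleeper; wakeup_condition is a placeholder. */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (wakeup_condition)		/* checked after the implied barrier */
			break;
		schedule();			/* actually sleep */
	}
	__set_current_state(TASK_RUNNING);	/* plain write is enough here */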
@@ -192,10 +192,10 @@ struct task_group;
  * Also see the comments of try_to_wake_up().
  */
 #define __set_current_state(state_value)				\
-	current->state = (state_value)
+	WRITE_ONCE(current->__state, (state_value))
 
 #define set_current_state(state_value)					\
-	smp_store_mb(current->state, (state_value))
+	smp_store_mb(current->__state, (state_value))
 
 /*
  * set_special_state() should be used for those states when the blocking task
@@ -207,13 +207,13 @@ struct task_group;
 	do {								\
 		unsigned long flags; /* may shadow */			\
 		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
-		current->state = (state_value);				\
+		WRITE_ONCE(current->__state, (state_value));		\
 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
 	} while (0)
 
 #endif
 
-#define get_current_state()	READ_ONCE(current->state)
+#define get_current_state()	READ_ONCE(current->__state)
 
 /* Task command name length: */
 #define TASK_COMM_LEN			16
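get_current_state() gives code outside the scheduler one blessed accessor instead of open-coded reads of the renamed field. A trivial, hypothetical use:

	/* Hypothetical debug helper built on the new accessor. */
	static void example_report_self(void)
	{
		pr_info("current state: 0x%x\n", get_current_state());
	}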
@@ -666,8 +666,7 @@ struct task_struct {
 	 */
 	struct thread_info		thread_info;
 #endif
-	/* -1 unrunnable, 0 runnable, >0 stopped: */
-	volatile long			state;
+	unsigned int			__state;
 
 	/*
 	 * This begins the randomizable portion of task_struct. Only
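Renaming the field, rather than just retyping it, is what makes the conversion safe to merge: any leftover open-coded access to ->state becomes a build error instead of a silent data race, and dropping volatile follows the kernel's long-standing guidance to annotate individual accesses instead of whole declarations. A sketch of the resulting access pattern (the function is illustrative):

	static void example_set_state(unsigned int st)
	{
		/* current->state = st;  -- no longer compiles after the rename */
		WRITE_ONCE(current->__state, st);	/* annotated, untorn store */
	}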
@@ -1532,7 +1531,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
 
 static inline unsigned int task_state_index(struct task_struct *tsk)
 {
-	unsigned int tsk_state = READ_ONCE(tsk->state);
+	unsigned int tsk_state = READ_ONCE(tsk->__state);
 	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
 
 	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
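task_state_index() reads the state once, folds in exit_state, and masks to TASK_REPORT; the resulting index selects the single letter shown in /proc/&lt;pid&gt;/stat. A hedged illustration, assuming the existing task_index_to_char() helper:

	unsigned int idx = task_state_index(tsk);
	char c = task_index_to_char(idx);	/* 0 -> 'R', then one letter per reported state bit */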
@@ -1840,10 +1839,10 @@ static __always_inline void scheduler_ipi(void)
 	 */
 	preempt_fold_need_resched();
 }
-extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
+extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
 #else
 static inline void scheduler_ipi(void) { }
-static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
 {
 	return 1;
 }
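wait_task_inactive() takes the state to match as its second argument and returns nonzero only once the task has gone off the CPU in that state, so its prototype follows the field's new unsigned int width. A hedged usage sketch (the ptrace-style caller is illustrative):

	/* Hypothetical: wait until 'child' has stopped running in the traced state. */
	if (!wait_task_inactive(child, __TASK_TRACED))
		pr_warn("child never settled into __TASK_TRACED\n");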