Skip to content

Commit 6aa7de0

Browse files
Mark Rutland and Ingo Molnar
Mark Rutland
authored and
Ingo Molnar
committed
locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns to READ_ONCE()/WRITE_ONCE()
Please do not apply this to mainline directly, instead please re-run the coccinelle script shown below and apply its output. For several reasons, it is desirable to use {READ,WRITE}_ONCE() in preference to ACCESS_ONCE(), and new code is expected to use one of the former. So far, there's been no reason to change most existing uses of ACCESS_ONCE(), as these aren't harmful, and changing them results in churn. However, for some features, the read/write distinction is critical to correct operation. To distinguish these cases, separate read/write accessors must be used. This patch migrates (most) remaining ACCESS_ONCE() instances to {READ,WRITE}_ONCE(), using the following coccinelle script: ---- // Convert trivial ACCESS_ONCE() uses to equivalent READ_ONCE() and // WRITE_ONCE() // $ make coccicheck COCCI=/home/mark/once.cocci SPFLAGS="--include-headers" MODE=patch virtual patch @ depends on patch @ expression E1, E2; @@ - ACCESS_ONCE(E1) = E2 + WRITE_ONCE(E1, E2) @ depends on patch @ expression E; @@ - ACCESS_ONCE(E) + READ_ONCE(E) ---- Signed-off-by: Mark Rutland <[email protected]> Signed-off-by: Paul E. McKenney <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
1 parent b03a0fe commit 6aa7de0

File tree

180 files changed

+383
-385
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

180 files changed

+383
-385
lines changed

arch/arc/kernel/smp.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -245,7 +245,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
245245
* and read back old value
246246
*/
247247
do {
248-
new = old = ACCESS_ONCE(*ipi_data_ptr);
248+
new = old = READ_ONCE(*ipi_data_ptr);
249249
new |= 1U << msg;
250250
} while (cmpxchg(ipi_data_ptr, old, new) != old);
251251

arch/arm/include/asm/spinlock.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
7171

7272
while (lockval.tickets.next != lockval.tickets.owner) {
7373
wfe();
74-
lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
74+
lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
7575
}
7676

7777
smp_mb();

arch/arm/mach-tegra/cpuidle-tegra20.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
179179
bool entered_lp2 = false;
180180

181181
if (tegra_pending_sgi())
182-
ACCESS_ONCE(abort_flag) = true;
182+
WRITE_ONCE(abort_flag, true);
183183

184184
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
185185

arch/arm/vdso/vgettimeofday.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ static notrace u32 __vdso_read_begin(const struct vdso_data *vdata)
3535
{
3636
u32 seq;
3737
repeat:
38-
seq = ACCESS_ONCE(vdata->seq_count);
38+
seq = READ_ONCE(vdata->seq_count);
3939
if (seq & 1) {
4040
cpu_relax();
4141
goto repeat;

arch/ia64/include/asm/spinlock.h

+4-4
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
6161

6262
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
6363
{
64-
int tmp = ACCESS_ONCE(lock->lock);
64+
int tmp = READ_ONCE(lock->lock);
6565

6666
if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
6767
return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
@@ -73,19 +73,19 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
7373
unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
7474

7575
asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
76-
ACCESS_ONCE(*p) = (tmp + 2) & ~1;
76+
WRITE_ONCE(*p, (tmp + 2) & ~1);
7777
}
7878

7979
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
8080
{
81-
long tmp = ACCESS_ONCE(lock->lock);
81+
long tmp = READ_ONCE(lock->lock);
8282

8383
return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
8484
}
8585

8686
static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
8787
{
88-
long tmp = ACCESS_ONCE(lock->lock);
88+
long tmp = READ_ONCE(lock->lock);
8989

9090
return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
9191
}

arch/mips/include/asm/vdso.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,7 @@ static inline u32 vdso_data_read_begin(const union mips_vdso_data *data)
9999
u32 seq;
100100

101101
while (true) {
102-
seq = ACCESS_ONCE(data->seq_count);
102+
seq = READ_ONCE(data->seq_count);
103103
if (likely(!(seq & 1))) {
104104
/* Paired with smp_wmb() in vdso_data_write_*(). */
105105
smp_rmb();

arch/mips/kernel/pm-cps.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -166,7 +166,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
166166
nc_core_ready_count = nc_addr;
167167

168168
/* Ensure ready_count is zero-initialised before the assembly runs */
169-
ACCESS_ONCE(*nc_core_ready_count) = 0;
169+
WRITE_ONCE(*nc_core_ready_count, 0);
170170
coupled_barrier(&per_cpu(pm_barrier, core), online);
171171

172172
/* Run the generated entry code */

arch/mn10300/kernel/mn10300-serial.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -543,7 +543,7 @@ static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port)
543543

544544
try_again:
545545
/* pull chars out of the hat */
546-
ix = ACCESS_ONCE(port->rx_outp);
546+
ix = READ_ONCE(port->rx_outp);
547547
if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) {
548548
if (push && !tport->low_latency)
549549
tty_flip_buffer_push(tport);
@@ -1724,7 +1724,7 @@ static int mn10300_serial_poll_get_char(struct uart_port *_port)
17241724
if (mn10300_serial_int_tbl[port->rx_irq].port != NULL) {
17251725
do {
17261726
/* pull chars out of the hat */
1727-
ix = ACCESS_ONCE(port->rx_outp);
1727+
ix = READ_ONCE(port->rx_outp);
17281728
if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0)
17291729
return NO_POLL_CHAR;
17301730

arch/parisc/include/asm/atomic.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -260,7 +260,7 @@ atomic64_set(atomic64_t *v, s64 i)
260260
static __inline__ s64
261261
atomic64_read(const atomic64_t *v)
262262
{
263-
return ACCESS_ONCE((v)->counter);
263+
return READ_ONCE((v)->counter);
264264
}
265265

266266
#define atomic64_inc(v) (atomic64_add( 1,(v)))

arch/powerpc/platforms/powernv/opal-msglog.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count)
4343
if (!opal_memcons)
4444
return -ENODEV;
4545

46-
out_pos = be32_to_cpu(ACCESS_ONCE(opal_memcons->out_pos));
46+
out_pos = be32_to_cpu(READ_ONCE(opal_memcons->out_pos));
4747

4848
/* Now we've read out_pos, put a barrier in before reading the new
4949
* data it points to in conbuf. */

arch/s390/include/asm/spinlock.h

+3-3
Original file line numberDiff line numberDiff line change
@@ -117,14 +117,14 @@ extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
117117

118118
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
119119
{
120-
int old = ACCESS_ONCE(rw->lock);
120+
int old = READ_ONCE(rw->lock);
121121
return likely(old >= 0 &&
122122
__atomic_cmpxchg_bool(&rw->lock, old, old + 1));
123123
}
124124

125125
static inline int arch_write_trylock_once(arch_rwlock_t *rw)
126126
{
127-
int old = ACCESS_ONCE(rw->lock);
127+
int old = READ_ONCE(rw->lock);
128128
return likely(old == 0 &&
129129
__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
130130
}
@@ -211,7 +211,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
211211
int old;
212212

213213
do {
214-
old = ACCESS_ONCE(rw->lock);
214+
old = READ_ONCE(rw->lock);
215215
} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
216216
}
217217

arch/s390/lib/spinlock.c

+8-8
Original file line numberDiff line numberDiff line change
@@ -162,8 +162,8 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
162162
smp_yield_cpu(~owner);
163163
count = spin_retry;
164164
}
165-
old = ACCESS_ONCE(rw->lock);
166-
owner = ACCESS_ONCE(rw->owner);
165+
old = READ_ONCE(rw->lock);
166+
owner = READ_ONCE(rw->owner);
167167
if (old < 0)
168168
continue;
169169
if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
@@ -178,7 +178,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
178178
int old;
179179

180180
while (count-- > 0) {
181-
old = ACCESS_ONCE(rw->lock);
181+
old = READ_ONCE(rw->lock);
182182
if (old < 0)
183183
continue;
184184
if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
@@ -202,8 +202,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
202202
smp_yield_cpu(~owner);
203203
count = spin_retry;
204204
}
205-
old = ACCESS_ONCE(rw->lock);
206-
owner = ACCESS_ONCE(rw->owner);
205+
old = READ_ONCE(rw->lock);
206+
owner = READ_ONCE(rw->owner);
207207
smp_mb();
208208
if (old >= 0) {
209209
prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
@@ -230,8 +230,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
230230
smp_yield_cpu(~owner);
231231
count = spin_retry;
232232
}
233-
old = ACCESS_ONCE(rw->lock);
234-
owner = ACCESS_ONCE(rw->owner);
233+
old = READ_ONCE(rw->lock);
234+
owner = READ_ONCE(rw->owner);
235235
if (old >= 0 &&
236236
__atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
237237
prev = old;
@@ -251,7 +251,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
251251
int old;
252252

253253
while (count-- > 0) {
254-
old = ACCESS_ONCE(rw->lock);
254+
old = READ_ONCE(rw->lock);
255255
if (old)
256256
continue;
257257
if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))

arch/sparc/include/asm/atomic_32.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ void atomic_set(atomic_t *, int);
3131

3232
#define atomic_set_release(v, i) atomic_set((v), (i))
3333

34-
#define atomic_read(v) ACCESS_ONCE((v)->counter)
34+
#define atomic_read(v) READ_ONCE((v)->counter)
3535

3636
#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
3737
#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))

arch/tile/gxio/dma_queue.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -163,14 +163,14 @@ int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
163163
int64_t completion_slot, int update)
164164
{
165165
if (update) {
166-
if (ACCESS_ONCE(dma_queue->hw_complete_count) >
166+
if (READ_ONCE(dma_queue->hw_complete_count) >
167167
completion_slot)
168168
return 1;
169169

170170
__gxio_dma_queue_update_credits(dma_queue);
171171
}
172172

173-
return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot;
173+
return READ_ONCE(dma_queue->hw_complete_count) > completion_slot;
174174
}
175175

176176
EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);

arch/tile/include/gxio/dma_queue.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,7 @@ static inline int64_t __gxio_dma_queue_reserve(__gxio_dma_queue_t *dma_queue,
121121
* if the result is LESS than "hw_complete_count".
122122
*/
123123
uint64_t complete;
124-
complete = ACCESS_ONCE(dma_queue->hw_complete_count);
124+
complete = READ_ONCE(dma_queue->hw_complete_count);
125125
slot |= (complete & 0xffffffffff000000);
126126
if (slot < complete)
127127
slot += 0x1000000;

arch/tile/kernel/ptrace.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -255,7 +255,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
255255

256256
int do_syscall_trace_enter(struct pt_regs *regs)
257257
{
258-
u32 work = ACCESS_ONCE(current_thread_info()->flags);
258+
u32 work = READ_ONCE(current_thread_info()->flags);
259259

260260
if ((work & _TIF_SYSCALL_TRACE) &&
261261
tracehook_report_syscall_entry(regs)) {

arch/x86/entry/common.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
7575
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
7676
BUG_ON(regs != task_pt_regs(current));
7777

78-
work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
78+
work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
7979

8080
if (unlikely(work & _TIF_SYSCALL_EMU))
8181
emulated = true;

arch/x86/entry/vdso/vclock_gettime.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -318,7 +318,7 @@ int gettimeofday(struct timeval *, struct timezone *)
318318
notrace time_t __vdso_time(time_t *t)
319319
{
320320
/* This is atomic on x86 so we don't need any locks. */
321-
time_t result = ACCESS_ONCE(gtod->wall_time_sec);
321+
time_t result = READ_ONCE(gtod->wall_time_sec);
322322

323323
if (t)
324324
*t = result;

arch/x86/events/core.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -2118,7 +2118,7 @@ static int x86_pmu_event_init(struct perf_event *event)
21182118
event->destroy(event);
21192119
}
21202120

2121-
if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
2121+
if (READ_ONCE(x86_pmu.attr_rdpmc))
21222122
event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
21232123

21242124
return err;

arch/x86/include/asm/vgtod.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
4848
unsigned ret;
4949

5050
repeat:
51-
ret = ACCESS_ONCE(s->seq);
51+
ret = READ_ONCE(s->seq);
5252
if (unlikely(ret & 1)) {
5353
cpu_relax();
5454
goto repeat;

arch/x86/kernel/espfix_64.c

+3-3
Original file line numberDiff line numberDiff line change
@@ -155,14 +155,14 @@ void init_espfix_ap(int cpu)
155155
page = cpu/ESPFIX_STACKS_PER_PAGE;
156156

157157
/* Did another CPU already set this up? */
158-
stack_page = ACCESS_ONCE(espfix_pages[page]);
158+
stack_page = READ_ONCE(espfix_pages[page]);
159159
if (likely(stack_page))
160160
goto done;
161161

162162
mutex_lock(&espfix_init_mutex);
163163

164164
/* Did we race on the lock? */
165-
stack_page = ACCESS_ONCE(espfix_pages[page]);
165+
stack_page = READ_ONCE(espfix_pages[page]);
166166
if (stack_page)
167167
goto unlock_done;
168168

@@ -200,7 +200,7 @@ void init_espfix_ap(int cpu)
200200
set_pte(&pte_p[n*PTE_STRIDE], pte);
201201

202202
/* Job is done for this CPU and any CPU which shares this page */
203-
ACCESS_ONCE(espfix_pages[page]) = stack_page;
203+
WRITE_ONCE(espfix_pages[page], stack_page);
204204

205205
unlock_done:
206206
mutex_unlock(&espfix_init_mutex);

arch/x86/kernel/nmi.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ static void nmi_max_handler(struct irq_work *w)
105105
{
106106
struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
107107
int remainder_ns, decimal_msecs;
108-
u64 whole_msecs = ACCESS_ONCE(a->max_duration);
108+
u64 whole_msecs = READ_ONCE(a->max_duration);
109109

110110
remainder_ns = do_div(whole_msecs, (1000 * 1000));
111111
decimal_msecs = remainder_ns / 1000;

arch/x86/kvm/mmu.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -443,7 +443,7 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
443443

444444
static u64 __get_spte_lockless(u64 *sptep)
445445
{
446-
return ACCESS_ONCE(*sptep);
446+
return READ_ONCE(*sptep);
447447
}
448448
#else
449449
union split_spte {
@@ -4819,7 +4819,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
48194819
* If we don't have indirect shadow pages, it means no page is
48204820
* write-protected, so we can exit simply.
48214821
*/
4822-
if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
4822+
if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
48234823
return;
48244824

48254825
remote_flush = local_flush = false;

arch/x86/kvm/page_track.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,7 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
157157
return false;
158158

159159
index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
160-
return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
160+
return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
161161
}
162162

163163
void kvm_page_track_cleanup(struct kvm *kvm)

arch/x86/xen/p2m.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -547,7 +547,7 @@ int xen_alloc_p2m_entry(unsigned long pfn)
547547
if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
548548
topidx = p2m_top_index(pfn);
549549
top_mfn_p = &p2m_top_mfn[topidx];
550-
mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);
550+
mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]);
551551

552552
BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
553553

arch/xtensa/platforms/xtfpga/lcd.c

+7-7
Original file line numberDiff line numberDiff line change
@@ -34,23 +34,23 @@
3434
static void lcd_put_byte(u8 *addr, u8 data)
3535
{
3636
#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
37-
ACCESS_ONCE(*addr) = data;
37+
WRITE_ONCE(*addr, data);
3838
#else
39-
ACCESS_ONCE(*addr) = data & 0xf0;
40-
ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
39+
WRITE_ONCE(*addr, data & 0xf0);
40+
WRITE_ONCE(*addr, (data << 4) & 0xf0);
4141
#endif
4242
}
4343

4444
static int __init lcd_init(void)
4545
{
46-
ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
46+
WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
4747
mdelay(5);
48-
ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
48+
WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
4949
udelay(200);
50-
ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
50+
WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
5151
udelay(50);
5252
#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
53-
ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
53+
WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
5454
udelay(50);
5555
lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
5656
udelay(50);

block/blk-wbt.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -261,7 +261,7 @@ static inline bool stat_sample_valid(struct blk_rq_stat *stat)
261261

262262
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
263263
{
264-
u64 now, issue = ACCESS_ONCE(rwb->sync_issue);
264+
u64 now, issue = READ_ONCE(rwb->sync_issue);
265265

266266
if (!issue || !rwb->sync_cookie)
267267
return 0;

drivers/base/core.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -668,7 +668,7 @@ const char *dev_driver_string(const struct device *dev)
668668
* so be careful about accessing it. dev->bus and dev->class should
669669
* never change once they are set, so they don't need special care.
670670
*/
671-
drv = ACCESS_ONCE(dev->driver);
671+
drv = READ_ONCE(dev->driver);
672672
return drv ? drv->name :
673673
(dev->bus ? dev->bus->name :
674674
(dev->class ? dev->class->name : ""));

0 commit comments

Comments
 (0)