/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Task-based RCU implementations.
*
* Copyright (C) 2020 Paul E. McKenney
*/
#ifdef CONFIG_TASKS_RCU_GENERIC
#include "rcu_segcblist.h"
////////////////////////////////////////////////////////////////////////
//
// Generic data structures.
struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(struct list_head *hop);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
/**
* struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
* @cblist: Callback list.
* @lock: Lock protecting per-CPU callback list.
* @rtp_jiffies: Jiffies counter value for statistics.
* @rtp_n_lock_retries: Rough lock-contention statistic.
* @rtp_work: Work queue for invoking callbacks.
* @rtp_irq_work: IRQ work queue for deferred wakeups.
* @barrier_q_head: RCU callback for barrier operation.
* @rtp_blkd_tasks: List of tasks blocked as readers.
* @cpu: CPU number corresponding to this entry.
* @rtpp: Pointer to the rcu_tasks structure.
*/
struct rcu_tasks_percpu {
struct rcu_segcblist cblist;
raw_spinlock_t __private lock;
unsigned long rtp_jiffies;
unsigned long rtp_n_lock_retries;
struct work_struct rtp_work;
struct irq_work rtp_irq_work;
struct rcu_head barrier_q_head;
struct list_head rtp_blkd_tasks;
int cpu;
struct rcu_tasks *rtpp;
};
/**
* struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
* @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
* @cbs_gbl_lock: Lock protecting callback list.
* @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
* @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
* @gp_func: This flavor's grace-period-wait function.
* @gp_state: Grace period's most recent state transition (debugging).
* @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
* @init_fract: Initial backoff sleep interval.
* @gp_jiffies: Time of last @gp_state transition.
* @gp_start: Most recent grace-period start in jiffies.
* @tasks_gp_seq: Number of grace periods completed since boot.
* @n_ipis: Number of IPIs sent to encourage grace periods to end.
* @n_ipis_fails: Number of IPI-send failures.
* @pregp_func: This flavor's pre-grace-period function (optional).
* @pertask_func: This flavor's per-task scan function (optional).
* @postscan_func: This flavor's post-task scan function (optional).
* @holdouts_func: This flavor's holdout-list scan function (optional).
* @postgp_func: This flavor's post-grace-period function (optional).
* @call_func: This flavor's call_rcu()-equivalent function.
* @rtpcpu: This flavor's rcu_tasks_percpu structure.
* @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
* @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
* @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
* @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
* @barrier_q_mutex: Serialize barrier operations.
* @barrier_q_count: Number of queues being waited on.
* @barrier_q_completion: Barrier wait/wakeup mechanism.
* @barrier_q_seq: Sequence number for barrier operations.
* @name: This flavor's textual name.
* @kname: This flavor's kthread name.
*/
struct rcu_tasks {
struct rcuwait cbs_wait;
raw_spinlock_t cbs_gbl_lock;
struct mutex tasks_gp_mutex;
int gp_state;
int gp_sleep;
int init_fract;
unsigned long gp_jiffies;
unsigned long gp_start;
unsigned long tasks_gp_seq;
unsigned long n_ipis;
unsigned long n_ipis_fails;
struct task_struct *kthread_ptr;
rcu_tasks_gp_func_t gp_func;
pregp_func_t pregp_func;
pertask_func_t pertask_func;
postscan_func_t postscan_func;
holdouts_func_t holdouts_func;
postgp_func_t postgp_func;
call_rcu_func_t call_func;
struct rcu_tasks_percpu __percpu *rtpcpu;
int percpu_enqueue_shift;
int percpu_enqueue_lim;
int percpu_dequeue_lim;
unsigned long percpu_dequeue_gpseq;
struct mutex barrier_q_mutex;
atomic_t barrier_q_count;
struct completion barrier_q_completion;
unsigned long barrier_q_seq;
char *name;
char *kname;
};
static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
#define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \
.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \
.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \
}; \
static struct rcu_tasks rt_name = \
{ \
.cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \
.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \
.tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex), \
.gp_func = gp, \
.call_func = call, \
.rtpcpu = &rt_name ## __percpu, \
.name = n, \
.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \
.percpu_enqueue_lim = 1, \
.percpu_dequeue_lim = 1, \
.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \
.kname = #rt_name, \
}
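/*
 * Usage sketch: each Tasks-RCU flavor in this file instantiates the
 * machinery above with its own grace-period-wait and call functions.
 * For example, the classic flavor later in this file uses:
 *
 *	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 *
 * which defines both the per-CPU rcu_tasks__percpu array and the global
 * rcu_tasks structure wired to it.
 */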
#ifdef CONFIG_TASKS_RCU
/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
#endif
#ifdef CONFIG_TASKS_RCU
/* Report delay in synchronize_srcu() completion in rcu_tasks_postscan(). */
static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
#endif
/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);
/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
#define RCU_TASK_STALL_INFO (HZ * 10)
static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
module_param(rcu_task_stall_info, int, 0644);
static int rcu_task_stall_info_mult __read_mostly = 3;
module_param(rcu_task_stall_info_mult, int, 0444);
/* Number of per-CPU callback queues to use for enqueuing (-1: start with one queue and adjust automatically based on contention). */
static int rcu_task_enqueue_lim __read_mostly = -1;
module_param(rcu_task_enqueue_lim, int, 0444);
/* Adjust the number of queues dynamically? Set at boot when rcu_task_enqueue_lim is negative. */
static bool rcu_task_cb_adjust;
/* Per-jiffy count of contended enqueue attempts above which enqueuing spreads out to per-CPU callback queues. */
static int rcu_task_contend_lim __read_mostly = 100;
module_param(rcu_task_contend_lim, int, 0444);
/* Total callback count at or below which queuing collapses back to CPU 0. */
static int rcu_task_collapse_lim __read_mostly = 10;
module_param(rcu_task_collapse_lim, int, 0444);
/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT 0
#define RTGS_WAIT_WAIT_CBS 1
#define RTGS_WAIT_GP 2
#define RTGS_PRE_WAIT_GP 3
#define RTGS_SCAN_TASKLIST 4
#define RTGS_POST_SCAN_TASKLIST 5
#define RTGS_WAIT_SCAN_HOLDOUTS 6
#define RTGS_SCAN_HOLDOUTS 7
#define RTGS_POST_GP 8
#define RTGS_WAIT_READERS 9
#define RTGS_INVOKE_CBS 10
#define RTGS_WAIT_CBS 11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
"RTGS_INIT",
"RTGS_WAIT_WAIT_CBS",
"RTGS_WAIT_GP",
"RTGS_PRE_WAIT_GP",
"RTGS_SCAN_TASKLIST",
"RTGS_POST_SCAN_TASKLIST",
"RTGS_WAIT_SCAN_HOLDOUTS",
"RTGS_SCAN_HOLDOUTS",
"RTGS_POST_GP",
"RTGS_WAIT_READERS",
"RTGS_INVOKE_CBS",
"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */
////////////////////////////////////////////////////////////////////////
//
// Generic code.
static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
rtp->gp_state = newstate;
rtp->gp_jiffies = jiffies;
}
#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
int i = data_race(rtp->gp_state); // Let KCSAN detect update races
int j = READ_ONCE(i); // Prevent the compiler from reading twice
if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
return "???";
return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */
// Initialize per-CPU callback lists for the specified flavor of
// Tasks RCU.
static void cblist_init_generic(struct rcu_tasks *rtp)
{
int cpu;
unsigned long flags;
int lim;
int shift;
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
if (rcu_task_enqueue_lim < 0) {
rcu_task_enqueue_lim = 1;
rcu_task_cb_adjust = true;
} else if (rcu_task_enqueue_lim == 0) {
rcu_task_enqueue_lim = 1;
}
lim = rcu_task_enqueue_lim;
if (lim > nr_cpu_ids)
lim = nr_cpu_ids;
shift = ilog2(nr_cpu_ids / lim);
if (((nr_cpu_ids - 1) >> shift) >= lim)
shift++;
WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
smp_store_release(&rtp->percpu_enqueue_lim, lim);
for_each_possible_cpu(cpu) {
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
WARN_ON_ONCE(!rtpcp);
if (cpu)
raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
if (rcu_segcblist_empty(&rtpcp->cblist))
rcu_segcblist_init(&rtpcp->cblist);
INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
rtpcp->cpu = cpu;
rtpcp->rtpp = rtp;
if (!rtpcp->rtp_blkd_tasks.next)
INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
}
raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name,
data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust);
}
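/*
 * Worked example (illustrative): with the default rcu_task_enqueue_lim of -1
 * and nr_cpu_ids == 8, the code above sets lim = 1 and rcu_task_cb_adjust =
 * true, so shift = ilog2(8) = 3 and every CPU maps to queue 0 (cpu >> 3 == 0).
 * Booting with rcu_task_enqueue_lim=2 instead gives shift = ilog2(8 / 2) = 2,
 * so CPUs 0-3 share queue 0 and CPUs 4-7 share queue 1.
 */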
// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
{
struct rcu_tasks *rtp;
struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
rtp = rtpcp->rtpp;
rcuwait_wake_up(&rtp->cbs_wait);
}
// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
struct rcu_tasks *rtp)
{
int chosen_cpu;
unsigned long flags;
int ideal_cpu;
unsigned long j;
bool needadjust = false;
bool needwake;
struct rcu_tasks_percpu *rtpcp;
rhp->next = NULL;
rhp->func = func;
local_irq_save(flags);
rcu_read_lock();
ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
j = jiffies;
if (rtpcp->rtp_jiffies != j) {
rtpcp->rtp_jiffies = j;
rtpcp->rtp_n_lock_retries = 0;
}
if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
needadjust = true; // Defer adjustment to avoid deadlock.
}
if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
cblist_init_generic(rtp);
raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
}
needwake = rcu_segcblist_empty(&rtpcp->cblist);
rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
if (unlikely(needadjust)) {
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
}
raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
}
rcu_read_unlock();
/* We can't create the thread unless interrupts are enabled. */
if (needwake && READ_ONCE(rtp->kthread_ptr))
irq_work_queue(&rtpcp->rtp_irq_work);
}
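/*
 * Queue-selection sketch (illustrative): with percpu_enqueue_shift == 2, a
 * caller on CPU 5 computes ideal_cpu = 5 >> 2 = 1, and cpumask_next(0,
 * cpu_possible_mask) selects the first possible CPU at or above 1. In
 * single-queue mode the shift is order_base_2(nr_cpu_ids), so every caller
 * maps to CPU 0's queue.
 */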
// RCU callback function for rcu_barrier_tasks_generic().
static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
{
struct rcu_tasks *rtp;
struct rcu_tasks_percpu *rtpcp;
rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
rtp = rtpcp->rtpp;
if (atomic_dec_and_test(&rtp->barrier_q_count))
complete(&rtp->barrier_q_completion);
}
// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
// Operates in a manner similar to rcu_barrier().
static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
{
int cpu;
unsigned long flags;
struct rcu_tasks_percpu *rtpcp;
unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
mutex_lock(&rtp->barrier_q_mutex);
if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
smp_mb();
mutex_unlock(&rtp->barrier_q_mutex);
return;
}
rcu_seq_start(&rtp->barrier_q_seq);
init_completion(&rtp->barrier_q_completion);
// Bias the count by 2 so that callbacks entrained by the loop below
// cannot drive it to zero before entrainment completes.
atomic_set(&rtp->barrier_q_count, 2);
for_each_possible_cpu(cpu) {
if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
break;
rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
atomic_inc(&rtp->barrier_q_count);
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}
// Remove the bias; complete now if no entrained callbacks remain outstanding.
if (atomic_sub_and_test(2, &rtp->barrier_q_count))
complete(&rtp->barrier_q_completion);
wait_for_completion(&rtp->barrier_q_completion);
rcu_seq_end(&rtp->barrier_q_seq);
mutex_unlock(&rtp->barrier_q_mutex);
}
// Advance callbacks and indicate whether either a grace period or
// callback invocation is needed.
static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
{
int cpu;
unsigned long flags;
bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
long n;
long ncbs = 0;
long ncbsnz = 0;
int needgpcb = 0; // 0x1: callbacks queued for invocation, 0x2: grace period needed.
for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
/* Advance and accelerate any new callbacks. */
if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
continue;
raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
// Should we shrink down to a single callback queue?
n = rcu_segcblist_n_cbs(&rtpcp->cblist);
if (n) {
ncbs += n;
if (cpu > 0)
ncbsnz += n;
}
rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
if (rcu_segcblist_pend_cbs(&rtpcp->cblist))
needgpcb |= 0x3;
if (!rcu_segcblist_empty(&rtpcp->cblist))
needgpcb |= 0x1;
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}
// Shrink down to a single callback queue if appropriate.
// This is done in two stages: (1) If there are no more than
// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
// CPU, limit enqueueing to CPU 0. (2) After an RCU grace period,
// if there has not been an increase in callbacks, limit dequeuing
// to CPU 0. Note the matching RCU read-side critical section in
// call_rcu_tasks_generic().
if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
if (rtp->percpu_enqueue_lim > 1) {
WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
smp_store_release(&rtp->percpu_enqueue_lim, 1);
rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
gpdone = false;
pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
}
raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
}
if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
}
if (rtp->percpu_dequeue_lim == 1) {
for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
}
}
raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
}
return needgpcb;
}
// Advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
{
int cpu;
int cpunext;
int cpuwq;
unsigned long flags;
int len;
struct rcu_head *rhp;
struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
struct rcu_tasks_percpu *rtpcp_next;
cpu = rtpcp->cpu;
cpunext = cpu * 2 + 1;
if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
cpunext++;
if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
}
}
if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
return;
raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
len = rcl.len;
for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
local_bh_disable();
rhp->func(rhp);
local_bh_enable();
cond_resched();
}
raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
rcu_segcblist_add_len(&rtpcp->cblist, -len);
(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}
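/*
 * Fan-out sketch: the cpunext computation above forms a binary tree, so the
 * worker for queue 0 kicks queues 1 and 2, queue 1 kicks queues 3 and 4,
 * queue 2 kicks queues 5 and 6, and so on, letting callback invocation
 * spread across the per-CPU queues in a logarithmic number of steps.
 */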
// Workqueue flood to advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
{
struct rcu_tasks *rtp;
struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
rtp = rtpcp->rtpp;
rcu_tasks_invoke_cbs(rtp, rtpcp);
}
// Wait for one grace period.
static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
{
int needgpcb;
mutex_lock(&rtp->tasks_gp_mutex);
// If there were none, wait a bit and start over.
if (unlikely(midboot)) {
needgpcb = 0x2;
} else {
set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
rcuwait_wait_event(&rtp->cbs_wait,
(needgpcb = rcu_tasks_need_gpcb(rtp)),
TASK_IDLE);
}
if (needgpcb & 0x2) {
// Wait for one grace period.
set_tasks_gp_state(rtp, RTGS_WAIT_GP);
rtp->gp_start = jiffies;
rcu_seq_start(&rtp->tasks_gp_seq);
rtp->gp_func(rtp);
rcu_seq_end(&rtp->tasks_gp_seq);
}
// Invoke callbacks.
set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
mutex_unlock(&rtp->tasks_gp_mutex);
}
// RCU-tasks kthread that detects grace periods and invokes callbacks.
static int __noreturn rcu_tasks_kthread(void *arg)
{
struct rcu_tasks *rtp = arg;
/* Run on housekeeping CPUs by default. Sysadm can move if desired. */
housekeeping_affine(current, HK_TYPE_RCU);
WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
/*
* Each pass through the following loop makes one check for
* newly arrived callbacks, and, if there are some, waits for
* one RCU-tasks grace period and then invokes the callbacks.
* This loop is terminated by the system going down. ;-)
*/
for (;;) {
// Wait for one grace period and invoke any callbacks
// that are ready.
rcu_tasks_one_gp(rtp, false);
// Paranoid sleep to keep this from entering a tight loop.
schedule_timeout_idle(rtp->gp_sleep);
}
}
// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
/* Complain if the scheduler has not started. */
if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
"synchronize_%s() called too soon", rtp->name))
return;
// If the grace-period kthread is running, use it.
if (READ_ONCE(rtp->kthread_ptr)) {
wait_rcu_gp(rtp->call_func);
return;
}
rcu_tasks_one_gp(rtp, true);
}
/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
struct task_struct *t;
t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
return;
smp_mb(); /* Ensure others see full kthread. */
}
#ifndef CONFIG_TINY_RCU
/*
* Print any non-default Tasks RCU settings.
*/
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
int rtsimc;
if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
if (rtsimc != rcu_task_stall_info_mult) {
pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
rcu_task_stall_info_mult = rtsimc;
}
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}
#endif /* #ifndef CONFIG_TINY_RCU */
#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
int cpu;
bool havecbs = false;
for_each_possible_cpu(cpu) {
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) {
havecbs = true;
break;
}
}
pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
rtp->kname,
tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
jiffies - data_race(rtp->gp_jiffies),
data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
".k"[!!data_race(rtp->kthread_ptr)],
".C"[havecbs],
s);
}
#endif // #ifndef CONFIG_TINY_RCU
static void exit_tasks_rcu_finish_trace(struct task_struct *t);
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.
/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
struct task_struct *g;
int fract;
LIST_HEAD(holdouts);
unsigned long j;
unsigned long lastinfo;
unsigned long lastreport;
bool reported = false;
int rtsi;
struct task_struct *t;
set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
rtp->pregp_func(&holdouts);
/*
* There were callbacks, so we need to wait for an RCU-tasks
* grace period. Start off by scanning the task list for tasks
* that are not already voluntarily blocked. Mark these tasks
* and make a list of them in holdouts.
*/
set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
if (rtp->pertask_func) {
rcu_read_lock();
for_each_process_thread(g, t)
rtp->pertask_func(t, &holdouts);
rcu_read_unlock();
}
set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
rtp->postscan_func(&holdouts);
/*
* Each pass through the following loop scans the list of holdout
* tasks, removing any that are no longer holdouts. When the list
* is empty, we are done.
*/
lastreport = jiffies;
lastinfo = lastreport;
rtsi = READ_ONCE(rcu_task_stall_info);
// Start off with initial wait and slowly back off to 1 HZ wait.
fract = rtp->init_fract;
while (!list_empty(&holdouts)) {
ktime_t exp;
bool firstreport;
bool needreport;
int rtst;
// Slowly back off waiting for holdouts
set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
schedule_timeout_idle(fract);
} else {
exp = jiffies_to_nsecs(fract);
__set_current_state(TASK_IDLE);
schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
}
if (fract < HZ)
fract++;
rtst = READ_ONCE(rcu_task_stall_timeout);
needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
if (needreport) {
lastreport = jiffies;
reported = true;
}
firstreport = true;
WARN_ON(signal_pending(current));
set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
rtp->holdouts_func(&holdouts, needreport, &firstreport);
// Print pre-stall informational messages if needed.
j = jiffies;
if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
lastinfo = j;
rtsi = rtsi * rcu_task_stall_info_mult;
pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
__func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
}
}
set_tasks_gp_state(rtp, RTGS_POST_GP);
rtp->postgp_func(rtp);
}
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time. There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs. If this is required, per-CPU callback lists
// will be needed.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
// Invokes synchronize_rcu() in order to wait for all in-flight
// t->on_rq and t->nvcsw transitions to complete. This works because
// all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
// For every runnable non-idle task other than the current one, use
// get_task_struct() to pin down that task, snapshot that task's
// number of voluntary context switches, and add that task to the
// holdout list.
// rcu_tasks_postscan():
// Invoke synchronize_srcu() to ensure that all tasks that were
// in the process of exiting (and which thus might not know to
// synchronize with this RCU Tasks grace period) have completed
// exiting.
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
// Scans the holdout list, attempting to identify a quiescent state
// for each task on the list. If there is a quiescent state, the
// corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
// Invokes synchronize_rcu() in order to ensure that all prior
// t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
// to have happened before the end of this RCU Tasks grace period.
// Again, this works because all such transitions are carried out
// with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
// read-side critical sections waited for by rcu_tasks_postscan().
//
// Pre-grace-period update-side code is ordered before the grace period
// via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code
// is ordered before the grace period via synchronize_rcu() call in
// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
// disabling.
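/*
 * Illustrative (hedged) usage sketch, not part of this file: a tracer that
 * has just unhooked a trampoline can use this flavor to wait until no task
 * can still be executing inside it before freeing it. The helper name below
 * is hypothetical:
 *
 *	unhook_my_trampoline(tr);	// hypothetical: remove all callers
 *	synchronize_rcu_tasks();	// wait for every task to pass through a
 *					// voluntary context switch, idle, or
 *					// userspace execution
 *	kfree(tr);			// now safe to free the trampoline
 */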
/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(struct list_head *hop)
{
/*
* Wait for all pre-existing t->on_rq and t->nvcsw transitions
* to complete. Invoking synchronize_rcu() suffices because all
* these transitions occur with interrupts disabled. Without this
* synchronize_rcu(), a read-side critical section that started
* before the grace period might be incorrectly seen as having
* started after the grace period.
*
* This synchronize_rcu() also dispenses with the need for a
* memory barrier on the first store to t->rcu_tasks_holdout,
* as it forces the store to happen after the beginning of the
* grace period.
*/
synchronize_rcu();
}
/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
get_task_struct(t);
t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
WRITE_ONCE(t->rcu_tasks_holdout, true);
list_add(&t->rcu_tasks_holdout_list, hop);
}
}
/* Processing between scanning taskslist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
int rtsi = READ_ONCE(rcu_task_stall_info);
if (!IS_ENABLED(CONFIG_TINY_RCU)) {
tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
add_timer(&tasks_rcu_exit_srcu_stall_timer);
}
/*
* Exiting tasks may escape the tasklist scan. Those are vulnerable
* until their final schedule() with TASK_DEAD state. To cope with
* this, divide the fragile exit path part in two intersecting
* read side critical sections:
*
* 1) An _SRCU_ read side starting before calling exit_notify(),
* which may remove the task from the tasklist, and ending after
* the final preempt_disable() call in do_exit().
*
* 2) An _RCU_ read side starting with the final preempt_disable()
* call in do_exit() and ending with the final call to schedule()
* with TASK_DEAD state.
*
* This handles the part 1). And postgp will handle part 2) with a
* call to synchronize_rcu().
*/
synchronize_srcu(&tasks_rcu_exit_srcu);
if (!IS_ENABLED(CONFIG_TINY_RCU))
del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
}
/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
bool needreport, bool *firstreport)
{
int cpu;
if (!READ_ONCE(t->rcu_tasks_holdout) ||
t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
!READ_ONCE(t->on_rq) ||
(IS_ENABLED(CONFIG_NO_HZ_FULL) &&
!is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
WRITE_ONCE(t->rcu_tasks_holdout, false);
list_del_init(&t->rcu_tasks_holdout_list);
put_task_struct(t);
return;
}
rcu_request_urgent_qs_task(t);
if (!needreport)
return;
if (*firstreport) {
pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
*firstreport = false;
}
cpu = task_cpu(t);
pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
t, ".I"[is_idle_task(t)],
"N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
t->rcu_tasks_idle_cpu, cpu);
sched_show_task(t);
}
/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
bool needreport, bool *firstreport)
{
struct task_struct *t, *t1;
list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
check_holdout_task(t, needreport, firstreport);
cond_resched();
}
}
/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
/*
* Because ->on_rq and ->nvcsw are not guaranteed to have a full
* memory barriers prior to them in the schedule() path, memory
* reordering on other CPUs could cause their RCU-tasks read-side
* critical sections to extend past the end of the grace period.
* However, because these ->nvcsw updates are carried out with
* interrupts disabled, we can use synchronize_rcu() to force the
* needed ordering on all such CPUs.
*
* This synchronize_rcu() also confines all ->rcu_tasks_holdout
* accesses to be within the grace period, avoiding the need for
* memory barriers for ->rcu_tasks_holdout accesses.
*
* In addition, this synchronize_rcu() waits for exiting tasks
* to complete their final preempt_disable() region of execution,
* cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu),
* enforcing the whole region before tasklist removal until
* the final schedule() with TASK_DEAD state to be an RCU TASKS
* read side critical section.
*/
synchronize_rcu();
}
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
{
#ifndef CONFIG_TINY_RCU
int rtsi;
rtsi = READ_ONCE(rcu_task_stall_info);
pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
__func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
add_timer(&tasks_rcu_exit_srcu_stall_timer);
#endif // #ifndef CONFIG_TINY_RCU
}
/**
* call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
* @rhp: structure to be used for queueing the RCU updates.
* @func: actual callback function to be invoked after the grace period
*
* The callback function will be invoked some time after a full grace
* period elapses, in other words after all currently executing RCU
* read-side critical sections have completed. call_rcu_tasks() assumes
* that the read-side critical sections end at a voluntary context
* switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
* or transition to usermode execution. As such, there are no read-side
* primitives analogous to rcu_read_lock() and rcu_read_unlock() because
* this primitive is intended to determine that all tasks have passed
* through a safe state, not so much for data-structure synchronization.
*
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
*/
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
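/*
 * Illustrative (hedged) usage sketch, not part of this file: queue a callback
 * to free a tracing structure once all tasks have passed through a quiescent
 * state. The structure and callback names below are hypothetical:
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		// ... trampoline payload ...
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_tramp, rh));
 *	}
 *
 *	// After unhooking old_tp from all call sites:
 *	call_rcu_tasks(&old_tp->rh, my_tramp_free_cb);
 */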
/**
* synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
*
* Control will return to the caller some time after a full rcu-tasks
* grace period has elapsed, in other words after all currently
* executing rcu-tasks read-side critical sections have completed. These
* read-side critical sections are delimited by calls to schedule(),
* cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
* to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
*
* This is a very specialized primitive, intended only for a few uses in
* tracing and other situations requiring manipulation of function
* preambles and profiling hooks. The synchronize_rcu_tasks() function
* is not (yet) intended for heavy use from multiple CPUs.