/*
* linux/ipc/sem.c
* Copyright (C) 1992 Krishna Balasubramanian
* Copyright (C) 1995 Eric Schenk, Bruno Haible
*
* /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <[email protected]>
*
* SMP-threaded, sysctl's added
* (c) 1999 Manfred Spraul <[email protected]>
* Enforced range limit on SEM_UNDO
* (c) 2001 Red Hat Inc
* Lockless wakeup
* (c) 2003 Manfred Spraul <[email protected]>
* Further wakeup optimizations, documentation
* (c) 2010 Manfred Spraul <[email protected]>
*
* support for audit of ipc object properties and permission changes
* Dustin Kirkland <[email protected]>
*
* namespaces support
* OpenVZ, SWsoft Inc.
* Pavel Emelianov <[email protected]>
*
* Implementation notes: (May 2010)
* This file implements System V semaphores.
*
* User space visible behavior:
* - FIFO ordering for semop() operations (just FIFO, not starvation
* protection)
* - multiple semaphore operations that alter the same semaphore in
* one semop() are handled.
* - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
* SETALL calls.
* - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
* - undo adjustments at process exit are limited to 0..SEMVMX.
* - namespaces are supported.
* - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
* to /proc/sys/kernel/sem.
* - statistics about the usage are reported in /proc/sysvipc/sem.
*
* Internals:
* - scalability:
* - all global variables are read-mostly.
* - semop() calls and semctl(RMID) are synchronized by RCU.
* - most operations do write operations (actually: spin_lock calls) to
* the per-semaphore array structure.
* Thus: Perfect SMP scaling between independent semaphore arrays.
* If multiple semaphores in one array are used, then cache line
* thrashing on the semaphore array spinlock will limit the scaling.
* - semncnt and semzcnt are calculated on demand in count_semcnt()
* - the task that performs a successful semop() scans the list of all
* sleeping tasks and completes any pending operations that can be fulfilled.
* Semaphores are actively given to waiting tasks (necessary for FIFO).
* (see update_queue())
* - To improve the scalability, the actual wake-up calls are performed after
* dropping all locks. (see wake_up_sem_queue_prepare(),
* wake_up_sem_queue_do())
* - All work is done by the waker, the woken up task does not have to do
* anything - not even acquiring a lock or dropping a refcount.
* - A woken up task may not even touch the semaphore array anymore, it may
* have been destroyed already by a semctl(RMID).
* - The synchronization between wake-ups due to a timeout/signal and a
* wake-up due to a completed semaphore operation is achieved by using an
* intermediate state (IN_WAKEUP).
* - UNDO values are stored in an array (one per process and per
* semaphore array, lazily allocated). For backwards compatibility, multiple
* modes for the UNDO variables are supported (per process, per thread)
* (see copy_semundo, CLONE_SYSVSEM)
* - There are two lists of the pending operations: a per-array list
* and a per-semaphore list (stored in the array). This allows achieving FIFO
* ordering without always scanning all pending operations.
* The worst-case behavior is nevertheless O(N^2) for N wakeups.
*/
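/*
 * Example (user space, not part of this file): a minimal sketch of the
 * user-visible behavior described above, assuming a glibc environment.
 * Error handling is omitted for brevity.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	union semun { int val; };	// caller-defined, see semctl(2)
 *
 *	int main(void)
 *	{
 *		int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *
 *		// SETVAL updates sem_ctime, as noted above
 *		semctl(id, 0, SETVAL, (union semun){ .val = 1 });
 *
 *		// single-sop decrement; with SEM_UNDO the exit-time
 *		// adjustment is limited to 0..SEMVMX
 *		struct sembuf op = { .sem_num = 0, .sem_op = -1,
 *				     .sem_flg = SEM_UNDO };
 *		semop(id, &op, 1);
 *
 *		semctl(id, 0, IPC_RMID);
 *		return 0;
 *	}
 */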
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <linux/uaccess.h>
#include "util.h"
/* One semaphore structure for each semaphore in the system. */
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head pending_alter; /* pending single-sop operations */
/* that alter the semaphore */
struct list_head pending_const; /* pending single-sop operations */
/* that do not alter the semaphore*/
time_t sem_otime; /* candidate for sem_otime */
} ____cacheline_aligned_in_smp;
/* One queue for each sleeping process in the system. */
struct sem_queue {
struct list_head list; /* queue of pending operations */
struct task_struct *sleeper; /* this process */
struct sem_undo *undo; /* undo structure */
int pid; /* process id of requesting process */
int status; /* completion status of operation */
struct sembuf *sops; /* array of pending operations */
struct sembuf *blocking; /* the operation that blocked */
int nsops; /* number of operations */
int alter; /* does *sops alter the array? */
};
/* Each task has a list of undo requests. They are executed automatically
* when the process exits.
*/
struct sem_undo {
struct list_head list_proc; /* per-process list: *
* all undos from one process
* rcu protected */
struct rcu_head rcu; /* rcu struct for sem_undo */
struct sem_undo_list *ulp; /* back ptr to sem_undo_list */
struct list_head list_id; /* per semaphore array list:
* all undos for one array */
int semid; /* semaphore set identifier */
short *semadj; /* array of adjustments */
/* one per semaphore */
};
/* sem_undo_list controls shared access to the list of sem_undo structures
* that may be shared among all tasks in a CLONE_SYSVSEM task group.
*/
struct sem_undo_list {
atomic_t refcnt;
spinlock_t lock;
struct list_head list_proc;
};
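/*
 * Example (user space): a sketch of the undo semantics these structures
 * track. A semop() with SEM_UNDO records a per-process adjustment in
 * semadj; if the process exits without reversing the operation, the
 * kernel applies the adjustment automatically (see exit_sem(), later in
 * this file).
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1,
 *			     .sem_flg = SEM_UNDO };
 *	semop(id, &op, 1);	// records semadj[0] = +1
 *	_exit(0);		// kernel re-increments semaphore 0
 */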
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif
#define SEMMSL_FAST 256 /* 512 bytes on stack */
#define SEMOPM_FAST 64 /* ~ 372 bytes on stack */
/*
* Locking:
* sem_undo.list_id,
* sem_array.complex_count,
* sem_array.pending{_alter,_const},
* sem_array.list_id: global sem_lock() for read/write
* sem_undo.list_proc: only "current" is allowed to read/write that field.
*
* sem_array.sem_base[i].pending_{const,alter}:
* global or semaphore sem_lock() for read/write
*/
#define sc_semmsl sem_ctls[0]
#define sc_semmns sem_ctls[1]
#define sc_semopm sem_ctls[2]
#define sc_semmni sem_ctls[3]
void sem_init_ns(struct ipc_namespace *ns)
{
ns->sc_semmsl = SEMMSL;
ns->sc_semmns = SEMMNS;
ns->sc_semopm = SEMOPM;
ns->sc_semmni = SEMMNI;
ns->used_sems = 0;
ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}
#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &sem_ids(ns), freeary);
idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif
void __init sem_init(void)
{
sem_init_ns(&init_ipc_ns);
ipc_init_proc_interface("sysvipc/sem",
" key semid perms nsems uid gid cuid cgid otime ctime\n",
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
/**
* unmerge_queues - unmerge queues, if possible.
* @sma: semaphore array
*
* The function unmerges the wait queues if complex_count is 0.
* It must be called prior to dropping the global semaphore array lock.
*/
static void unmerge_queues(struct sem_array *sma)
{
struct sem_queue *q, *tq;
/* complex operations still around? */
if (sma->complex_count)
return;
/*
* We will switch back to simple mode.
* Move all pending operations back into the per-semaphore
* queues.
*/
list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
struct sem *curr;
curr = &sma->sem_base[q->sops[0].sem_num];
list_add_tail(&q->list, &curr->pending_alter);
}
INIT_LIST_HEAD(&sma->pending_alter);
}
/**
* merge_queues - merge single semop queues into global queue
* @sma: semaphore array
*
* This function merges all per-semaphore queues into the global queue.
* It is necessary to achieve FIFO ordering for the pending single-sop
* operations when a multi-semop operation must sleep.
* Only the alter operations must be moved, the const operations can stay.
*/
static void merge_queues(struct sem_array *sma)
{
int i;
for (i = 0; i < sma->sem_nsems; i++) {
struct sem *sem = sma->sem_base + i;
list_splice_init(&sem->pending_alter, &sma->pending_alter);
}
}
static void sem_rcu_free(struct rcu_head *head)
{
struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
struct sem_array *sma = ipc_rcu_to_struct(p);
security_sem_free(sma);
ipc_rcu_free(head);
}
/*
* Wait until all currently ongoing simple ops have completed.
* Caller must own sem_perm.lock.
* New simple ops cannot start, because simple ops first check
* that a) sem_perm.lock is free and b) complex_count is 0.
*/
static void sem_wait_array(struct sem_array *sma)
{
int i;
struct sem *sem;
if (sma->complex_count) {
/* The thread that increased sma->complex_count waited on
* all sem->lock locks. Thus we don't need to wait again.
*/
return;
}
for (i = 0; i < sma->sem_nsems; i++) {
sem = sma->sem_base + i;
spin_unlock_wait(&sem->lock);
}
}
/*
* If the request contains only one semaphore operation, and there are
* no complex transactions pending, lock only the semaphore involved.
* Otherwise, lock the entire semaphore array, since we either have
* multiple semaphores in our own semops, or we need to look at
* semaphores from other pending complex operations.
*/
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
int nsops)
{
struct sem *sem;
if (nsops != 1) {
/* Complex operation - acquire a full lock */
ipc_lock_object(&sma->sem_perm);
/* And wait until all simple ops that are processed
* right now have dropped their locks.
*/
sem_wait_array(sma);
return -1;
}
/*
* Only one semaphore affected - try to optimize locking.
* The rules are:
* - optimized locking is possible if no complex operation
* is either enqueued or processed right now.
* - The test for enqueued complex ops is simple:
* sma->complex_count != 0
* - Testing for complex ops that are processed right now is
* a bit more difficult. Complex ops acquire the full lock
* and first wait until the running simple ops have completed.
* (see above)
* Thus: If we own a simple lock and the global lock is free
* and complex_count is now 0, then it will stay 0 and
* thus just locking sem->lock is sufficient.
*/
sem = sma->sem_base + sops->sem_num;
if (sma->complex_count == 0) {
/*
* It appears that no complex operation is around.
* Acquire the per-semaphore lock.
*/
spin_lock(&sem->lock);
/* Then check that the global lock is free */
if (!spin_is_locked(&sma->sem_perm.lock)) {
/* spin_is_locked() is not a memory barrier */
smp_mb();
/* Now repeat the test of complex_count:
* It can't change anymore until we drop sem->lock.
* Thus: if it is now 0, then it will stay 0.
*/
if (sma->complex_count == 0) {
/* fast path successful! */
return sops->sem_num;
}
}
spin_unlock(&sem->lock);
}
/* slow path: acquire the full lock */
ipc_lock_object(&sma->sem_perm);
if (sma->complex_count == 0) {
/* False alarm:
* There is no complex operation, thus we can switch
* back to the fast path.
*/
spin_lock(&sem->lock);
ipc_unlock_object(&sma->sem_perm);
return sops->sem_num;
} else {
/* Not a false alarm, thus complete the sequence for a
* full lock.
*/
sem_wait_array(sma);
return -1;
}
}
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
if (locknum == -1) {
unmerge_queues(sma);
ipc_unlock_object(&sma->sem_perm);
} else {
struct sem *sem = sma->sem_base + locknum;
spin_unlock(&sem->lock);
}
}
/*
* sem_lock_(check_) routines are called in the paths where the rwsem
* is not held.
*
* The caller holds the RCU read lock.
*/
static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
ipcp = ipc_obtain_object(&sem_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
sma = container_of(ipcp, struct sem_array, sem_perm);
*locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (ipc_valid_object(ipcp))
return container_of(ipcp, struct sem_array, sem_perm);
sem_unlock(sma, *locknum);
return ERR_PTR(-EINVAL);
}
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
return container_of(ipcp, struct sem_array, sem_perm);
}
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
return container_of(ipcp, struct sem_array, sem_perm);
}
static inline void sem_lock_and_putref(struct sem_array *sma)
{
sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma, ipc_rcu_free);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
/*
* Lockless wakeup algorithm:
* Without the check/retry algorithm a lockless wakeup is possible:
* - queue.status is initialized to -EINTR before blocking.
* - wakeup is performed by
* * unlinking the queue entry from the pending list
* * setting queue.status to IN_WAKEUP
* This is the notification for the blocked thread that a
* result value is imminent.
* * call wake_up_process
* * set queue.status to the final value.
* - the previously blocked thread checks queue.status:
* * if it's IN_WAKEUP, then it must wait until the value changes
* * if it's not -EINTR, then the operation was completed by
* update_queue. semtimedop can return queue.status without
* performing any operation on the sem array.
* * otherwise it must acquire the spinlock and check what's up.
*
* The two-stage algorithm is necessary to protect against the following
* races:
* - if queue.status is set after wake_up_process, then the woken up idle
* thread could race forward and try (and fail) to acquire sma->lock
* before update_queue had a chance to set queue.status
* - if queue.status is written before wake_up_process and if the
* blocked process is woken up by a signal between writing
* queue.status and the wake_up_process, then the woken up
* process could return from semtimedop and die by calling
* sys_exit before wake_up_process is called. Then wake_up_process
* will oops, because the task structure is already invalid.
* (yes, this happened on s390 with sysv msg).
*
*/
#define IN_WAKEUP 1
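/*
 * Sketch of the sleeper's side of the protocol above, simplified from
 * get_queue_result() further down in this file:
 *
 *	error = q->status;
 *	while (unlikely(error == IN_WAKEUP)) {
 *		cpu_relax();		// the waker is about to store the
 *		error = q->status;	// final value; spin briefly
 *	}
 *	// error != -EINTR: completed by update_queue(), no lock needed
 */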
/**
* newary - Create a new semaphore set
* @ns: namespace
* @params: ptr to the structure that contains key, semflg and nsems
*
* Called with sem_ids.rwsem held (as a writer)
*/
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
int id;
int retval;
struct sem_array *sma;
int size;
key_t key = params->key;
int nsems = params->u.nsems;
int semflg = params->flg;
int i;
if (!nsems)
return -EINVAL;
if (ns->used_sems + nsems > ns->sc_semmns)
return -ENOSPC;
size = sizeof(*sma) + nsems * sizeof(struct sem);
sma = ipc_rcu_alloc(size);
if (!sma)
return -ENOMEM;
memset(sma, 0, size);
sma->sem_perm.mode = (semflg & S_IRWXUGO);
sma->sem_perm.key = key;
sma->sem_perm.security = NULL;
retval = security_sem_alloc(sma);
if (retval) {
ipc_rcu_putref(sma, ipc_rcu_free);
return retval;
}
id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
if (id < 0) {
ipc_rcu_putref(sma, sem_rcu_free);
return id;
}
ns->used_sems += nsems;
sma->sem_base = (struct sem *) &sma[1];
for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
spin_lock_init(&sma->sem_base[i].lock);
}
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->pending_alter);
INIT_LIST_HEAD(&sma->pending_const);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
sem_unlock(sma, -1);
rcu_read_unlock();
return sma->sem_perm.id;
}
/*
* Called with sem_ids.rwsem and ipcp locked.
*/
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
struct sem_array *sma;
sma = container_of(ipcp, struct sem_array, sem_perm);
return security_sem_associate(sma, semflg);
}
/*
* Called with sem_ids.rwsem and ipcp locked.
*/
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
struct ipc_params *params)
{
struct sem_array *sma;
sma = container_of(ipcp, struct sem_array, sem_perm);
if (params->u.nsems > sma->sem_nsems)
return -EINVAL;
return 0;
}
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
struct ipc_namespace *ns;
static const struct ipc_ops sem_ops = {
.getnew = newary,
.associate = sem_security,
.more_checks = sem_more_checks,
};
struct ipc_params sem_params;
ns = current->nsproxy->ipc_ns;
if (nsems < 0 || nsems > ns->sc_semmsl)
return -EINVAL;
sem_params.key = key;
sem_params.flg = semflg;
sem_params.u.nsems = nsems;
return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
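/*
 * Example (user space): calling this syscall through the libc wrapper.
 * nsems is bounded by 0..SEMMSL (checked above); opening an existing
 * set is allowed with nsems == 0.
 *
 *	int id = semget(0x1234, 4, IPC_CREAT | 0600);	// create or open
 *	int same = semget(0x1234, 0, 0);		// open existing set
 */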
/**
* perform_atomic_semop - Perform (if possible) a semaphore operation
* @sma: semaphore array
* @q: struct sem_queue that describes the operation
*
* Returns 0 if the operation was possible.
* Returns 1 if the operation is impossible, the caller must sleep.
* Negative values are error codes.
*/
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
int result, sem_op, nsops, pid;
struct sembuf *sop;
struct sem *curr;
struct sembuf *sops;
struct sem_undo *un;
sops = q->sops;
nsops = q->nsops;
un = q->undo;
for (sop = sops; sop < sops + nsops; sop++) {
curr = sma->sem_base + sop->sem_num;
sem_op = sop->sem_op;
result = curr->semval;
if (!sem_op && result)
goto would_block;
result += sem_op;
if (result < 0)
goto would_block;
if (result > SEMVMX)
goto out_of_range;
if (sop->sem_flg & SEM_UNDO) {
int undo = un->semadj[sop->sem_num] - sem_op;
/* Exceeding the undo range is an error. */
if (undo < (-SEMAEM - 1) || undo > SEMAEM)
goto out_of_range;
un->semadj[sop->sem_num] = undo;
}
curr->semval = result;
}
sop--;
pid = q->pid;
while (sop >= sops) {
sma->sem_base[sop->sem_num].sempid = pid;
sop--;
}
return 0;
out_of_range:
result = -ERANGE;
goto undo;
would_block:
q->blocking = sop;
if (sop->sem_flg & IPC_NOWAIT)
result = -EAGAIN;
else
result = 1;
undo:
sop--;
while (sop >= sops) {
sem_op = sop->sem_op;
sma->sem_base[sop->sem_num].semval -= sem_op;
if (sop->sem_flg & SEM_UNDO)
un->semadj[sop->sem_num] += sem_op;
sop--;
}
return result;
}
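/*
 * Worked example: with semval[0] == 0 and semval[1] == 1, the request
 *
 *	struct sembuf sops[2] = { { .sem_num = 0, .sem_op = +1 },
 *				  { .sem_num = 1, .sem_op = -2 } };
 *
 * first sets semval[0] to 1, then fails on the second op (1 - 2 < 0).
 * The undo: loop rolls semval[0] back to 0 and the function returns 1
 * (the caller must sleep), or -EAGAIN if the blocking op has IPC_NOWAIT.
 */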
/** wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
* @pt: list of tasks to be woken up
* @q: queue entry that must be signaled
* @error: Error value for the signal
*
* Prepare the wake-up of the queue entry q.
*/
static void wake_up_sem_queue_prepare(struct list_head *pt,
struct sem_queue *q, int error)
{
if (list_empty(pt)) {
/*
* Hold preempt off so that we don't get preempted and have the
* wakee busy-wait until we're scheduled back on.
*/
preempt_disable();
}
q->status = IN_WAKEUP;
q->pid = error;
list_add_tail(&q->list, pt);
}
/**
* wake_up_sem_queue_do - do the actual wake-up
* @pt: list of tasks to be woken up
*
* Do the actual wake-up.
* The function is called without any locks held, thus the semaphore array
* could be destroyed already and the tasks can disappear as soon as the
* status is set to the actual return code.
*/
static void wake_up_sem_queue_do(struct list_head *pt)
{
struct sem_queue *q, *t;
int did_something;
did_something = !list_empty(pt);
list_for_each_entry_safe(q, t, pt, list) {
wake_up_process(q->sleeper);
/* q can disappear immediately after writing q->status. */
smp_wmb();
q->status = q->pid;
}
if (did_something)
preempt_enable();
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
list_del(&q->list);
if (q->nsops > 1)
sma->complex_count--;
}
/** check_restart(sma, q)
* @sma: semaphore array
* @q: the operation that just completed
*
* update_queue is O(N^2) when it restarts scanning the whole queue of
* waiting operations. Therefore this function checks if the restart is
* really necessary. It is called after a previously waiting operation
* modified the array.
* Note that wait-for-zero operations are handled without restart.
*/
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
/* pending complex alter operations are too difficult to analyse */
if (!list_empty(&sma->pending_alter))
return 1;
/* we were a sleeping complex operation. Too difficult */
if (q->nsops > 1)
return 1;
/* It is impossible that someone waits for the new value:
* - complex operations always restart.
* - wait-for-zero ops are handled separately.
* - q is a previously sleeping simple operation that
* altered the array. It must be a decrement, because
* simple increments never sleep.
* - If there are older (higher priority) decrements
* in the queue, then they have observed the original
* semval value and couldn't proceed. The operation
* decremented the value - thus they won't proceed either.
*/
return 0;
}
/**
* wake_const_ops - wake up non-alter tasks
* @sma: semaphore array.
* @semnum: semaphore that was modified.
* @pt: list head for the tasks that must be woken up.
*
* wake_const_ops must be called after a semaphore in a semaphore array
* was set to 0. If complex const operations are pending, wake_const_ops must
* be called with semnum = -1, as well as with the number of each modified
* semaphore.
* The tasks that must be woken up are added to @pt. The return code
* is stored in q->pid.
* The function returns 1 if at least one operation was completed successfully.
*/
static int wake_const_ops(struct sem_array *sma, int semnum,
struct list_head *pt)
{
struct sem_queue *q;
struct list_head *walk;
struct list_head *pending_list;
int semop_completed = 0;
if (semnum == -1)
pending_list = &sma->pending_const;
else
pending_list = &sma->sem_base[semnum].pending_const;
walk = pending_list->next;
while (walk != pending_list) {
int error;
q = container_of(walk, struct sem_queue, list);
walk = walk->next;
error = perform_atomic_semop(sma, q);
if (error <= 0) {
/* operation completed, remove from queue & wakeup */
unlink_queue(sma, q);
wake_up_sem_queue_prepare(pt, q, error);
if (error == 0)
semop_completed = 1;
}
}
return semop_completed;
}
/**
* do_smart_wakeup_zero - wakeup all wait for zero tasks
* @sma: semaphore array
* @sops: operations that were performed
* @nsops: number of operations
* @pt: list head of the tasks that must be woken up.
*
* Checks all required queues for wait-for-zero operations, based
* on the actual changes that were performed on the semaphore array.
* The function returns 1 if at least one operation was completed successfully.
*/
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
int nsops, struct list_head *pt)
{
int i;
int semop_completed = 0;
int got_zero = 0;
/* first: the per-semaphore queues, if known */
if (sops) {
for (i = 0; i < nsops; i++) {
int num = sops[i].sem_num;
if (sma->sem_base[num].semval == 0) {
got_zero = 1;
semop_completed |= wake_const_ops(sma, num, pt);
}
}
} else {
/*
* No sops means modified semaphores not known.
* Assume all were changed.
*/
for (i = 0; i < sma->sem_nsems; i++) {
if (sma->sem_base[i].semval == 0) {
got_zero = 1;
semop_completed |= wake_const_ops(sma, i, pt);
}
}
}
/*
* If one of the modified semaphores got 0,
* then check the global queue, too.
*/
if (got_zero)
semop_completed |= wake_const_ops(sma, -1, pt);
return semop_completed;
}
/**
* update_queue - look for tasks that can be completed.
* @sma: semaphore array.
* @semnum: semaphore that was modified.
* @pt: list head for the tasks that must be woken up.
*
* update_queue must be called after a semaphore in a semaphore array
* was modified. If multiple semaphores were modified, update_queue must
* be called with semnum = -1, as well as with the number of each modified
* semaphore.
* The tasks that must be woken up are added to @pt. The return code
* is stored in q->pid.
* The function internally checks if const operations can now succeed.
*
* The function returns 1 if at least one semop was completed successfully.
*/
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
struct sem_queue *q;
struct list_head *walk;
struct list_head *pending_list;
int semop_completed = 0;
if (semnum == -1)
pending_list = &sma->pending_alter;
else
pending_list = &sma->sem_base[semnum].pending_alter;
again:
walk = pending_list->next;
while (walk != pending_list) {
int error, restart;
q = container_of(walk, struct sem_queue, list);
walk = walk->next;
/* If we are scanning the single sop, per-semaphore list of
* one semaphore and that semaphore is 0, then it is not
* necessary to scan further: simple increments
* that affect only one entry succeed immediately and cannot
* be in the per semaphore pending queue, and decrements
* cannot be successful if the value is already 0.
*/
if (semnum != -1 && sma->sem_base[semnum].semval == 0)
break;
error = perform_atomic_semop(sma, q);
/* Does q->sleeper still need to sleep? */
if (error > 0)
continue;
unlink_queue(sma, q);
if (error) {
restart = 0;
} else {
semop_completed = 1;
do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
restart = check_restart(sma, q);
}
wake_up_sem_queue_prepare(pt, q, error);
if (restart)
goto again;
}
return semop_completed;
}
/**
* set_semotime - set sem_otime
* @sma: semaphore array
* @sops: operations that modified the array, may be NULL
*
* sem_otime is replicated to avoid cache line thrashing.
* This function sets one instance to the current time.
*/
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
if (sops == NULL) {
sma->sem_base[0].sem_otime = get_seconds();
} else {
sma->sem_base[sops[0].sem_num].sem_otime =
get_seconds();
}
}
/**
* do_smart_update - optimized update_queue
* @sma: semaphore array
* @sops: operations that were performed
* @nsops: number of operations
* @otime: force setting otime
* @pt: list head of the tasks that must be woken up.
*
* do_smart_update() does the required calls to update_queue and wakeup_zero,
* based on the actual changes that were performed on the semaphore array.
* Note that the function does not do the actual wake-up: the caller is
* responsible for calling wake_up_sem_queue_do(@pt).
* It is safe to perform this call after dropping all locks.
*/
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
int otime, struct list_head *pt)
{
int i;
otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);
if (!list_empty(&sma->pending_alter)) {
/* semaphore array uses the global queue - just process it. */
otime |= update_queue(sma, -1, pt);
} else {
if (!sops) {
/*
* No sops, thus the modified semaphores are not
* known. Check all.
*/
for (i = 0; i < sma->sem_nsems; i++)
otime |= update_queue(sma, i, pt);
} else {
/*
* Check the semaphores that were increased:
* - No complex ops, thus all sleeping ops are
* decreases.
* - if we decreased the value, then any sleeping
* semaphore ops won't be able to run: if the
* previous value was too small, then the new
* value will be too small, too.
*/
for (i = 0; i < nsops; i++) {
if (sops[i].sem_op > 0) {
otime |= update_queue(sma,
sops[i].sem_num, pt);
}
}
}
}
if (otime)
set_semotime(sma, sops);
}
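/*
 * Sketch of the typical caller pattern (simplified from the semctl()
 * and semtimedop() paths further down in this file):
 *
 *	struct list_head tasks;
 *
 *	INIT_LIST_HEAD(&tasks);
 *	...acquire sem_lock(), modify the array...
 *	do_smart_update(sma, sops, nsops, 1, &tasks);
 *	sem_unlock(sma, locknum);
 *	rcu_read_unlock();
 *	wake_up_sem_queue_do(&tasks);	// safe without any locks held
 */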
/*
* check_qop: Test if a queued operation sleeps on the semaphore semnum
*/
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
bool count_zero)
{