/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *     Thus: Perfect SMP scaling between independent semaphore arrays.
 *     If multiple semaphores in one array are used, then cache line
 *     thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semncnt() and
 *   count_semzcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare(),
 *   wake_up_sem_queue_do())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - The synchronization between wake-ups due to a timeout/signal and a
 *   wake-up due to a completed semaphore operation is achieved by using an
 *   intermediate state (IN_WAKEUP).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and a per-semaphore list (stored in the array). This allows FIFO
 *   ordering to be achieved without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
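
/*
 * A minimal user-space sketch (illustrative only, not kernel code; the
 * libc wrappers and the 0600 mode are assumptions) exercising the
 * behavior described above:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	int main(void)
 *	{
 *		struct sembuf op = { .sem_num = 0, .sem_op = -1,
 *				     .sem_flg = SEM_UNDO };
 *		int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *
 *		semctl(id, 0, SETVAL, 1);	(updates sem_ctime)
 *		semop(id, &op, 1);		(undone at process exit)
 *		return semctl(id, 0, IPC_RMID);
 *	}
 *
 * Depending on the libc, SETVAL may require an explicit union semun
 * argument instead of the bare int shown here.
 */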

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>
#include "util.h"

/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	int	sempid;		/* pid of last operation */
	struct list_head sem_pending; /* pending single-sop operations */
};

/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	simple_list; /* queue of pending operations,
					      * per-semaphore list */
	struct list_head	list;	 /* queue of pending operations,
					  * per-array list */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	int			pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	int			nsops;	 /* number of operations */
	int			alter;	 /* does *sops alter the array? */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	atomic_t		refcnt;
	spinlock_t		lock;
	struct list_head	list_proc;
};


#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

#define sem_unlock(sma)	ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * linked list protection:
 *	sem_undo.id_next,
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 *
 */

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]

void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif

void __init sem_init(void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}

/*
 * sem_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct sem_array *)ipcp;

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct sem_array *)ipcp;

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
}

static inline void sem_getref_and_unlock(struct sem_array *sma)
{
	ipc_rcu_getref(sma);
	ipc_unlock(&(sma)->sem_perm);
}

static inline void sem_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
	ipc_unlock(&(sma)->sem_perm);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from sma->sem_pending
 *	* setting queue.status to IN_WAKEUP
 *	  This is the notification for the blocked thread that a
 *	  result value is imminent.
 *	* call wake_up_process
 *	* set queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *	* if it's IN_WAKEUP, then it must wait until the value changes
 *	* if it's not -EINTR, then the operation was completed by
 *	  update_queue. semtimedop can return queue.status without
 *	  performing any operation on the sem array.
 *	* otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up task
 *   could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 *
 */
#define IN_WAKEUP	1
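
/*
 * Sketch of the two-stage handshake described above (illustrative, not
 * compiled; the real code is in wake_up_sem_queue_prepare(),
 * wake_up_sem_queue_do() and get_queue_result()):
 *
 *	waker				sleeper
 *	-----				-------
 *	q->status = IN_WAKEUP;		error = q->status;
 *	wake_up_process(q->sleeper);	while (error == IN_WAKEUP) {
 *	smp_wmb();				cpu_relax();
 *	q->status = final result;		error = q->status;
 *					}
 */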

/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rw_mutex held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	size = sizeof(*sma) + nsems * sizeof(struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma)
		return -ENOMEM;

	memset(sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma);
		return retval;
	}

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		security_sem_free(sma);
		ipc_rcu_putref(sma);
		return id;
	}
	ns->used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++)
		INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);

	sma->complex_count = 0;
	INIT_LIST_HEAD(&sma->sem_pending);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sma->sem_perm.id;
}


/*
 * Called with sem_ids.rw_mutex and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}

/*
 * Called with sem_ids.rw_mutex and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops sem_ops;
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_ops.getnew = newary;
	sem_ops.associate = sem_security;
	sem_ops.more_checks = sem_more_checks;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}

/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if the task must sleep, otherwise an
 * error code.
 */
static int try_atomic_semop(struct sem_array *sma, struct sembuf *sops,
			    int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem *curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}

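/*
 * Worked example of the all-or-nothing semantics above (values made up):
 * with sops[] = {{.sem_num = 0, .sem_op = -1}, {.sem_num = 1, .sem_op = -1}},
 * semval[0] = 1 and semval[1] = 0, the first loop applies -1 to semaphore 0
 * and then hits would_block on semaphore 1. The undo loop rolls semaphore 0
 * back before returning 1 (sleep) or -EAGAIN (IPC_NOWAIT), so either both
 * decrements take effect or neither does.
 */
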
/** wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
 * @pt: list head of the tasks that must be woken up
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
	if (list_empty(pt)) {
		/*
		 * Hold preempt off so that we don't get preempted and have the
		 * wakee busy-wait until we're scheduled back on.
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	q->pid = error;

	list_add_tail(&q->simple_list, pt);
}

/**
 * wake_up_sem_queue_do(pt) - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up.
 * The function is called without any locks held, thus the semaphore array
 * could be destroyed already and the tasks can disappear as soon as the
 * status is set to the actual return code.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, simple_list) {
		wake_up_process(q->sleeper);
		/* q can disappear immediately after writing q->status. */
		smp_wmb();
		q->status = q->pid;
	}
	if (did_something)
		preempt_enable();
}

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops == 1)
		list_del(&q->simple_list);
	else
		sma->complex_count--;
}

/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * was completed.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	struct sem *curr;
	struct sem_queue *h;

	/* if the operation didn't modify the array, then no restart */
	if (q->alter == 0)
		return 0;

	/* pending complex operations are too difficult to analyse */
	if (sma->complex_count)
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	curr = sma->sem_base + q->sops[0].sem_num;

	/* No-one waits on this queue */
	if (list_empty(&curr->sem_pending))
		return 0;

	/* the new semaphore value */
	if (curr->semval) {
		/* It is impossible that someone waits for the new value:
		 * - q is a previously sleeping simple operation that
		 *   altered the array. It must be a decrement, because
		 *   simple increments never sleep.
		 * - The value is not 0, thus wait-for-zero won't proceed.
		 * - If there are older (higher priority) decrements
		 *   in the queue, then they have observed the original
		 *   semval value and couldn't proceed. The operation
		 *   decreased the value - thus they won't proceed either.
		 */
		BUG_ON(q->sops[0].sem_op >= 0);
		return 0;
	}
	/*
	 * semval is 0. Check if there are wait-for-zero semops.
	 * They must be the first entries in the per-semaphore simple queue
	 */
	h = list_first_entry(&curr->sem_pending, struct sem_queue, simple_list);
	BUG_ON(h->nsops != 1);
	BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);

	/* Yes, there is a wait-for-zero semop. Restart */
	if (h->sops[0].sem_op == 0)
		return 1;

	/* Again - no-one is waiting for the new value. */
	return 0;
}


/**
 * update_queue(sma, semnum, pt) - Look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, then @semnum
 * must be set to -1.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int offset;
	int semop_completed = 0;

	/* if there are complex operations around, then knowing the semaphore
	 * that was modified doesn't help us. Assume that multiple semaphores
	 * were modified.
	 */
	if (sma->complex_count)
		semnum = -1;

	if (semnum == -1) {
		pending_list = &sma->sem_pending;
		offset = offsetof(struct sem_queue, list);
	} else {
		pending_list = &sma->sem_base[semnum].sem_pending;
		offset = offsetof(struct sem_queue, simple_list);
	}

again:
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, restart;

		q = (struct sem_queue *)((char *)walk - offset);
		walk = walk->next;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan the "alter" entries: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
				q->alter)
			break;

		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(pt, q, error);
		if (restart)
			goto again;
	}
	return semop_completed;
}

/**
 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() does the required calls to update_queue, based on the
 * actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_sem_queue_do(@pt).
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			int otime, struct list_head *pt)
{
	int i;

	if (sma->complex_count || sops == NULL) {
		if (update_queue(sma, -1, pt))
			otime = 1;
		goto done;
	}

	for (i = 0; i < nsops; i++) {
		if (sops[i].sem_op > 0 ||
			(sops[i].sem_op < 0 &&
				sma->sem_base[sops[i].sem_num].semval == 0))
			if (update_queue(sma, sops[i].sem_num, pt))
				otime = 1;
	}
done:
	if (otime)
		sma->sem_otime = get_seconds();
}


/* The following counts are associated with each semaphore:
 *	semncnt		number of tasks waiting on semval being nonzero
 *	semzcnt		number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
 */
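
/*
 * Example (values made up): with one task sleeping in semop() on
 * {.sem_num = 2, .sem_op = -1} and another on {.sem_num = 2, .sem_op = 0},
 * semctl(id, 2, GETNCNT) returns 1 and semctl(id, 2, GETZCNT) returns 1,
 * computed by the two helpers below.
 */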
static int count_semncnt(struct sem_array *sma, ushort semnum)
{
	int semncnt;
	struct sem_queue *q;

	semncnt = 0;
	list_for_each_entry(q, &sma->sem_pending, list) {
		struct sembuf *sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}

static int count_semzcnt(struct sem_array *sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue *q;

	semzcnt = 0;
	list_for_each_entry(q, &sma->sem_pending, list) {
		struct sembuf *sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}

/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	struct list_head tasks;

	/* Free the existing undo structures for this semaphore set. */
	assert_spin_locked(&sma->sem_perm.lock);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	INIT_LIST_HEAD(&tasks);
	list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma);

	wake_up_sem_queue_do(&tasks);
	ns->used_sems -= sma->sem_nsems;
	security_sem_free(sma);
	ipc_rcu_putref(sma);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, union semun arg)
{
	int err;
	struct sem_array *sma;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo, 0, sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rw_mutex);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rw_mutex);
		if (copy_to_user(arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		if (cmd == SEM_STAT) {
			sma = sem_lock(ns, semid);
			if (IS_ERR(sma))
				return PTR_ERR(sma);
			id = sma->sem_perm.id;
		} else {
			sma = sem_lock_check(ns, semid);
			if (IS_ERR(sma))
				return PTR_ERR(sma);
			id = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime	= sma->sem_otime;
		tbuf.sem_ctime	= sma->sem_ctime;
		tbuf.sem_nsems	= sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user(arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	sem_unlock(sma);
	return err;
}

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem *curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	int nsems;
	struct list_head tasks;

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return PTR_ERR(sma);

	INIT_LIST_HEAD(&tasks);
	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm,
			(cmd == SETVAL || cmd == SETALL) ? S_IWUGO : S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = arg.array;
		int i;

		if (nsems > SEMMSL_FAST) {
			sem_getref_and_unlock(sma);

			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				sem_putref(sma);
				return -ENOMEM;
			}

			sem_lock_and_putref(sma);
			if (sma->sem_perm.deleted) {
				sem_unlock(sma);
				err = -EIDRM;
				goto out_free;
			}
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		sem_getref_and_unlock(sma);

		if (nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				sem_putref(sma);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, arg.array, nsems*sizeof(ushort))) {
			sem_putref(sma);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				sem_putref(sma);
				err = -ERANGE;
				goto out_free;
			}
		}
		sem_lock_and_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma);
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];

		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma, semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma, semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;

		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id)
			un->semadj[semnum] = 0;

		curr->semval = val;
		curr->sempid = task_tgid_vnr(current);
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(sma);
	wake_up_sem_queue_do(&tasks);

out_free:
	if (sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}

static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some semctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if (cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, arg.buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(ns, &sem_ids(ns), semid, cmd,
			       &semid64.sem_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch (cmd) {
	case IPC_RMID:
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&semid64.sem_perm, ipcp);
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
	}

out_unlock:
	sem_unlock(sma);
out_up:
	up_write(&sem_ids(ns).rw_mutex);
	return err;
}

SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;
	struct ipc_namespace *ns;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		err = semctl_nolock(ns, semid, cmd, version, arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETVAL:
	case SETALL:
		err = semctl_main(ns, semid, semnum, cmd, version, arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		err = semctl_down(ns, semid, cmd, version, arg);
		return err;
	default:
		return -EINVAL;
	}
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
{
	return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
}
SYSCALL_ALIAS(sys_semctl, SyS_semctl);
#endif

/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}

static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}

/**
 * find_alloc_undo - Lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;
	rcu_read_unlock();

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return ERR_CAST(sma);

	nsems = sma->sem_nsems;
	sem_getref_and_unlock(sma);

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		sem_putref(sma);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	sem_lock_and_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	assert_spin_locked(&sma->sem_perm.lock);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	rcu_read_lock();
	sem_unlock(sma);
out:
	return un;
}


/**
 * get_queue_result - Retrieve the result code from sem_queue
 * @q: Pointer to queue structure
 *
 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
 * q->status, then we must loop until the value is replaced with the final
 * value: This may happen if a task is woken up by an unrelated event (e.g.
 * signal) and in parallel the task is woken up by another task because it got
 * the requested semaphores.
 *
 * The function can be called with or without holding the semaphore spinlock.
 */
static int get_queue_result(struct sem_queue *q)
{
	int error;

	error = q->status;
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = q->status;
	}

	return error;
}


SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;
	struct list_head tasks;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if (nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}
	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

	if (undos) {
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else
		un = NULL;

	INIT_LIST_HEAD(&tasks);

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma)) {
		if (un)
			rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array received the same id. Check and fail.
	 * This case can be detected by checking un->semid. The existence of
	 * "un" itself is guaranteed by rcu.
	 */
	error = -EIDRM;
	if (un) {
		if (un->semid == -1) {
			rcu_read_unlock();
			goto out_unlock_free;
		} else {
			/*
			 * rcu lock can be released, "un" cannot disappear:
			 * - sem_lock is acquired, thus IPC_RMID is
			 *   impossible.
			 * - exit_sem is impossible, it always operates on
			 *   current (or a dead task).
			 */

			rcu_read_unlock();
		}
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_unlock_free;

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_unlock_free;

	error = try_atomic_semop(sma, sops, nsops, un, task_tgid_vnr(current));
	if (error <= 0) {
		if (alter && error == 0)
			do_smart_update(sma, sops, nsops, 1, &tasks);

		goto out_unlock_free;
	}

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;
	if (alter)
		list_add_tail(&queue.list, &sma->sem_pending);
	else
		list_add(&queue.list, &sma->sem_pending);

	if (nsops == 1) {
		struct sem *curr;
		curr = &sma->sem_base[sops->sem_num];

		if (alter)
			list_add_tail(&queue.simple_list, &curr->sem_pending);
		else
			list_add(&queue.simple_list, &curr->sem_pending);
	} else {
		INIT_LIST_HEAD(&queue.simple_list);
		sma->complex_count++;
	}

	queue.status = -EINTR;
	queue.sleeper = current;

sleep_again:
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma);

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = get_queue_result(&queue);

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources.
		 * Perform a smp_mb(): User space could assume that semop()
		 * is a memory barrier: Without the mb(), the cpu could
		 * speculatively read in user space stale data that was
		 * overwritten by the previous owner of the semaphore.
		 */
		smp_mb();

		goto out_free;
	}

	sma = sem_lock(ns, semid);

	/*
	 * Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
	 */
	error = get_queue_result(&queue);

	/*
	 * Array removed? If yes, leave without sem_unlock().
	 */
	if (IS_ERR(sma))
		goto out_free;


	/*
	 * If queue.status != -EINTR we were woken up by another process.
	 * Leave without unlink_queue(), but with sem_unlock().
	 */
	if (error != -EINTR)
		goto out_unlock_free;

	/*
	 * If an interrupt occurred we have to clean up the queue
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;

	/*
	 * If the wakeup was spurious, just retry
	 */
	if (error == -EINTR && !signal_pending(current))
		goto sleep_again;

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma);

	wake_up_sem_queue_do(&tasks);
out_free:
	if (sops != fast_sops)
		kfree(sops);
	return error;
}

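/*
 * Illustrative user-space use of the timeout path above (not kernel code,
 * values made up): wait at most 500 ms for semaphore 0.
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1 };
 *	struct timespec ts = { .tv_sec = 0, .tv_nsec = 500000000L };
 *
 *	if (semtimedop(id, &op, 1, &ts) == -1 && errno == EAGAIN)
 *		;	(timed out: jiffies_left reached 0 above)
 */
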
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}

/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
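/*
 * Example of the adjustment performed below (values made up): a task does
 * semop() with sem_op = -2 and SEM_UNDO on a semaphore with semval = 3, so
 * un->semadj becomes +2. If the task exits without reversing the operation
 * itself, exit_sem() adds the +2 back and semval returns to 3, clamped to
 * the range 0..SEMVMX.
 */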
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!atomic_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		struct list_head tasks;
		int semid;
		int i;

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc)
			semid = -1;
		else
			semid = un->semid;
		rcu_read_unlock();

		if (semid == -1)
			break;

		sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma))
			continue;

		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma);
			continue;
		}

		/* remove un from the linked lists */
		assert_spin_locked(&sma->sem_perm.lock);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sem_base[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by SUS:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		/* maybe some queued-up processes were waiting for this */
		INIT_LIST_HEAD(&tasks);
		do_smart_update(sma, NULL, 0, 1, &tasks);
		sem_unlock(sma);
		wake_up_sem_queue_do(&tasks);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct sem_array *sma = it;

	return seq_printf(s,
			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  sma->sem_perm.uid,
			  sma->sem_perm.gid,
			  sma->sem_perm.cuid,
			  sma->sem_perm.cgid,
			  sma->sem_otime,
			  sma->sem_ctime);
}
#endif
/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *     Thus: Perfect SMP scaling between independent semaphore arrays.
 *     If multiple semaphores in one array are used, then cache line
 *     thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and a per-semaphore list (stored in the array). This allows FIFO
 *   ordering to be achieved without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <linux/uaccess.h>
#include "util.h"

/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	/*
	 * PID of the process that last modified the semaphore. For
	 * Linux, specifically these are:
	 *  - semop
	 *  - semctl, via SETVAL and SETALL.
	 *  - at task exit when performing undo adjustments (see exit_sem).
	 */
	int	sempid;
	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
	struct list_head pending_alter; /* pending single-sop operations */
					/* that alter the semaphore */
	struct list_head pending_const; /* pending single-sop operations */
					/* that do not alter the semaphore*/
	time_t	sem_otime;	/* candidate for sem_otime */
} ____cacheline_aligned_in_smp;

/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	int			pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	struct sembuf		*blocking; /* the operation that blocked */
	int			nsops;	 /* number of operations */
	bool			alter;	 /* does *sops alter the array? */
	bool			dupsop;	 /* sops on more than one sem_num */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	atomic_t		refcnt;
	spinlock_t		lock;
	struct list_head	list_proc;
};


#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * Locking:
 * a) global sem_lock() for read/write
 *    sem_undo.id_next,
 *    sem_array.complex_count,
 *    sem_array.complex_mode
 *    sem_array.pending{_alter,_const},
 *    sem_array.sem_undo
 *
 * b) global or semaphore sem_lock() for read/write:
 *    sem_array.sem_base[i].pending_{const,alter}:
 *    sem_array.complex_mode (for read)
 *
 * c) special:
 *    sem_undo_list.list_proc:
 *    * undo_list->lock for write
 *    * rcu for read
 */

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]

void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif

void __init sem_init(void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}

/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operation back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		curr = &sma->sem_base[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}

239/**
240 * merge_queues - merge single semop queues into global queue
241 * @sma: semaphore array
242 *
243 * This function merges all per-semaphore queues into the global queue.
244 * It is necessary to achieve FIFO ordering for the pending single-sop
245 * operations when a multi-semop operation must sleep.
246 * Only the alter operations must be moved, the const operations can stay.
247 */
248static void merge_queues(struct sem_array *sma)
249{
250 int i;
251 for (i = 0; i < sma->sem_nsems; i++) {
252 struct sem *sem = sma->sem_base + i;
253
254 list_splice_init(&sem->pending_alter, &sma->pending_alter);
255 }
256}

static void sem_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct sem_array *sma = ipc_rcu_to_struct(p);

	security_sem_free(sma);
	ipc_rcu_free(head);
}

/*
 * Enter the mode suitable for non-simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_enter(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->complex_mode) {
		/* We are already in complex_mode. Nothing to do */
		return;
	}

	/* We need a full barrier after setting complex_mode:
	 * The write to complex_mode must be visible
	 * before we read the first sem->lock spinlock state.
	 */
	smp_store_mb(sma->complex_mode, true);

	for (i = 0; i < sma->sem_nsems; i++) {
		sem = sma->sem_base + i;
		spin_unlock_wait(&sem->lock);
	}
	/*
	 * spin_unlock_wait() is not a memory barrier, it is only a
	 * control barrier. The code must pair with spin_unlock(&sem->lock),
	 * thus just the control barrier is insufficient.
	 *
	 * smp_rmb() is sufficient, as writes cannot pass the control barrier.
	 */
	smp_rmb();
}

/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
	if (sma->complex_count) {
		/* Complex ops are sleeping.
		 * We must stay in complex mode.
		 */
		return;
	}
	/*
	 * Immediately after setting complex_mode to false,
	 * a simple op can start. Thus: all memory writes
	 * performed by the current operation must be visible
	 * before we set complex_mode to false.
	 */
	smp_store_release(&sma->complex_mode, false);
}

#define SEM_GLOBAL_LOCK	(-1)
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			   int nsops)
{
	struct sem *sem;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* Prevent parallel simple ops */
		complexmode_enter(sma);
		return SEM_GLOBAL_LOCK;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * Optimized locking is possible if no complex operation
	 * is either enqueued or processed right now.
	 *
	 * Both facts are tracked by complex_mode.
	 */
	sem = sma->sem_base + sops->sem_num;

	/*
	 * Initial check for complex_mode. Just an optimization,
	 * no locking, no memory barrier.
	 */
	if (!sma->complex_mode) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/*
		 * See 51d7d5205d33
		 * ("powerpc: Add smp_mb() to arch_spin_is_locked()"):
		 * A full barrier is required: the write of sem->lock
		 * must be visible before the read is executed.
		 */
		smp_mb();

		if (!smp_load_acquire(&sma->complex_mode)) {
			/* fast path successful! */
			return sops->sem_num;
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->complex_count == 0) {
		/* False alarm:
		 * There is no complex operation, thus we can switch
		 * back to the fast path.
		 */
		spin_lock(&sem->lock);
		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/* Not a false alarm, thus complete the sequence for a
		 * full lock.
		 */
		complexmode_enter(sma);
		return SEM_GLOBAL_LOCK;
	}
}

static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == SEM_GLOBAL_LOCK) {
		unmerge_queues(sma);
		complexmode_tryleave(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = sma->sem_base + locknum;

		spin_unlock(&sem->lock);
	}
}

/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(sma, sem_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	size = sizeof(*sma) + nsems * sizeof(struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma)
		return -ENOMEM;

	memset(sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma, ipc_rcu_free);
		return retval;
	}

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
		INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
		spin_lock_init(&sma->sem_base[i].lock);
	}

	sma->complex_count = 0;
	sma->complex_mode = true; /* dropped by sem_unlock below */
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		ipc_rcu_putref(sma, sem_rcu_free);
		return id;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}

/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}

/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				  struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops sem_ops = {
		.getnew = newary,
		.associate = sem_security,
		.more_checks = sem_more_checks,
	};
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
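
/*
 * User-space view of the syscall above (a minimal sketch; error handling
 * trimmed and the key value is arbitrary):
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	int make_set(void)
 *	{
 *		// Create (or look up) a set of 3 semaphores, rw for all.
 *		// Fails with EINVAL if 3 > SEMMSL, or ENOSPC if SEMMNS or
 *		// SEMMNI would be exceeded - the checks performed by
 *		// semget()/newary() above.
 *		return semget((key_t)0x1234, 3, IPC_CREAT | 0666);
 *	}
 */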

/**
 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 *                               operations on a given array.
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Whether the caller blocks depends on the value of each semaphore
 * operation (sem_op):
 *
 * (1) >0 never blocks.
 * (2) 0 (wait-for-zero operation): blocks if semval is non-zero.
 * (3) <0 blocks if decrementing semval would make it smaller than zero.
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible; the caller must sleep.
 * Returns <0 for error codes.
 */
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops, pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	sop--;
	pid = q->pid;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sem_base[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}
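
/*
 * Worked example for the rules above: with semval = 2, a lone
 * { sem_op = -3 } returns 1 (the caller must sleep), because 2 - 3 < 0.
 * The pair { +1, -3 } submitted in one semop() succeeds: 2 + 1 = 3,
 * then 3 - 3 = 0, and both updates become visible atomically. If the
 * blocking sop carries IPC_NOWAIT, -EAGAIN is returned instead of 1,
 * and the undo loop above rolls back any sops already applied.
 */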

static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	if (unlikely(q->dupsop))
		return perform_atomic_semop_slow(sma, q);

	/*
	 * We scan the semaphore set twice, first to ensure that the entire
	 * operation can succeed, therefore avoiding any pointless writes
	 * to shared memory and having to undo such changes in order to block
	 * until the operations can go through.
	 */
	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block; /* wait-for-zero */

		result += sem_op;
		if (result < 0)
			goto would_block;

		if (result > SEMVMX)
			return -ERANGE;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				return -ERANGE;
		}
	}

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			un->semadj[sop->sem_num] = undo;
		}
		curr->semval += sem_op;
		curr->sempid = q->pid;
	}

	return 0;

would_block:
	q->blocking = sop;
	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}

static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
					     struct wake_q_head *wake_q)
{
	wake_q_add(wake_q, q->sleeper);
	/*
	 * Rely on the above implicit barrier, such that we can
	 * ensure that we hold a reference to the task before setting
	 * q->status. Otherwise we could race with do_exit if the
	 * task is awoken by an external event before calling
	 * wake_up_process().
	 */
	WRITE_ONCE(q->status, error);
}

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}

/**
 * check_restart - check whether the queue scan must be restarted
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decreased the value - thus they won't proceed either.
	 */
	return 0;
}

/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
			  struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sem_base[semnum].pending_const;

	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error = perform_atomic_semop(sma, q);

		if (error > 0)
			continue;
		/* operation completed, remove from queue & wakeup */
		unlink_queue(sma, q);

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (error == 0)
			semop_completed = 1;
	}

	return semop_completed;
}

/**
 * do_smart_wakeup_zero - wake up all wait-for-zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
				int nsops, struct wake_q_head *wake_q)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sem_base[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, wake_q);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sem_base[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, wake_q);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, wake_q);

	return semop_completed;
}

/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sem_base[semnum].pending_alter;

again:
	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error, restart;

		/* If we are scanning the single-sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per-semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (restart)
			goto again;
	}
	return semop_completed;
}

/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
	if (sops == NULL) {
		sma->sem_base[0].sem_otime = get_seconds();
	} else {
		sma->sem_base[sops[0].sem_num].sem_otime =
						get_seconds();
	}
}

/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			    int otime, struct wake_q_head *wake_q)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, wake_q);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, wake_q);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decreases.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops won't be able to run: if the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							      sops[i].sem_num, wake_q);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}

/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
		     bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard-compliant behavior.
	 * Give the administrators a chance to notice that an application
	 * might misbehave because it relies on the Linux behavior.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
		     "The task %s (%d) triggered the difference, watch for misbehavior.\n",
		     current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}

/* The following counts are associated with each semaphore:
 *   semncnt - number of tasks whose blocked operation is a decrement
 *             (i.e. tasks waiting for semval to increase)
 *   semzcnt - number of tasks waiting for semval to become zero
 *
 * By definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operations would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
			bool count_zero)
{
	struct list_head *l;
	struct sem_queue *q;
	int semcnt;

	semcnt = 0;
	/* First: check the simple operations. They are easy to evaluate */
	if (count_zero)
		l = &sma->sem_base[semnum].pending_const;
	else
		l = &sma->sem_base[semnum].pending_alter;

	list_for_each_entry(q, l, list) {
		/* all tasks on a per-semaphore list sleep on exactly
		 * that semaphore
		 */
		semcnt++;
	}

	/* Then: check the complex operations. */
	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	if (count_zero) {
		list_for_each_entry(q, &sma->pending_const, list) {
			semcnt += check_qop(sma, semnum, q, count_zero);
		}
	}
	return semcnt;
}
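
/*
 * count_semcnt() backs semctl(GETNCNT) and semctl(GETZCNT). From user
 * space (a minimal sketch; semid is assumed to be a valid set id):
 *
 *	#include <stdio.h>
 *	#include <sys/sem.h>
 *
 *	void report_waiters(int semid)
 *	{
 *		// tasks blocked trying to decrement semaphore 0
 *		int ncnt = semctl(semid, 0, GETNCNT);
 *		// tasks blocked waiting for semaphore 0 to reach zero
 *		int zcnt = semctl(semid, 0, GETZCNT);
 *
 *		printf("ncnt=%d zcnt=%d\n", ncnt, zcnt);
 *	}
 */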

/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	int i;
	DEFINE_WAKE_Q(wake_q);

	/* Free the existing undo structures for this semaphore set. */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;

		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_q(&wake_q);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(sma, sem_rcu_free);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime = in->sem_otime;
		out.sem_ctime = in->sem_ctime;
		out.sem_nsems = in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static time_t get_semotime(struct sem_array *sma)
{
	int i;
	time_t res;

	res = sma->sem_base[0].sem_otime;
	for (i = 1; i < sma->sem_nsems; i++) {
		time_t to = sma->sem_base[i].sem_otime;

		if (to > res)
			res = to;
	}
	return res;
}

static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, void __user *p)
{
	int err;
	struct sem_array *sma;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo, 0, sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rwsem);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rwsem);
		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id = 0;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == SEM_STAT) {
			sma = sem_obtain_object(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
			id = sma->sem_perm.id;
		} else {
			sma = sem_obtain_object_check(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = get_semotime(sma);
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		rcu_read_unlock();
		if (copy_semid_to_user(p, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	rcu_read_unlock();
	return err;
}

static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
			 unsigned long arg)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err, val;
	DEFINE_WAKE_Q(wake_q);

#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
	/* big-endian 64bit */
	val = arg >> 32;
#else
	/* 32bit or little-endian 64bit */
	val = arg;
#endif

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(sma, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	curr = &sma->sem_base[semnum];

	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	curr->sempid = task_tgid_vnr(current);
	sma->sem_ctime = get_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &wake_q);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_q(&wake_q);
	return 0;
}

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		       int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	DEFINE_WAKE_Q(wake_q);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_rcu_wakeup;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			if (!ipc_rcu_getref(sma)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_rcu_putref(sma, sem_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(sma)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_rcu_putref(sma, sem_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(sma, sem_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(sma, sem_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++) {
			sma->sem_base[i].semval = sem_io[i];
			sma->sem_base[i].sempid = task_tgid_vnr(current);
		}

		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &wake_q);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}
	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semcnt(sma, semnum, 0);
		goto out_unlock;
	case GETZCNT:
		err = count_semcnt(sma, semnum, 1);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_q(&wake_q);
out_free:
	if (sem_io != fast_sem_io)
		ipc_free(sem_io);
	return err;
}

static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid = tbuf_old.sem_perm.uid;
		out->sem_perm.gid = tbuf_old.sem_perm.gid;
		out->sem_perm.mode = tbuf_old.sem_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, void __user *p)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if (cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
	}

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
				      &semid64.sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64.sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}

SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	int version;
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		return semctl_nolock(ns, semid, cmd, version, p);
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_RMID:
	case IPC_SET:
		return semctl_down(ns, semid, cmd, version, p);
	default:
		return -EINVAL;
	}
}
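
/*
 * User-space sketch of the two most common semctl() paths dispatched
 * above, SETVAL and GETVAL (illustrative only; as semctl(2) documents,
 * the caller defines union semun itself):
 *
 *	#include <sys/sem.h>
 *
 *	union semun {
 *		int val;
 *		struct semid_ds *buf;
 *		unsigned short *array;
 *	};
 *
 *	int set_then_get(int semid)
 *	{
 *		union semun arg;
 *
 *		arg.val = 1;	// must be within 0..SEMVMX
 *		if (semctl(semid, 0, SETVAL, arg) < 0)
 *			return -1;
 *		return semctl(semid, 0, GETVAL);	// yields 1
 *	}
 */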

/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}

static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		/* found: move to the front to speed up future lookups */
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}

/**
 * find_alloc_undo - lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime rules: sem_undo is rcu-protected; on success, the function
 * performs an rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	if (!ipc_rcu_getref(sma)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(sma, sem_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on the semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}
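
/*
 * The allocation dance above is the classic "lookup, drop locks,
 * allocate, relock, look up again" pattern. The same shape in plain
 * user-space terms (a sketch; table_find()/table_insert() and the
 * struct types are hypothetical stand-ins, not kernel interfaces):
 *
 *	#include <pthread.h>
 *	#include <stdlib.h>
 *
 *	struct entry *get_entry(struct table *t, int id)
 *	{
 *		struct entry *e, *new;
 *
 *		pthread_mutex_lock(&t->lock);
 *		e = table_find(t, id);		// fast path
 *		pthread_mutex_unlock(&t->lock);
 *		if (e)
 *			return e;
 *		new = calloc(1, sizeof(*new));	// allocate unlocked
 *		if (!new)
 *			return NULL;
 *		pthread_mutex_lock(&t->lock);
 *		e = table_find(t, id);		// recheck: did we race?
 *		if (!e) {
 *			table_insert(t, id, new);	// publish ours
 *			e = new;
 *		} else {
 *			free(new);		// lost the race
 *		}
 *		pthread_mutex_unlock(&t->lock);
 *		return e;
 *	}
 */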

SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int max, locknum;
	bool undos = false, alter = false, dupsop = false;
	struct sem_queue queue;
	unsigned long dup = 0, jiffies_left = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if (nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}

	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}

	if (timeout) {
		struct timespec _timeout;

		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
		    _timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}

	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);

		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = true;
		if (dup & mask) {
			/*
			 * There was a previous alter access that appears
			 * to have accessed the same semaphore, thus use
			 * the dupsop logic. "appears", because the detection
			 * can only check % BITS_PER_LONG.
			 */
			dupsop = true;
		}
		if (sop->sem_op != 0) {
			alter = true;
			dup |= mask;
		}
	}

	if (undos) {
		/* On success, find_alloc_undo takes the rcu_read_lock */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems) {
		rcu_read_unlock();
		goto out_free;
	}

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
		rcu_read_unlock();
		goto out_free;
	}

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error) {
		rcu_read_unlock();
		goto out_free;
	}

	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);
	/*
	 * We eventually might perform the following check in a lockless
	 * fashion, considering ipc_valid_object() locking constraints.
	 * If nsops == 1 and there is no contention for sem_perm.lock, then
	 * only a per-semaphore lock is held and it's OK to proceed with the
	 * check below. More details on the fine-grained locking scheme
	 * entangled here, and why it is RMID race safe, can be found in the
	 * comments at sem_lock().
	 */
	if (!ipc_valid_object(&sma->sem_perm))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array has received the same id. Check and fail.
	 * This case can be detected by checking un->semid. The existence
	 * of "un" itself is guaranteed by rcu.
	 */
	if (un && un->semid == -1)
		goto out_unlock_free;

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;
	queue.dupsop = dupsop;

	error = perform_atomic_semop(sma, &queue);
	if (error == 0) { /* non-blocking successful path */
		DEFINE_WAKE_Q(wake_q);

		/*
		 * If the operation was successful, then do
		 * the required updates.
		 */
		if (alter)
			do_smart_update(sma, sops, nsops, 1, &wake_q);
		else
			set_semotime(sma, sops);

		sem_unlock(sma, locknum);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		goto out_free;
	}
	if (error < 0) /* non-blocking error path */
		goto out_unlock_free;

	/*
	 * We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */
	if (nsops == 1) {
		struct sem *curr;

		curr = &sma->sem_base[sops->sem_num];

		if (alter) {
			if (sma->complex_count) {
				list_add_tail(&queue.list,
						&sma->pending_alter);
			} else {
				list_add_tail(&queue.list,
						&curr->pending_alter);
			}
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	do {
		queue.status = -EINTR;
		queue.sleeper = current;

		__set_current_state(TASK_INTERRUPTIBLE);
		sem_unlock(sma, locknum);
		rcu_read_unlock();

		if (timeout)
			jiffies_left = schedule_timeout(jiffies_left);
		else
			schedule();

		/*
		 * fastpath: the semop has completed, either successfully or
		 * not; which of the two is, from the syscall pov, quite
		 * irrelevant to us at this point - we're done.
		 *
		 * We _do_ care, nonetheless, about being awoken by a signal or
		 * spuriously. The queue.status is checked again in the
		 * slowpath (aka after taking sem_lock), such that we can detect
		 * scenarios where we were awakened externally, during the
		 * window between wake_q_add() and wake_up_q().
		 */
		error = READ_ONCE(queue.status);
		if (error != -EINTR) {
			/*
			 * User space could assume that semop() is a memory
			 * barrier: Without the mb(), the CPU could
			 * speculatively read in user space stale data that was
			 * overwritten by the previous owner of the semaphore.
			 */
			smp_mb();
			goto out_free;
		}

		rcu_read_lock();
		locknum = sem_lock(sma, sops, nsops);

		if (!ipc_valid_object(&sma->sem_perm))
			goto out_unlock_free;

		error = READ_ONCE(queue.status);

		/*
		 * If queue.status != -EINTR we are woken up by another process.
		 * Leave without unlink_queue(), but with sem_unlock().
		 */
		if (error != -EINTR)
			goto out_unlock_free;

		/*
		 * If an interrupt occurred we have to clean up the queue.
		 */
		if (timeout && jiffies_left == 0)
			error = -EAGAIN;
	} while (error == -EINTR && !signal_pending(current)); /* spurious wakeup */

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma, locknum);
	rcu_read_unlock();
out_free:
	if (sops != fast_sops)
		kfree(sops);
	return error;
}

SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
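
/*
 * User-space sketch for the two entry points above (illustrative;
 * semid is assumed valid). SEM_UNDO records the adjustment that
 * exit_sem() below applies if the task exits while "holding" the
 * semaphore:
 *
 *	#define _GNU_SOURCE
 *	#include <sys/sem.h>
 *	#include <time.h>
 *
 *	int lock_sem0(int semid)
 *	{
 *		struct sembuf op = {
 *			.sem_num = 0,
 *			.sem_op  = -1,		// decrement, may sleep
 *			.sem_flg = SEM_UNDO,	// auto-undo on exit
 *		};
 *		struct timespec to = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		// fails with errno == EAGAIN after 5s (the -EAGAIN set
 *		// on timeout in the sleep loop above)
 *		return semtimedop(semid, &op, 1, &to);
 *	}
 */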

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}
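
/*
 * User-space angle (illustrative): glibc's NPTL passes CLONE_SYSVSEM,
 * so all threads of a process share one undo list, while fork() leaves
 * the flag clear and the child starts with none (the else branch
 * above). A task can also detach from a shared list explicitly -
 * assuming a kernel that supports unshare(CLONE_SYSVSEM):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	// Drop the shared undo list; SEM_UNDO adjustments made from
 *	// now on are private to this task.
 *	int detach_sem_undo(void)
 *	{
 *		return unshare(CLONE_SYSVSEM);
 *	}
 */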

/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!atomic_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		int semid, i;
		DEFINE_WAKE_Q(wake_q);

		cond_resched();

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc) {
			/*
			 * We must wait for freeary() before freeing this ulp,
			 * in case we raced with the last sem_undo. There is
			 * a small possibility where we exit while freeary()
			 * didn't finish unlocking sem_undo_list.
			 */
			spin_unlock_wait(&ulp->lock);
			rcu_read_unlock();
			break;
		}
		spin_lock(&ulp->lock);
		semid = un->semid;
		spin_unlock(&ulp->lock);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (semid == -1) {
			rcu_read_unlock();
			continue;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (!ipc_valid_object(&sma->sem_perm)) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from the linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		/* we are the last process using this ulp, so acquiring
		 * ulp->lock isn't required. Besides that, we are also
		 * protected against IPC_RMID as we hold the sma->sem_perm
		 * lock now.
		 */
		list_del_rcu(&un->list_proc);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sem_base[i];

			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by SUS:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 * Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 1, &wake_q);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct sem_array *sma = it;
	time_t sem_otime;

	/*
	 * The proc interface isn't aware of sem_lock(); it calls
	 * ipc_lock_object() directly (in sysvipc_find_ipc).
	 * In order to stay compatible with sem_lock(), we must
	 * enter / leave complex_mode.
	 */
	complexmode_enter(sma);

	sem_otime = get_semotime(sma);

	seq_printf(s,
		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
		   sma->sem_perm.key,
		   sma->sem_perm.id,
		   sma->sem_perm.mode,
		   sma->sem_nsems,
		   from_kuid_munged(user_ns, sma->sem_perm.uid),
		   from_kgid_munged(user_ns, sma->sem_perm.gid),
		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
		   sem_otime,
		   sma->sem_ctime);

	complexmode_tryleave(sma);

	return 0;
}
#endif