/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *     Thus: Perfect SMP scaling between independent semaphore arrays.
 *     If multiple semaphores in one array are used, then cache line
 *     thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semncnt() and
 *   count_semzcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare(),
 *   wake_up_sem_queue_do())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - The synchronization between wake-ups due to a timeout/signal and a
 *   wake-up due to a completed semaphore operation is achieved by using an
 *   intermediate state (IN_WAKEUP).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and per-semaphore list (stored in the array). This makes it possible
 *   to achieve FIFO ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
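
/*
 * Illustrative user-space sketch (not part of this kernel file): the
 * behavior documented above, as seen by a caller. The process takes a
 * semaphore with SEM_UNDO set, so exit_sem() below reverts the adjustment
 * if the task dies while holding it. Error handling is omitted and the
 * key 0x1234 is an arbitrary example value.
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	int id = semget(0x1234, 1, IPC_CREAT | 0600);
 *	struct sembuf down = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *	struct sembuf up = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };
 *
 *	semop(id, &down, 1);	(sleeps FIFO behind earlier waiters)
 *	(critical section)
 *	semop(id, &up, 1);
 */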

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>
#include "util.h"

/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	int	sempid;		/* pid of last operation */
	struct list_head sem_pending; /* pending single-sop operations */
};

/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	simple_list; /* per-semaphore queue of pending single-sop operations */
	struct list_head	list;	 /* per-array queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	int			pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	int			nsops;	 /* number of operations */
	int			alter;	 /* does *sops alter the array? */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	atomic_t		refcnt;
	spinlock_t		lock;
	struct list_head	list_proc;
};


#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

#define sem_unlock(sma)	ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * linked list protection:
 *	sem_undo.id_next,
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 *
 */

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]

void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif

void __init sem_init(void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}

/*
 * sem_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct sem_array *)ipcp;

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct sem_array *)ipcp;

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
}

static inline void sem_getref_and_unlock(struct sem_array *sma)
{
	ipc_rcu_getref(sma);
	ipc_unlock(&(sma)->sem_perm);
}

static inline void sem_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
	ipc_unlock(&(sma)->sem_perm);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from sma->sem_pending
 *	* setting queue.status to IN_WAKEUP
 *	  This is the notification for the blocked thread that a
 *	  result value is imminent.
 *	* call wake_up_process
 *	* set queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *	* if it's IN_WAKEUP, then it must wait until the value changes
 *	* if it's not -EINTR, then the operation was completed by
 *	  update_queue. semtimedop can return queue.status without
 *	  performing any operation on the sem array.
 *	* otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 *
 */
#define IN_WAKEUP	1
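
/*
 * Condensed sketch of the wakee side of the protocol above, as it is
 * implemented by get_queue_result() further down. This is a restatement
 * for illustration, not additional code:
 *
 *	error = q->status;
 *	while (unlikely(error == IN_WAKEUP)) {
 *		cpu_relax();		(waker is between the two stages)
 *		error = q->status;
 *	}
 *	(now error is either the final result set by the waker, or it is
 *	 still -EINTR: timeout/signal, take the lock and clean up)
 */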

/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rw_mutex held (as a writer)
 */

static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma) {
		return -ENOMEM;
	}
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma);
		return retval;
	}

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		security_sem_free(sma);
		ipc_rcu_putref(sma);
		return id;
	}
	ns->used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++)
		INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);

	sma->complex_count = 0;
	INIT_LIST_HEAD(&sma->sem_pending);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sma->sem_perm.id;
}


/*
 * Called with sem_ids.rw_mutex and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}

/*
 * Called with sem_ids.rw_mutex and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops sem_ops;
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_ops.getnew = newary;
	sem_ops.associate = sem_security;
	sem_ops.more_checks = sem_more_checks;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
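
/*
 * Illustrative user-space sketch (not part of this file): the two paths
 * through ipcget() as a caller sees them. Creating with IPC_CREAT|IPC_EXCL
 * takes the newary() path; a plain lookup takes the sem_security()/
 * sem_more_checks() path. Values are arbitrary examples.
 *
 *	int id = semget(0x4242, 4, IPC_CREAT | IPC_EXCL | 0600);
 *	(fails with EEXIST if the key is already in use)
 *
 *	int same = semget(0x4242, 4, 0);
 *	(fails with EINVAL if it asks for more than the existing
 *	 set's nsems, cf. sem_more_checks() above)
 */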

/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if the caller needs to sleep, else
 * return an error code.
 */

static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
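
/*
 * Worked example for try_atomic_semop() (illustration only). Assume a set
 * with semval = {1, 0} and one semop() call carrying two operations:
 * {sem_num=0, sem_op=-1} and {sem_num=1, sem_op=-1}. The first op is
 * applied tentatively (semval becomes {0, 0}); the second would drive
 * semval[1] below 0, so the function jumps to would_block, the undo loop
 * re-adds sem_op of every already-applied op (semval back to {1, 0}),
 * and 1 (sleep) or -EAGAIN (IPC_NOWAIT) is returned. Only if all ops fit
 * does the second loop commit sempid and the SEM_UNDO adjustments.
 */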

/** wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
 * @pt: list head to collect the entries for a later batched wake-up
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
	if (list_empty(pt)) {
		/*
		 * Hold preempt off so that we don't get preempted and have the
		 * wakee busy-wait until we're scheduled back on.
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	q->pid = error;

	list_add_tail(&q->simple_list, pt);
}

/**
 * wake_up_sem_queue_do(pt) - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up.
 * The function is called without any locks held, thus the semaphore array
 * could be destroyed already and the tasks can disappear as soon as the
 * status is set to the actual return code.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, simple_list) {
		wake_up_process(q->sleeper);
		/* q can disappear immediately after writing q->status. */
		smp_wmb();
		q->status = q->pid;
	}
	if (did_something)
		preempt_enable();
}

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops == 1)
		list_del(&q->simple_list);
	else
		sma->complex_count--;
}

/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * was completed.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	struct sem *curr;
	struct sem_queue *h;

	/* if the operation didn't modify the array, then no restart */
	if (q->alter == 0)
		return 0;

	/* pending complex operations are too difficult to analyse */
	if (sma->complex_count)
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	curr = sma->sem_base + q->sops[0].sem_num;

	/* No-one waits on this queue */
	if (list_empty(&curr->sem_pending))
		return 0;

	/* the new semaphore value */
	if (curr->semval) {
		/* It is impossible that someone waits for the new value:
		 * - q is a previously sleeping simple operation that
		 *   altered the array. It must be a decrement, because
		 *   simple increments never sleep.
		 * - The value is not 0, thus wait-for-zero won't proceed.
		 * - If there are older (higher priority) decrements
		 *   in the queue, then they have observed the original
		 *   semval value and couldn't proceed. The operation
		 *   decremented the value - thus they won't proceed either.
		 */
		BUG_ON(q->sops[0].sem_op >= 0);
		return 0;
	}
	/*
	 * semval is 0. Check if there are wait-for-zero semops.
	 * They must be the first entries in the per-semaphore simple queue
	 */
	h = list_first_entry(&curr->sem_pending, struct sem_queue, simple_list);
	BUG_ON(h->nsops != 1);
	BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);

	/* Yes, there is a wait-for-zero semop. Restart */
	if (h->sops[0].sem_op == 0)
		return 1;

	/* Again - no-one is waiting for the new value. */
	return 0;
}


/**
 * update_queue(sma, semnum): Look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, then @semnum
 * must be set to -1.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int offset;
	int semop_completed = 0;

	/* if there are complex operations around, then knowing the semaphore
	 * that was modified doesn't help us. Assume that multiple semaphores
	 * were modified.
	 */
	if (sma->complex_count)
		semnum = -1;

	if (semnum == -1) {
		pending_list = &sma->sem_pending;
		offset = offsetof(struct sem_queue, list);
	} else {
		pending_list = &sma->sem_base[semnum].sem_pending;
		offset = offsetof(struct sem_queue, simple_list);
	}

again:
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, restart;

		q = (struct sem_queue *)((char *)walk - offset);
		walk = walk->next;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan the "alter" entries: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
				q->alter)
			break;

		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(pt, q, error);
		if (restart)
			goto again;
	}
	return semop_completed;
}

/**
 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() does the required calls to update_queue, based on the
 * actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_sem_queue_do(@pt).
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			    int otime, struct list_head *pt)
{
	int i;

	if (sma->complex_count || sops == NULL) {
		if (update_queue(sma, -1, pt))
			otime = 1;
		goto done;
	}

	for (i = 0; i < nsops; i++) {
		if (sops[i].sem_op > 0 ||
			(sops[i].sem_op < 0 &&
				sma->sem_base[sops[i].sem_num].semval == 0))
			if (update_queue(sma, sops[i].sem_num, pt))
				otime = 1;
	}
done:
	if (otime)
		sma->sem_otime = get_seconds();
}

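/*
 * Worked example for do_smart_update() (illustration only). Suppose one
 * semop() incremented sem 2 from 0 to 1 and decremented sem 5 from 1 to 0,
 * with no complex operations pending. The loop above calls
 * update_queue(sma, 2, pt) because an increment can unblock decrement
 * waiters, and update_queue(sma, 5, pt) because a value that just became
 * 0 can unblock wait-for-zero waiters. A decrement that leaves the value
 * above 0 cannot wake anybody, so its semaphore is skipped.
 */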

/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	list_for_each_entry(q, &sma->sem_pending, list) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}

static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue * q;

	semzcnt = 0;
	list_for_each_entry(q, &sma->sem_pending, list) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}
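
/*
 * Illustrative user-space sketch (not part of this file): the counts
 * computed above are what GETNCNT and GETZCNT report. With one task
 * blocked in semop() on sem_op = -1 against semval 0 on semaphore 0,
 * and another blocked waiting for zero on a nonzero semaphore 1:
 *
 *	semctl(id, 0, GETNCNT);		returns 1 (decrement waiter)
 *	semctl(id, 1, GETZCNT);		returns 1 (wait-for-zero waiter)
 */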

/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	struct list_head tasks;

	/* Free the existing undo structures for this semaphore set. */
	assert_spin_locked(&sma->sem_perm.lock);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	INIT_LIST_HEAD(&tasks);
	list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma);

	wake_up_sem_queue_do(&tasks);
	ns->used_sems -= sma->sem_nsems;
	security_sem_free(sma);
	ipc_rcu_putref(sma);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, union semun arg)
{
	int err;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rw_mutex);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rw_mutex);
		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0: max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		if (cmd == SEM_STAT) {
			sma = sem_lock(ns, semid);
			if (IS_ERR(sma))
				return PTR_ERR(sma);
			id = sma->sem_perm.id;
		} else {
			sma = sem_lock_check(ns, semid);
			if (IS_ERR(sma))
				return PTR_ERR(sma);
			id = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime	= sma->sem_otime;
		tbuf.sem_ctime	= sma->sem_ctime;
		tbuf.sem_nsems	= sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	sem_unlock(sma);
	return err;
}

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem* curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort* sem_io = fast_sem_io;
	int nsems;
	struct list_head tasks;

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return PTR_ERR(sma);

	INIT_LIST_HEAD(&tasks);
	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm,
			(cmd == SETVAL || cmd == SETALL) ? S_IWUGO : S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = arg.array;
		int i;

		if(nsems > SEMMSL_FAST) {
			sem_getref_and_unlock(sma);

			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				sem_putref(sma);
				return -ENOMEM;
			}

			sem_lock_and_putref(sma);
			if (sma->sem_perm.deleted) {
				sem_unlock(sma);
				err = -EIDRM;
				goto out_free;
			}
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		sem_getref_and_unlock(sma);

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				sem_putref(sma);
				return -ENOMEM;
			}
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			sem_putref(sma);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				sem_putref(sma);
				err = -ERANGE;
				goto out_free;
			}
		}
		sem_lock_and_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma);
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];

		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if(semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;

		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id)
			un->semadj[semnum] = 0;

		curr->semval = val;
		curr->sempid = task_tgid_vnr(current);
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(sma);
	wake_up_sem_queue_do(&tasks);

out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
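
/*
 * Illustrative user-space note (not part of this file): unlike most sysv
 * calls, the fourth semctl() argument is a union that the caller must
 * define itself. A minimal sketch of SETVAL followed by GETVAL:
 *
 *	union semun {
 *		int val;
 *		struct semid_ds *buf;
 *		unsigned short *array;
 *		struct seminfo *__buf;
 *	};
 *
 *	union semun su = { .val = 3 };
 *	semctl(id, 0, SETVAL, su);	(sets semval, clears semadj,
 *					 bumps sem_ctime - see above)
 *	int v = semctl(id, 0, GETVAL);	(returns 3)
 */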

static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some semctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, arg.buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(ns, &sem_ids(ns), semid, cmd,
			       &semid64.sem_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch(cmd){
	case IPC_RMID:
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&semid64.sem_perm, ipcp);
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
	}

out_unlock:
	sem_unlock(sma);
out_up:
	up_write(&sem_ids(ns).rw_mutex);
	return err;
}

SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;
	struct ipc_namespace *ns;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		err = semctl_nolock(ns, semid, cmd, version, arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETVAL:
	case SETALL:
		err = semctl_main(ns,semid,semnum,cmd,version,arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		err = semctl_down(ns, semid, cmd, version, arg);
		return err;
	default:
		return -EINVAL;
	}
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
{
	return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
}
SYSCALL_ALIAS(sys_semctl, SyS_semctl);
#endif

/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}

static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}

/**
 * find_alloc_undo - Lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected; on success, the function
 * performs an rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un!=NULL))
		goto out;
	rcu_read_unlock();

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return ERR_CAST(sma);

	nsems = sma->sem_nsems;
	sem_getref_and_unlock(sma);

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		sem_putref(sma);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	sem_lock_and_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	assert_spin_locked(&sma->sem_perm.lock);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	rcu_read_lock();
	sem_unlock(sma);
out:
	return un;
}


/**
 * get_queue_result - Retrieve the result code from sem_queue
 * @q: Pointer to queue structure
 *
 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
 * q->status, then we must loop until the value is replaced with the final
 * value: This may happen if a task is woken up by an unrelated event (e.g.
 * signal) and in parallel the task is woken up by another task because it got
 * the requested semaphores.
 *
 * The function can be called with or without holding the semaphore spinlock.
 */
static int get_queue_result(struct sem_queue *q)
{
	int error;

	error = q->status;
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = q->status;
	}

	return error;
}


SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf* sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;
	struct list_head tasks;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error=-EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

	if (undos) {
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else
		un = NULL;

	INIT_LIST_HEAD(&tasks);

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma)) {
		if (un)
			rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array has received the same id. Check and fail.
	 * This case can be detected by checking un->semid. The existence of
	 * "un" itself is guaranteed by rcu.
	 */
	error = -EIDRM;
	if (un) {
		if (un->semid == -1) {
			rcu_read_unlock();
			goto out_unlock_free;
		} else {
			/*
			 * rcu lock can be released, "un" cannot disappear:
			 * - sem_lock is acquired, thus IPC_RMID is
			 *   impossible.
			 * - exit_sem is impossible, it always operates on
			 *   current (or a dead task).
			 */

			rcu_read_unlock();
		}
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_unlock_free;

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_unlock_free;

	error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
	if (error <= 0) {
		if (alter && error == 0)
			do_smart_update(sma, sops, nsops, 1, &tasks);

		goto out_unlock_free;
	}

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;
	if (alter)
		list_add_tail(&queue.list, &sma->sem_pending);
	else
		list_add(&queue.list, &sma->sem_pending);

	if (nsops == 1) {
		struct sem *curr;
		curr = &sma->sem_base[sops->sem_num];

		if (alter)
			list_add_tail(&queue.simple_list, &curr->sem_pending);
		else
			list_add(&queue.simple_list, &curr->sem_pending);
	} else {
		INIT_LIST_HEAD(&queue.simple_list);
		sma->complex_count++;
	}

	queue.status = -EINTR;
	queue.sleeper = current;

sleep_again:
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma);

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = get_queue_result(&queue);

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources.
		 * Perform a smp_mb(): User space could assume that semop()
		 * is a memory barrier: Without the mb(), the cpu could
		 * speculatively read in user space stale data that was
		 * overwritten by the previous owner of the semaphore.
		 */
		smp_mb();

		goto out_free;
	}

	sma = sem_lock(ns, semid);

	/*
	 * Wait until it's guaranteed that no wake_up_sem_queue_do() is ongoing.
	 */
	error = get_queue_result(&queue);

	/*
	 * Array removed? If yes, leave without sem_unlock().
	 */
	if (IS_ERR(sma)) {
		goto out_free;
	}


	/*
	 * If queue.status != -EINTR we are woken up by another process.
	 * Leave without unlink_queue(), but with sem_unlock().
	 */

	if (error != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * If an interrupt occurred we have to clean up the queue
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;

	/*
	 * If the wakeup was spurious, just retry
	 */
	if (error == -EINTR && !signal_pending(current))
		goto sleep_again;

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma);

	wake_up_sem_queue_do(&tasks);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}

SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
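
/*
 * Illustrative user-space sketch (not part of this file): semtimedop()
 * with a bounded wait. If the decrement cannot complete within the
 * timeout, the call fails with EAGAIN (cf. the jiffies_left handling
 * above). Error handling is trimmed to the relevant case.
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	if (semtimedop(id, &op, 1, &ts) == -1 && errno == EAGAIN)
 *		(timed out after about one second)
 */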

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */

int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}
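
/*
 * Illustrative user-space sketch (not part of this file): how the
 * copy_semundo() branches above are reached. clone() with CLONE_SYSVSEM
 * shares the parent's undo list (refcnt is bumped); a plain fork() leaves
 * the child with undo_list = NULL until its first SEM_UNDO operation.
 * child_fn and stack_top are hypothetical names supplied by the caller.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	clone(child_fn, stack_top, CLONE_VM | CLONE_SYSVSEM | SIGCHLD, NULL);
 */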

/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!atomic_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		struct list_head tasks;
		int semid;
		int i;

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc)
			semid = -1;
		else
			semid = un->semid;
		rcu_read_unlock();

		if (semid == -1)
			break;

		sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma))
			continue;

		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma);
			continue;
		}

		/* remove un from the linked lists */
		assert_spin_locked(&sma->sem_perm.lock);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem * semaphore = &sma->sem_base[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by sus:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		/* maybe some queued-up processes were waiting for this */
		INIT_LIST_HEAD(&tasks);
		do_smart_update(sma, NULL, 0, 1, &tasks);
		sem_unlock(sma);
		wake_up_sem_queue_do(&tasks);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}
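
/*
 * Worked example for exit_sem() (illustration only). A process performs
 * semop() with sem_op = -1 and SEM_UNDO on a semaphore whose value is 1:
 * try_atomic_semop() sets semval = 0 and records semadj = +1. If the
 * process then exits without releasing the semaphore, the loop above adds
 * semadj back (semval = 1 again), clamped to the 0..SEMVMX range described
 * in the comment, so the resource is not leaked.
 */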

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct sem_array *sma = it;

	return seq_printf(s,
			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  sma->sem_perm.uid,
			  sma->sem_perm.gid,
			  sma->sem_perm.cuid,
			  sma->sem_perm.cgid,
			  sma->sem_otime,
			  sma->sem_ctime);
}
#endif
1/*
2 * linux/ipc/sem.c
3 * Copyright (C) 1992 Krishna Balasubramanian
4 * Copyright (C) 1995 Eric Schenk, Bruno Haible
5 *
6 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
7 *
8 * SMP-threaded, sysctl's added
9 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
10 * Enforced range limit on SEM_UNDO
11 * (c) 2001 Red Hat Inc
12 * Lockless wakeup
13 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
14 * Further wakeup optimizations, documentation
15 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
16 *
17 * support for audit of ipc object properties and permission changes
18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
19 *
20 * namespaces support
21 * OpenVZ, SWsoft Inc.
22 * Pavel Emelianov <xemul@openvz.org>
23 *
24 * Implementation notes: (May 2010)
25 * This file implements System V semaphores.
26 *
27 * User space visible behavior:
28 * - FIFO ordering for semop() operations (just FIFO, not starvation
29 * protection)
30 * - multiple semaphore operations that alter the same semaphore in
31 * one semop() are handled.
32 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
33 * SETALL calls.
34 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
35 * - undo adjustments at process exit are limited to 0..SEMVMX.
36 * - namespace are supported.
37 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtine by writing
38 * to /proc/sys/kernel/sem.
39 * - statistics about the usage are reported in /proc/sysvipc/sem.
40 *
41 * Internals:
42 * - scalability:
43 * - all global variables are read-mostly.
44 * - semop() calls and semctl(RMID) are synchronized by RCU.
45 * - most operations do write operations (actually: spin_lock calls) to
46 * the per-semaphore array structure.
47 * Thus: Perfect SMP scaling between independent semaphore arrays.
48 * If multiple semaphores in one array are used, then cache line
49 * trashing on the semaphore array spinlock will limit the scaling.
50 * - semncnt and semzcnt are calculated on demand in count_semcnt()
51 * - the task that performs a successful semop() scans the list of all
52 * sleeping tasks and completes any pending operations that can be fulfilled.
53 * Semaphores are actively given to waiting tasks (necessary for FIFO).
54 * (see update_queue())
55 * - To improve the scalability, the actual wake-up calls are performed after
56 * dropping all locks. (see wake_up_sem_queue_prepare(),
57 * wake_up_sem_queue_do())
58 * - All work is done by the waker, the woken up task does not have to do
59 * anything - not even acquiring a lock or dropping a refcount.
60 * - A woken up task may not even touch the semaphore array anymore, it may
61 * have been destroyed already by a semctl(RMID).
62 * - The synchronizations between wake-ups due to a timeout/signal and a
63 * wake-up due to a completed semaphore operation is achieved by using an
64 * intermediate state (IN_WAKEUP).
65 * - UNDO values are stored in an array (one per process and per
66 * semaphore array, lazily allocated). For backwards compatibility, multiple
67 * modes for the UNDO variables are supported (per process, per thread)
68 * (see copy_semundo, CLONE_SYSVSEM)
69 * - There are two lists of the pending operations: a per-array list
70 * and per-semaphore list (stored in the array). This allows to achieve FIFO
71 * ordering without always scanning all pending operations.
72 * The worst-case behavior is nevertheless O(N^2) for N wakeups.
73 */
74
75#include <linux/slab.h>
76#include <linux/spinlock.h>
77#include <linux/init.h>
78#include <linux/proc_fs.h>
79#include <linux/time.h>
80#include <linux/security.h>
81#include <linux/syscalls.h>
82#include <linux/audit.h>
83#include <linux/capability.h>
84#include <linux/seq_file.h>
85#include <linux/rwsem.h>
86#include <linux/nsproxy.h>
87#include <linux/ipc_namespace.h>
88
89#include <linux/uaccess.h>
90#include "util.h"
91
92/* One semaphore structure for each semaphore in the system. */
93struct sem {
94 int semval; /* current value */
95 /*
96 * PID of the process that last modified the semaphore. For
97 * Linux, specifically these are:
98 * - semop
99 * - semctl, via SETVAL and SETALL.
100 * - at task exit when performing undo adjustments (see exit_sem).
101 */
102 int sempid;
103 spinlock_t lock; /* spinlock for fine-grained semtimedop */
104 struct list_head pending_alter; /* pending single-sop operations */
105 /* that alter the semaphore */
106 struct list_head pending_const; /* pending single-sop operations */
107 /* that do not alter the semaphore*/
108 time_t sem_otime; /* candidate for sem_otime */
109} ____cacheline_aligned_in_smp;
110
111/* One queue for each sleeping process in the system. */
112struct sem_queue {
113 struct list_head list; /* queue of pending operations */
114 struct task_struct *sleeper; /* this process */
115 struct sem_undo *undo; /* undo structure */
116 int pid; /* process id of requesting process */
117 int status; /* completion status of operation */
118 struct sembuf *sops; /* array of pending operations */
119 struct sembuf *blocking; /* the operation that blocked */
120 int nsops; /* number of operations */
121 int alter; /* does *sops alter the array? */
122};
123
124/* Each task has a list of undo requests. They are executed automatically
125 * when the process exits.
126 */
127struct sem_undo {
128 struct list_head list_proc; /* per-process list: *
129 * all undos from one process
130 * rcu protected */
131 struct rcu_head rcu; /* rcu struct for sem_undo */
132 struct sem_undo_list *ulp; /* back ptr to sem_undo_list */
133 struct list_head list_id; /* per semaphore array list:
134 * all undos for one array */
135 int semid; /* semaphore set identifier */
136 short *semadj; /* array of adjustments */
137 /* one per semaphore */
138};
139
140/* sem_undo_list controls shared access to the list of sem_undo structures
141 * that may be shared among all a CLONE_SYSVSEM task group.
142 */
143struct sem_undo_list {
144 atomic_t refcnt;
145 spinlock_t lock;
146 struct list_head list_proc;
147};
148
149
150#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
151
152#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
153
154static int newary(struct ipc_namespace *, struct ipc_params *);
155static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
156#ifdef CONFIG_PROC_FS
157static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
158#endif
159
160#define SEMMSL_FAST 256 /* 512 bytes on stack */
161#define SEMOPM_FAST 64 /* ~ 372 bytes on stack */
162
163/*
164 * Locking:
165 * sem_undo.id_next,
166 * sem_array.complex_count,
167 * sem_array.pending{_alter,_cont},
168 * sem_array.sem_undo: global sem_lock() for read/write
169 * sem_undo.proc_next: only "current" is allowed to read/write that field.
170 *
171 * sem_array.sem_base[i].pending_{const,alter}:
172 * global or semaphore sem_lock() for read/write
173 */
174
175#define sc_semmsl sem_ctls[0]
176#define sc_semmns sem_ctls[1]
177#define sc_semopm sem_ctls[2]
178#define sc_semmni sem_ctls[3]
179
180void sem_init_ns(struct ipc_namespace *ns)
181{
182 ns->sc_semmsl = SEMMSL;
183 ns->sc_semmns = SEMMNS;
184 ns->sc_semopm = SEMOPM;
185 ns->sc_semmni = SEMMNI;
186 ns->used_sems = 0;
187 ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
188}
189
190#ifdef CONFIG_IPC_NS
191void sem_exit_ns(struct ipc_namespace *ns)
192{
193 free_ipcs(ns, &sem_ids(ns), freeary);
194 idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
195}
196#endif
197
198void __init sem_init(void)
199{
200 sem_init_ns(&init_ipc_ns);
201 ipc_init_proc_interface("sysvipc/sem",
202 " key semid perms nsems uid gid cuid cgid otime ctime\n",
203 IPC_SEM_IDS, sysvipc_sem_proc_show);
204}
205
206/**
207 * unmerge_queues - unmerge queues, if possible.
208 * @sma: semaphore array
209 *
210 * The function unmerges the wait queues if complex_count is 0.
211 * It must be called prior to dropping the global semaphore array lock.
212 */
213static void unmerge_queues(struct sem_array *sma)
214{
215 struct sem_queue *q, *tq;
216
217 /* complex operations still around? */
218 if (sma->complex_count)
219 return;
220 /*
221 * We will switch back to simple mode.
222 * Move all pending operation back into the per-semaphore
223 * queues.
224 */
225 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
226 struct sem *curr;
227 curr = &sma->sem_base[q->sops[0].sem_num];
228
229 list_add_tail(&q->list, &curr->pending_alter);
230 }
231 INIT_LIST_HEAD(&sma->pending_alter);
232}
233
234/**
235 * merge_queues - merge single semop queues into global queue
236 * @sma: semaphore array
237 *
238 * This function merges all per-semaphore queues into the global queue.
239 * It is necessary to achieve FIFO ordering for the pending single-sop
240 * operations when a multi-semop operation must sleep.
241 * Only the alter operations must be moved, the const operations can stay.
242 */
243static void merge_queues(struct sem_array *sma)
244{
245 int i;
246 for (i = 0; i < sma->sem_nsems; i++) {
247 struct sem *sem = sma->sem_base + i;
248
249 list_splice_init(&sem->pending_alter, &sma->pending_alter);
250 }
251}
252
253static void sem_rcu_free(struct rcu_head *head)
254{
255 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
256 struct sem_array *sma = ipc_rcu_to_struct(p);
257
258 security_sem_free(sma);
259 ipc_rcu_free(head);
260}
261
262/*
263 * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
264 * are only control barriers.
265 * The code must pair with spin_unlock(&sem->lock) or
266 * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
267 *
268 * smp_rmb() is sufficient, as writes cannot pass the control barrier.
269 */
270#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
271
272/*
273 * Wait until all currently ongoing simple ops have completed.
274 * Caller must own sem_perm.lock.
275 * New simple ops cannot start, because simple ops first check
276 * that a) sem_perm.lock is free and
277 * b) complex_count is 0.
278 */
279static void sem_wait_array(struct sem_array *sma)
280{
281 int i;
282 struct sem *sem;
283
284 if (sma->complex_count) {
285 /* The thread that increased sma->complex_count waited on
286 * all sem->lock locks. Thus we don't need to wait again.
287 */
288 return;
289 }
290
291 for (i = 0; i < sma->sem_nsems; i++) {
292 sem = sma->sem_base + i;
293 spin_unlock_wait(&sem->lock);
294 }
295 ipc_smp_acquire__after_spin_is_unlocked();
296}
297
298/*
299 * If the request contains only one semaphore operation, and there are
300 * no complex transactions pending, lock only the semaphore involved.
301 * Otherwise, lock the entire semaphore array, since we either have
302 * multiple semaphores in our own semops, or we need to look at
303 * semaphores from other pending complex operations.
304 */
305static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
306 int nsops)
307{
308 struct sem *sem;
309
310 if (nsops != 1) {
311 /* Complex operation - acquire a full lock */
312 ipc_lock_object(&sma->sem_perm);
313
314 /* And wait until all simple ops that are processed
315 * right now have dropped their locks.
316 */
317 sem_wait_array(sma);
318 return -1;
319 }
320
321 /*
322 * Only one semaphore affected - try to optimize locking.
323 * The rules are:
324 * - optimized locking is possible if no complex operation
325 * is either enqueued or processed right now.
326 * - The test for enqueued complex ops is simple:
327 * sma->complex_count != 0
328 * - Testing for complex ops that are processed right now is
329 * a bit more difficult. Complex ops acquire the full lock
330 * and first wait until the running simple ops have completed.
331 * (see above)
332 * Thus: If we own a simple lock and the global lock is free
333 * and complex_count is now 0, then it will stay 0 and
334 * thus just locking sem->lock is sufficient.
335 */
336 sem = sma->sem_base + sops->sem_num;
337
338 if (sma->complex_count == 0) {
339 /*
340 * It appears that no complex operation is around.
341 * Acquire the per-semaphore lock.
342 */
343 spin_lock(&sem->lock);
344
345 /* Then check that the global lock is free */
346 if (!spin_is_locked(&sma->sem_perm.lock)) {
347 /*
348 * We need a memory barrier with acquire semantics,
349 * otherwise we can race with another thread that does:
350 * complex_count++;
351 * spin_unlock(sem_perm.lock);
352 */
353 ipc_smp_acquire__after_spin_is_unlocked();
354
355 /*
356 * Now repeat the test of complex_count:
357 * It can't change anymore until we drop sem->lock.
358 * Thus: if it is now 0, then it will stay 0.
359 */
360 if (sma->complex_count == 0) {
361 /* fast path successful! */
362 return sops->sem_num;
363 }
364 }
365 spin_unlock(&sem->lock);
366 }
367
368 /* slow path: acquire the full lock */
369 ipc_lock_object(&sma->sem_perm);
370
371 if (sma->complex_count == 0) {
372 /* False alarm:
373 * There is no complex operation, thus we can switch
374 * back to the fast path.
375 */
376 spin_lock(&sem->lock);
377 ipc_unlock_object(&sma->sem_perm);
378 return sops->sem_num;
379 } else {
380 /* Not a false alarm, thus complete the sequence for a
381 * full lock.
382 */
383 sem_wait_array(sma);
384 return -1;
385 }
386}
387
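/*
 * sem_unlock - drop the lock taken by sem_lock()
 * @locknum: the value returned by sem_lock(): -1 if the global lock was
 *	taken, otherwise the index of the locked semaphore.
 *
 * Dropping the global lock also unmerges the wait queues, so that the
 * next simple operation can use the per-semaphore fast path again.
 */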
388static inline void sem_unlock(struct sem_array *sma, int locknum)
389{
390 if (locknum == -1) {
391 unmerge_queues(sma);
392 ipc_unlock_object(&sma->sem_perm);
393 } else {
394 struct sem *sem = sma->sem_base + locknum;
395 spin_unlock(&sem->lock);
396 }
397}
398
399/*
400 * sem_lock_(check_) routines are called in the paths where the rwsem
401 * is not held.
402 *
403 * The caller holds the RCU read lock.
404 */
405static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
406 int id, struct sembuf *sops, int nsops, int *locknum)
407{
408 struct kern_ipc_perm *ipcp;
409 struct sem_array *sma;
410
411 ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
412 if (IS_ERR(ipcp))
413 return ERR_CAST(ipcp);
414
415 sma = container_of(ipcp, struct sem_array, sem_perm);
416 *locknum = sem_lock(sma, sops, nsops);
417
418 /* ipc_rmid() may have already freed the ID while sem_lock
419 * was spinning: verify that the structure is still valid
420 */
421 if (ipc_valid_object(ipcp))
422 return container_of(ipcp, struct sem_array, sem_perm);
423
424 sem_unlock(sma, *locknum);
425 return ERR_PTR(-EINVAL);
426}
427
428static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
429{
430 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
431
432 if (IS_ERR(ipcp))
433 return ERR_CAST(ipcp);
434
435 return container_of(ipcp, struct sem_array, sem_perm);
436}
437
438static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
439 int id)
440{
441 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
442
443 if (IS_ERR(ipcp))
444 return ERR_CAST(ipcp);
445
446 return container_of(ipcp, struct sem_array, sem_perm);
447}
448
449static inline void sem_lock_and_putref(struct sem_array *sma)
450{
451 sem_lock(sma, NULL, -1);
452 ipc_rcu_putref(sma, ipc_rcu_free);
453}
454
455static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
456{
457 ipc_rmid(&sem_ids(ns), &s->sem_perm);
458}
459
460/*
461 * Lockless wakeup algorithm:
462 * Without the check/retry algorithm a lockless wakeup is possible:
463 * - queue.status is initialized to -EINTR before blocking.
464 * - wakeup is performed by
465 * * unlinking the queue entry from the pending list
466 * * setting queue.status to IN_WAKEUP
467 * This is the notification for the blocked thread that a
468 * result value is imminent.
469 * * call wake_up_process
470 * * set queue.status to the final value.
471 * - the previously blocked thread checks queue.status:
472 * * if it's IN_WAKEUP, then it must wait until the value changes
473 * * if it's not -EINTR, then the operation was completed by
474 * update_queue. semtimedop can return queue.status without
475 * performing any operation on the sem array.
476 * * otherwise it must acquire the spinlock and check what's up.
477 *
478 * The two-stage algorithm is necessary to protect against the following
479 * races:
480 * - if queue.status is set after wake_up_process, then the woken up idle
481 * thread could race forward and try (and fail) to acquire sma->lock
482 * before update_queue had a chance to set queue.status
483 * - if queue.status is written before wake_up_process and if the
484 * blocked process is woken up by a signal between writing
485 * queue.status and the wake_up_process, then the woken up
486 * process could return from semtimedop and die by calling
487 * sys_exit before wake_up_process is called. Then wake_up_process
488 * will oops, because the task structure is already invalid.
489 * (yes, this happened on s390 with sysv msg).
490 *
491 */
492#define IN_WAKEUP 1
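/*
 * In short, queue.status moves through:
 *	-EINTR -> IN_WAKEUP -> final result code
 * and a sleeper that observes IN_WAKEUP must spin until the final value
 * becomes visible (see get_queue_result()).
 */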
493
494/**
495 * newary - Create a new semaphore set
496 * @ns: namespace
497 * @params: ptr to the structure that contains key, semflg and nsems
498 *
499 * Called with sem_ids.rwsem held (as a writer)
500 */
501static int newary(struct ipc_namespace *ns, struct ipc_params *params)
502{
503 int id;
504 int retval;
505 struct sem_array *sma;
506 int size;
507 key_t key = params->key;
508 int nsems = params->u.nsems;
509 int semflg = params->flg;
510 int i;
511
512 if (!nsems)
513 return -EINVAL;
514 if (ns->used_sems + nsems > ns->sc_semmns)
515 return -ENOSPC;
516
517 size = sizeof(*sma) + nsems * sizeof(struct sem);
518 sma = ipc_rcu_alloc(size);
519 if (!sma)
520 return -ENOMEM;
521
522 memset(sma, 0, size);
523
524 sma->sem_perm.mode = (semflg & S_IRWXUGO);
525 sma->sem_perm.key = key;
526
527 sma->sem_perm.security = NULL;
528 retval = security_sem_alloc(sma);
529 if (retval) {
530 ipc_rcu_putref(sma, ipc_rcu_free);
531 return retval;
532 }
533
534 sma->sem_base = (struct sem *) &sma[1];
535
536 for (i = 0; i < nsems; i++) {
537 INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
538 INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
539 spin_lock_init(&sma->sem_base[i].lock);
540 }
541
542 sma->complex_count = 0;
543 INIT_LIST_HEAD(&sma->pending_alter);
544 INIT_LIST_HEAD(&sma->pending_const);
545 INIT_LIST_HEAD(&sma->list_id);
546 sma->sem_nsems = nsems;
547 sma->sem_ctime = get_seconds();
548
549 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
550 if (id < 0) {
551 ipc_rcu_putref(sma, sem_rcu_free);
552 return id;
553 }
554 ns->used_sems += nsems;
555
556 sem_unlock(sma, -1);
557 rcu_read_unlock();
558
559 return sma->sem_perm.id;
560}
561
562
563/*
564 * Called with sem_ids.rwsem and ipcp locked.
565 */
566static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
567{
568 struct sem_array *sma;
569
570 sma = container_of(ipcp, struct sem_array, sem_perm);
571 return security_sem_associate(sma, semflg);
572}
573
574/*
575 * Called with sem_ids.rwsem and ipcp locked.
576 */
577static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
578 struct ipc_params *params)
579{
580 struct sem_array *sma;
581
582 sma = container_of(ipcp, struct sem_array, sem_perm);
583 if (params->u.nsems > sma->sem_nsems)
584 return -EINVAL;
585
586 return 0;
587}
588
589SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
590{
591 struct ipc_namespace *ns;
592 static const struct ipc_ops sem_ops = {
593 .getnew = newary,
594 .associate = sem_security,
595 .more_checks = sem_more_checks,
596 };
597 struct ipc_params sem_params;
598
599 ns = current->nsproxy->ipc_ns;
600
601 if (nsems < 0 || nsems > ns->sc_semmsl)
602 return -EINVAL;
603
604 sem_params.key = key;
605 sem_params.flg = semflg;
606 sem_params.u.nsems = nsems;
607
608 return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
609}
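/*
 * Example (user space): create a set of three semaphores, failing if an
 * array already exists for the key:
 *	int id = semget(key, 3, IPC_CREAT | IPC_EXCL | 0600);
 */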
610
611/**
612 * perform_atomic_semop - Perform (if possible) a semaphore operation
613 * @sma: semaphore array
614 * @q: struct sem_queue that describes the operation
615 *
616 * Returns 0 if the operation was possible.
617 * Returns 1 if the operation is impossible and the caller must sleep.
618 * Negative values are error codes.
619 */
620static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
621{
622 int result, sem_op, nsops, pid;
623 struct sembuf *sop;
624 struct sem *curr;
625 struct sembuf *sops;
626 struct sem_undo *un;
627
628 sops = q->sops;
629 nsops = q->nsops;
630 un = q->undo;
631
632 for (sop = sops; sop < sops + nsops; sop++) {
633 curr = sma->sem_base + sop->sem_num;
634 sem_op = sop->sem_op;
635 result = curr->semval;
636
637 if (!sem_op && result)
638 goto would_block;
639
640 result += sem_op;
641 if (result < 0)
642 goto would_block;
643 if (result > SEMVMX)
644 goto out_of_range;
645
646 if (sop->sem_flg & SEM_UNDO) {
647 int undo = un->semadj[sop->sem_num] - sem_op;
648 /* Exceeding the undo range is an error. */
649 if (undo < (-SEMAEM - 1) || undo > SEMAEM)
650 goto out_of_range;
651 un->semadj[sop->sem_num] = undo;
652 }
653
654 curr->semval = result;
655 }
656
657 sop--;
658 pid = q->pid;
659 while (sop >= sops) {
660 sma->sem_base[sop->sem_num].sempid = pid;
661 sop--;
662 }
663
664 return 0;
665
666out_of_range:
667 result = -ERANGE;
668 goto undo;
669
670would_block:
671 q->blocking = sop;
672
673 if (sop->sem_flg & IPC_NOWAIT)
674 result = -EAGAIN;
675 else
676 result = 1;
677
678undo:
679 sop--;
680 while (sop >= sops) {
681 sem_op = sop->sem_op;
682 sma->sem_base[sop->sem_num].semval -= sem_op;
683 if (sop->sem_flg & SEM_UNDO)
684 un->semadj[sop->sem_num] += sem_op;
685 sop--;
686 }
687
688 return result;
689}
690
691/**
692 * wake_up_sem_queue_prepare - prepare the wake-up of queue entry @q
693 * @pt: list head of the tasks that must be woken up
694 * @q: queue entry that must be signaled
695 * @error: Error value for the signal
696 */
697static void wake_up_sem_queue_prepare(struct list_head *pt,
698 struct sem_queue *q, int error)
699{
700 if (list_empty(pt)) {
701 /*
702 * Hold preempt off so that we don't get preempted and have the
703 * wakee busy-wait until we're scheduled back on.
704 */
705 preempt_disable();
706 }
707 q->status = IN_WAKEUP;
708 q->pid = error;
709
710 list_add_tail(&q->list, pt);
711}
712
713/**
714 * wake_up_sem_queue_do - do the actual wake-up
715 * @pt: list of tasks to be woken up
716 *
717 * Do the actual wake-up.
718 * The function is called without any locks held, thus the semaphore array
719 * could be destroyed already and the tasks can disappear as soon as the
720 * status is set to the actual return code.
721 */
722static void wake_up_sem_queue_do(struct list_head *pt)
723{
724 struct sem_queue *q, *t;
725 int did_something;
726
727 did_something = !list_empty(pt);
728 list_for_each_entry_safe(q, t, pt, list) {
729 wake_up_process(q->sleeper);
730 /* q can disappear immediately after writing q->status. */
731 smp_wmb();
732 q->status = q->pid;
733 }
734 if (did_something)
735 preempt_enable();
736}
737
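/*
 * unlink_queue - remove q from its pending list. Multi-sop entries are
 * accounted in sma->complex_count, so the counter must be adjusted too.
 */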
738static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
739{
740 list_del(&q->list);
741 if (q->nsops > 1)
742 sma->complex_count--;
743}
744
745/** check_restart(sma, q)
746 * @sma: semaphore array
747 * @q: the operation that just completed
748 *
749 * update_queue is O(N^2) when it restarts scanning the whole queue of
750 * waiting operations. Therefore this function checks if the restart is
751 * really necessary. It is called after a previously waiting operation
752 * modified the array.
753 * Note that wait-for-zero operations are handled without restart.
754 */
755static int check_restart(struct sem_array *sma, struct sem_queue *q)
756{
757 /* pending complex alter operations are too difficult to analyse */
758 if (!list_empty(&sma->pending_alter))
759 return 1;
760
761 /* we were a sleeping complex operation. Too difficult */
762 if (q->nsops > 1)
763 return 1;
764
765 /* It is impossible that someone waits for the new value:
766 * - complex operations always restart.
767 * - wait-for-zero operations are handled separately.
768 * - q is a previously sleeping simple operation that
769 * altered the array. It must be a decrement, because
770 * simple increments never sleep.
771 * - If there are older (higher priority) decrements
772 * in the queue, then they have observed the original
773 * semval value and couldn't proceed. The operation
774 * decremented the value - thus they won't proceed either.
775 */
776 return 0;
777}
778
779/**
780 * wake_const_ops - wake up non-alter tasks
781 * @sma: semaphore array.
782 * @semnum: semaphore that was modified.
783 * @pt: list head for the tasks that must be woken up.
784 *
785 * wake_const_ops must be called after a semaphore in a semaphore array
786 * was set to 0. If complex const operations are pending, wake_const_ops must
787 * be called with semnum = -1, as well as with the number of each modified
788 * semaphore.
789 * The tasks that must be woken up are added to @pt. The return code
790 * is stored in q->pid.
791 * The function returns 1 if at least one operation was completed successfully.
792 */
793static int wake_const_ops(struct sem_array *sma, int semnum,
794 struct list_head *pt)
795{
796 struct sem_queue *q;
797 struct list_head *walk;
798 struct list_head *pending_list;
799 int semop_completed = 0;
800
801 if (semnum == -1)
802 pending_list = &sma->pending_const;
803 else
804 pending_list = &sma->sem_base[semnum].pending_const;
805
806 walk = pending_list->next;
807 while (walk != pending_list) {
808 int error;
809
810 q = container_of(walk, struct sem_queue, list);
811 walk = walk->next;
812
813 error = perform_atomic_semop(sma, q);
814
815 if (error <= 0) {
816 /* operation completed, remove from queue & wakeup */
817
818 unlink_queue(sma, q);
819
820 wake_up_sem_queue_prepare(pt, q, error);
821 if (error == 0)
822 semop_completed = 1;
823 }
824 }
825 return semop_completed;
826}
827
828/**
829 * do_smart_wakeup_zero - wakeup all wait for zero tasks
830 * @sma: semaphore array
831 * @sops: operations that were performed
832 * @nsops: number of operations
833 * @pt: list head of the tasks that must be woken up.
834 *
835 * Checks all required queues for wait-for-zero operations, based
836 * on the actual changes that were performed on the semaphore array.
837 * The function returns 1 if at least one operation was completed successfully.
838 */
839static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
840 int nsops, struct list_head *pt)
841{
842 int i;
843 int semop_completed = 0;
844 int got_zero = 0;
845
846 /* first: the per-semaphore queues, if known */
847 if (sops) {
848 for (i = 0; i < nsops; i++) {
849 int num = sops[i].sem_num;
850
851 if (sma->sem_base[num].semval == 0) {
852 got_zero = 1;
853 semop_completed |= wake_const_ops(sma, num, pt);
854 }
855 }
856 } else {
857 /*
858 * No sops means the modified semaphores are not known.
859 * Assume all were changed.
860 */
861 for (i = 0; i < sma->sem_nsems; i++) {
862 if (sma->sem_base[i].semval == 0) {
863 got_zero = 1;
864 semop_completed |= wake_const_ops(sma, i, pt);
865 }
866 }
867 }
868 /*
869 * If one of the modified semaphores got 0,
870 * then check the global queue, too.
871 */
872 if (got_zero)
873 semop_completed |= wake_const_ops(sma, -1, pt);
874
875 return semop_completed;
876}
877
878
879/**
880 * update_queue - look for tasks that can be completed.
881 * @sma: semaphore array.
882 * @semnum: semaphore that was modified.
883 * @pt: list head for the tasks that must be woken up.
884 *
885 * update_queue must be called after a semaphore in a semaphore array
886 * was modified. If multiple semaphores were modified, update_queue must
887 * be called with semnum = -1, as well as with the number of each modified
888 * semaphore.
889 * The tasks that must be woken up are added to @pt. The return code
890 * is stored in q->pid.
891 * The function internally checks if const operations can now succeed.
892 *
893 * The function returns 1 if at least one semop was completed successfully.
894 */
895static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
896{
897 struct sem_queue *q;
898 struct list_head *walk;
899 struct list_head *pending_list;
900 int semop_completed = 0;
901
902 if (semnum == -1)
903 pending_list = &sma->pending_alter;
904 else
905 pending_list = &sma->sem_base[semnum].pending_alter;
906
907again:
908 walk = pending_list->next;
909 while (walk != pending_list) {
910 int error, restart;
911
912 q = container_of(walk, struct sem_queue, list);
913 walk = walk->next;
914
915 /* If we are scanning the single-sop, per-semaphore list of
916 * one semaphore and that semaphore is 0, then it is not
917 * necessary to scan further: simple increments
918 * that affect only one entry succeed immediately and cannot
919 * be in the per semaphore pending queue, and decrements
920 * cannot be successful if the value is already 0.
921 */
922 if (semnum != -1 && sma->sem_base[semnum].semval == 0)
923 break;
924
925 error = perform_atomic_semop(sma, q);
926
927 /* Does q->sleeper still need to sleep? */
928 if (error > 0)
929 continue;
930
931 unlink_queue(sma, q);
932
933 if (error) {
934 restart = 0;
935 } else {
936 semop_completed = 1;
937 do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
938 restart = check_restart(sma, q);
939 }
940
941 wake_up_sem_queue_prepare(pt, q, error);
942 if (restart)
943 goto again;
944 }
945 return semop_completed;
946}
947
948/**
949 * set_semotime - set sem_otime
950 * @sma: semaphore array
951 * @sops: operations that modified the array, may be NULL
952 *
953 * sem_otime is replicated to avoid cache line thrashing.
954 * This function sets one instance to the current time.
955 */
956static void set_semotime(struct sem_array *sma, struct sembuf *sops)
957{
958 if (sops == NULL) {
959 sma->sem_base[0].sem_otime = get_seconds();
960 } else {
961 sma->sem_base[sops[0].sem_num].sem_otime =
962 get_seconds();
963 }
964}
965
966/**
967 * do_smart_update - optimized update_queue
968 * @sma: semaphore array
969 * @sops: operations that were performed
970 * @nsops: number of operations
971 * @otime: force setting otime
972 * @pt: list head of the tasks that must be woken up.
973 *
974 * do_smart_update() does the required calls to update_queue and wakeup_zero,
975 * based on the actual changes that were performed on the semaphore array.
976 * Note that the function does not do the actual wake-up: the caller is
977 * responsible for calling wake_up_sem_queue_do(@pt).
978 * It is safe to perform this call after dropping all locks.
979 */
980static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
981 int otime, struct list_head *pt)
982{
983 int i;
984
985 otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);
986
987 if (!list_empty(&sma->pending_alter)) {
988 /* semaphore array uses the global queue - just process it. */
989 otime |= update_queue(sma, -1, pt);
990 } else {
991 if (!sops) {
992 /*
993 * No sops, thus the modified semaphores are not
994 * known. Check all.
995 */
996 for (i = 0; i < sma->sem_nsems; i++)
997 otime |= update_queue(sma, i, pt);
998 } else {
999 /*
1000 * Check the semaphores that were increased:
1001 * - No complex ops, thus all sleeping ops are
1002 * decrements.
1003 * - if we decreased the value, then any sleeping
1004 * semaphore ops won't be able to run: if the
1005 * previous value was too small, then the new
1006 * value will be too small, too.
1007 */
1008 for (i = 0; i < nsops; i++) {
1009 if (sops[i].sem_op > 0) {
1010 otime |= update_queue(sma,
1011 sops[i].sem_num, pt);
1012 }
1013 }
1014 }
1015 }
1016 if (otime)
1017 set_semotime(sma, sops);
1018}
1019
1020/*
1021 * check_qop: Test if a queued operation sleeps on the semaphore semnum
1022 */
1023static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
1024 bool count_zero)
1025{
1026 struct sembuf *sop = q->blocking;
1027
1028 /*
1029 * Linux always (since 0.99.10) reported a task as sleeping on all
1030 * semaphores. This violates SUS, therefore it was changed to the
1031 * standard compliant behavior.
1032 * Give the administrators a chance to notice that an application
1033 * might misbehave because it relies on the Linux behavior.
1034 */
1035 pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
1036 "The task %s (%d) triggered the difference, watch for misbehavior.\n",
1037 current->comm, task_pid_nr(current));
1038
1039 if (sop->sem_num != semnum)
1040 return 0;
1041
1042 if (count_zero && sop->sem_op == 0)
1043 return 1;
1044 if (!count_zero && sop->sem_op < 0)
1045 return 1;
1046
1047 return 0;
1048}
1049
1050/* The following counts are associated to each semaphore:
1051 * semncnt number of tasks waiting for semval to increase
1052 * semzcnt number of tasks waiting on semval being zero
1053 *
1054 * By definition, a task waits only on the semaphore of the first semop
1055 * that cannot proceed, even if additional operations would block, too.
1056 */
1057static int count_semcnt(struct sem_array *sma, ushort semnum,
1058 bool count_zero)
1059{
1060 struct list_head *l;
1061 struct sem_queue *q;
1062 int semcnt;
1063
1064 semcnt = 0;
1065 /* First: check the simple operations. They are easy to evaluate */
1066 if (count_zero)
1067 l = &sma->sem_base[semnum].pending_const;
1068 else
1069 l = &sma->sem_base[semnum].pending_alter;
1070
1071 list_for_each_entry(q, l, list) {
1072 * all tasks on a per-semaphore list sleep on exactly
1073 * that semaphore
1074 */
1075 semcnt++;
1076 }
1077
1078 /* Then: check the complex operations. */
1079 list_for_each_entry(q, &sma->pending_alter, list) {
1080 semcnt += check_qop(sma, semnum, q, count_zero);
1081 }
1082 if (count_zero) {
1083 list_for_each_entry(q, &sma->pending_const, list) {
1084 semcnt += check_qop(sma, semnum, q, count_zero);
1085 }
1086 }
1087 return semcnt;
1088}
1089
1090/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1091 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
1092 * remains locked on exit.
1093 */
1094static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1095{
1096 struct sem_undo *un, *tu;
1097 struct sem_queue *q, *tq;
1098 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1099 struct list_head tasks;
1100 int i;
1101
1102 /* Free the existing undo structures for this semaphore set. */
1103 ipc_assert_locked_object(&sma->sem_perm);
1104 list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1105 list_del(&un->list_id);
1106 spin_lock(&un->ulp->lock);
1107 un->semid = -1;
1108 list_del_rcu(&un->list_proc);
1109 spin_unlock(&un->ulp->lock);
1110 kfree_rcu(un, rcu);
1111 }
1112
1113 /* Wake up all pending processes and let them fail with EIDRM. */
1114 INIT_LIST_HEAD(&tasks);
1115 list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1116 unlink_queue(sma, q);
1117 wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1118 }
1119
1120 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1121 unlink_queue(sma, q);
1122 wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1123 }
1124 for (i = 0; i < sma->sem_nsems; i++) {
1125 struct sem *sem = sma->sem_base + i;
1126 list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
1127 unlink_queue(sma, q);
1128 wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1129 }
1130 list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
1131 unlink_queue(sma, q);
1132 wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1133 }
1134 }
1135
1136 /* Remove the semaphore set from the IDR */
1137 sem_rmid(ns, sma);
1138 sem_unlock(sma, -1);
1139 rcu_read_unlock();
1140
1141 wake_up_sem_queue_do(&tasks);
1142 ns->used_sems -= sma->sem_nsems;
1143 ipc_rcu_putref(sma, sem_rcu_free);
1144}
1145
1146static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
1147{
1148 switch (version) {
1149 case IPC_64:
1150 return copy_to_user(buf, in, sizeof(*in));
1151 case IPC_OLD:
1152 {
1153 struct semid_ds out;
1154
1155 memset(&out, 0, sizeof(out));
1156
1157 ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
1158
1159 out.sem_otime = in->sem_otime;
1160 out.sem_ctime = in->sem_ctime;
1161 out.sem_nsems = in->sem_nsems;
1162
1163 return copy_to_user(buf, &out, sizeof(out));
1164 }
1165 default:
1166 return -EINVAL;
1167 }
1168}
1169
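/*
 * get_semotime - determine sem_otime for a semaphore array.
 * sem_otime is replicated across the individual semaphores (see
 * set_semotime()), thus the last-operation time of the array is the
 * maximum of the per-semaphore values.
 */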
1170static time_t get_semotime(struct sem_array *sma)
1171{
1172 int i;
1173 time_t res;
1174
1175 res = sma->sem_base[0].sem_otime;
1176 for (i = 1; i < sma->sem_nsems; i++) {
1177 time_t to = sma->sem_base[i].sem_otime;
1178
1179 if (to > res)
1180 res = to;
1181 }
1182 return res;
1183}
1184
1185static int semctl_nolock(struct ipc_namespace *ns, int semid,
1186 int cmd, int version, void __user *p)
1187{
1188 int err;
1189 struct sem_array *sma;
1190
1191 switch (cmd) {
1192 case IPC_INFO:
1193 case SEM_INFO:
1194 {
1195 struct seminfo seminfo;
1196 int max_id;
1197
1198 err = security_sem_semctl(NULL, cmd);
1199 if (err)
1200 return err;
1201
1202 memset(&seminfo, 0, sizeof(seminfo));
1203 seminfo.semmni = ns->sc_semmni;
1204 seminfo.semmns = ns->sc_semmns;
1205 seminfo.semmsl = ns->sc_semmsl;
1206 seminfo.semopm = ns->sc_semopm;
1207 seminfo.semvmx = SEMVMX;
1208 seminfo.semmnu = SEMMNU;
1209 seminfo.semmap = SEMMAP;
1210 seminfo.semume = SEMUME;
1211 down_read(&sem_ids(ns).rwsem);
1212 if (cmd == SEM_INFO) {
1213 seminfo.semusz = sem_ids(ns).in_use;
1214 seminfo.semaem = ns->used_sems;
1215 } else {
1216 seminfo.semusz = SEMUSZ;
1217 seminfo.semaem = SEMAEM;
1218 }
1219 max_id = ipc_get_maxid(&sem_ids(ns));
1220 up_read(&sem_ids(ns).rwsem);
1221 if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1222 return -EFAULT;
1223 return (max_id < 0) ? 0 : max_id;
1224 }
1225 case IPC_STAT:
1226 case SEM_STAT:
1227 {
1228 struct semid64_ds tbuf;
1229 int id = 0;
1230
1231 memset(&tbuf, 0, sizeof(tbuf));
1232
1233 rcu_read_lock();
1234 if (cmd == SEM_STAT) {
1235 sma = sem_obtain_object(ns, semid);
1236 if (IS_ERR(sma)) {
1237 err = PTR_ERR(sma);
1238 goto out_unlock;
1239 }
1240 id = sma->sem_perm.id;
1241 } else {
1242 sma = sem_obtain_object_check(ns, semid);
1243 if (IS_ERR(sma)) {
1244 err = PTR_ERR(sma);
1245 goto out_unlock;
1246 }
1247 }
1248
1249 err = -EACCES;
1250 if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1251 goto out_unlock;
1252
1253 err = security_sem_semctl(sma, cmd);
1254 if (err)
1255 goto out_unlock;
1256
1257 kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
1258 tbuf.sem_otime = get_semotime(sma);
1259 tbuf.sem_ctime = sma->sem_ctime;
1260 tbuf.sem_nsems = sma->sem_nsems;
1261 rcu_read_unlock();
1262 if (copy_semid_to_user(p, &tbuf, version))
1263 return -EFAULT;
1264 return id;
1265 }
1266 default:
1267 return -EINVAL;
1268 }
1269out_unlock:
1270 rcu_read_unlock();
1271 return err;
1272}
1273
1274static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1275 unsigned long arg)
1276{
1277 struct sem_undo *un;
1278 struct sem_array *sma;
1279 struct sem *curr;
1280 int err;
1281 struct list_head tasks;
1282 int val;
1283#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1284 /* big-endian 64bit */
1285 val = arg >> 32;
1286#else
1287 /* 32bit or little-endian 64bit */
1288 val = arg;
1289#endif
1290
1291 if (val > SEMVMX || val < 0)
1292 return -ERANGE;
1293
1294 INIT_LIST_HEAD(&tasks);
1295
1296 rcu_read_lock();
1297 sma = sem_obtain_object_check(ns, semid);
1298 if (IS_ERR(sma)) {
1299 rcu_read_unlock();
1300 return PTR_ERR(sma);
1301 }
1302
1303 if (semnum < 0 || semnum >= sma->sem_nsems) {
1304 rcu_read_unlock();
1305 return -EINVAL;
1306 }
1307
1308
1309 if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1310 rcu_read_unlock();
1311 return -EACCES;
1312 }
1313
1314 err = security_sem_semctl(sma, SETVAL);
1315 if (err) {
1316 rcu_read_unlock();
1317 return -EACCES;
1318 }
1319
1320 sem_lock(sma, NULL, -1);
1321
1322 if (!ipc_valid_object(&sma->sem_perm)) {
1323 sem_unlock(sma, -1);
1324 rcu_read_unlock();
1325 return -EIDRM;
1326 }
1327
1328 curr = &sma->sem_base[semnum];
1329
1330 ipc_assert_locked_object(&sma->sem_perm);
1331 list_for_each_entry(un, &sma->list_id, list_id)
1332 un->semadj[semnum] = 0;
1333
1334 curr->semval = val;
1335 curr->sempid = task_tgid_vnr(current);
1336 sma->sem_ctime = get_seconds();
1337 /* maybe some queued-up processes were waiting for this */
1338 do_smart_update(sma, NULL, 0, 0, &tasks);
1339 sem_unlock(sma, -1);
1340 rcu_read_unlock();
1341 wake_up_sem_queue_do(&tasks);
1342 return 0;
1343}
1344
1345static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1346 int cmd, void __user *p)
1347{
1348 struct sem_array *sma;
1349 struct sem *curr;
1350 int err, nsems;
1351 ushort fast_sem_io[SEMMSL_FAST];
1352 ushort *sem_io = fast_sem_io;
1353 struct list_head tasks;
1354
1355 INIT_LIST_HEAD(&tasks);
1356
1357 rcu_read_lock();
1358 sma = sem_obtain_object_check(ns, semid);
1359 if (IS_ERR(sma)) {
1360 rcu_read_unlock();
1361 return PTR_ERR(sma);
1362 }
1363
1364 nsems = sma->sem_nsems;
1365
1366 err = -EACCES;
1367 if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1368 goto out_rcu_wakeup;
1369
1370 err = security_sem_semctl(sma, cmd);
1371 if (err)
1372 goto out_rcu_wakeup;
1373
1374 err = -EACCES;
1375 switch (cmd) {
1376 case GETALL:
1377 {
1378 ushort __user *array = p;
1379 int i;
1380
1381 sem_lock(sma, NULL, -1);
1382 if (!ipc_valid_object(&sma->sem_perm)) {
1383 err = -EIDRM;
1384 goto out_unlock;
1385 }
1386 if (nsems > SEMMSL_FAST) {
1387 if (!ipc_rcu_getref(sma)) {
1388 err = -EIDRM;
1389 goto out_unlock;
1390 }
1391 sem_unlock(sma, -1);
1392 rcu_read_unlock();
1393 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1394 if (sem_io == NULL) {
1395 ipc_rcu_putref(sma, ipc_rcu_free);
1396 return -ENOMEM;
1397 }
1398
1399 rcu_read_lock();
1400 sem_lock_and_putref(sma);
1401 if (!ipc_valid_object(&sma->sem_perm)) {
1402 err = -EIDRM;
1403 goto out_unlock;
1404 }
1405 }
1406 for (i = 0; i < sma->sem_nsems; i++)
1407 sem_io[i] = sma->sem_base[i].semval;
1408 sem_unlock(sma, -1);
1409 rcu_read_unlock();
1410 err = 0;
1411 if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1412 err = -EFAULT;
1413 goto out_free;
1414 }
1415 case SETALL:
1416 {
1417 int i;
1418 struct sem_undo *un;
1419
1420 if (!ipc_rcu_getref(sma)) {
1421 err = -EIDRM;
1422 goto out_rcu_wakeup;
1423 }
1424 rcu_read_unlock();
1425
1426 if (nsems > SEMMSL_FAST) {
1427 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1428 if (sem_io == NULL) {
1429 ipc_rcu_putref(sma, ipc_rcu_free);
1430 return -ENOMEM;
1431 }
1432 }
1433
1434 if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
1435 ipc_rcu_putref(sma, ipc_rcu_free);
1436 err = -EFAULT;
1437 goto out_free;
1438 }
1439
1440 for (i = 0; i < nsems; i++) {
1441 if (sem_io[i] > SEMVMX) {
1442 ipc_rcu_putref(sma, ipc_rcu_free);
1443 err = -ERANGE;
1444 goto out_free;
1445 }
1446 }
1447 rcu_read_lock();
1448 sem_lock_and_putref(sma);
1449 if (!ipc_valid_object(&sma->sem_perm)) {
1450 err = -EIDRM;
1451 goto out_unlock;
1452 }
1453
1454 for (i = 0; i < nsems; i++) {
1455 sma->sem_base[i].semval = sem_io[i];
1456 sma->sem_base[i].sempid = task_tgid_vnr(current);
1457 }
1458
1459 ipc_assert_locked_object(&sma->sem_perm);
1460 list_for_each_entry(un, &sma->list_id, list_id) {
1461 for (i = 0; i < nsems; i++)
1462 un->semadj[i] = 0;
1463 }
1464 sma->sem_ctime = get_seconds();
1465 /* maybe some queued-up processes were waiting for this */
1466 do_smart_update(sma, NULL, 0, 0, &tasks);
1467 err = 0;
1468 goto out_unlock;
1469 }
1470 /* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1471 }
1472 err = -EINVAL;
1473 if (semnum < 0 || semnum >= nsems)
1474 goto out_rcu_wakeup;
1475
1476 sem_lock(sma, NULL, -1);
1477 if (!ipc_valid_object(&sma->sem_perm)) {
1478 err = -EIDRM;
1479 goto out_unlock;
1480 }
1481 curr = &sma->sem_base[semnum];
1482
1483 switch (cmd) {
1484 case GETVAL:
1485 err = curr->semval;
1486 goto out_unlock;
1487 case GETPID:
1488 err = curr->sempid;
1489 goto out_unlock;
1490 case GETNCNT:
1491 err = count_semcnt(sma, semnum, 0);
1492 goto out_unlock;
1493 case GETZCNT:
1494 err = count_semcnt(sma, semnum, 1);
1495 goto out_unlock;
1496 }
1497
1498out_unlock:
1499 sem_unlock(sma, -1);
1500out_rcu_wakeup:
1501 rcu_read_unlock();
1502 wake_up_sem_queue_do(&tasks);
1503out_free:
1504 if (sem_io != fast_sem_io)
1505 ipc_free(sem_io);
1506 return err;
1507}
1508
1509static inline unsigned long
1510copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1511{
1512 switch (version) {
1513 case IPC_64:
1514 if (copy_from_user(out, buf, sizeof(*out)))
1515 return -EFAULT;
1516 return 0;
1517 case IPC_OLD:
1518 {
1519 struct semid_ds tbuf_old;
1520
1521 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1522 return -EFAULT;
1523
1524 out->sem_perm.uid = tbuf_old.sem_perm.uid;
1525 out->sem_perm.gid = tbuf_old.sem_perm.gid;
1526 out->sem_perm.mode = tbuf_old.sem_perm.mode;
1527
1528 return 0;
1529 }
1530 default:
1531 return -EINVAL;
1532 }
1533}
1534
1535/*
1536 * This function handles some semctl commands which require the rwsem
1537 * to be held in write mode.
1538 * NOTE: no locks must be held, the rwsem is taken inside this function.
1539 */
1540static int semctl_down(struct ipc_namespace *ns, int semid,
1541 int cmd, int version, void __user *p)
1542{
1543 struct sem_array *sma;
1544 int err;
1545 struct semid64_ds semid64;
1546 struct kern_ipc_perm *ipcp;
1547
1548 if (cmd == IPC_SET) {
1549 if (copy_semid_from_user(&semid64, p, version))
1550 return -EFAULT;
1551 }
1552
1553 down_write(&sem_ids(ns).rwsem);
1554 rcu_read_lock();
1555
1556 ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
1557 &semid64.sem_perm, 0);
1558 if (IS_ERR(ipcp)) {
1559 err = PTR_ERR(ipcp);
1560 goto out_unlock1;
1561 }
1562
1563 sma = container_of(ipcp, struct sem_array, sem_perm);
1564
1565 err = security_sem_semctl(sma, cmd);
1566 if (err)
1567 goto out_unlock1;
1568
1569 switch (cmd) {
1570 case IPC_RMID:
1571 sem_lock(sma, NULL, -1);
1572 /* freeary unlocks the ipc object and rcu */
1573 freeary(ns, ipcp);
1574 goto out_up;
1575 case IPC_SET:
1576 sem_lock(sma, NULL, -1);
1577 err = ipc_update_perm(&semid64.sem_perm, ipcp);
1578 if (err)
1579 goto out_unlock0;
1580 sma->sem_ctime = get_seconds();
1581 break;
1582 default:
1583 err = -EINVAL;
1584 goto out_unlock1;
1585 }
1586
1587out_unlock0:
1588 sem_unlock(sma, -1);
1589out_unlock1:
1590 rcu_read_unlock();
1591out_up:
1592 up_write(&sem_ids(ns).rwsem);
1593 return err;
1594}
1595
1596SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1597{
1598 int version;
1599 struct ipc_namespace *ns;
1600 void __user *p = (void __user *)arg;
1601
1602 if (semid < 0)
1603 return -EINVAL;
1604
1605 version = ipc_parse_version(&cmd);
1606 ns = current->nsproxy->ipc_ns;
1607
1608 switch (cmd) {
1609 case IPC_INFO:
1610 case SEM_INFO:
1611 case IPC_STAT:
1612 case SEM_STAT:
1613 return semctl_nolock(ns, semid, cmd, version, p);
1614 case GETALL:
1615 case GETVAL:
1616 case GETPID:
1617 case GETNCNT:
1618 case GETZCNT:
1619 case SETALL:
1620 return semctl_main(ns, semid, semnum, cmd, p);
1621 case SETVAL:
1622 return semctl_setval(ns, semid, semnum, arg);
1623 case IPC_RMID:
1624 case IPC_SET:
1625 return semctl_down(ns, semid, cmd, version, p);
1626 default:
1627 return -EINVAL;
1628 }
1629}
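/*
 * Example (user space): set semaphore 0 of a set to 1. Note that on
 * Linux the caller must define union semun itself:
 *	union semun { int val; } arg = { .val = 1 };
 *	semctl(semid, 0, SETVAL, arg);
 */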
1630
1631/* If the task doesn't already have an undo_list, then allocate one
1632 * here. We guarantee there is only one thread using this undo list,
1633 * and current is THE ONE
1634 *
1635 * If this allocation and assignment succeeds, but later
1636 * portions of this code fail, there is no need to free the sem_undo_list.
1637 * Just let it stay associated with the task, and it'll be freed later
1638 * at exit time.
1639 *
1640 * This can block, so callers must hold no locks.
1641 */
1642static inline int get_undo_list(struct sem_undo_list **undo_listp)
1643{
1644 struct sem_undo_list *undo_list;
1645
1646 undo_list = current->sysvsem.undo_list;
1647 if (!undo_list) {
1648 undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1649 if (undo_list == NULL)
1650 return -ENOMEM;
1651 spin_lock_init(&undo_list->lock);
1652 atomic_set(&undo_list->refcnt, 1);
1653 INIT_LIST_HEAD(&undo_list->list_proc);
1654
1655 current->sysvsem.undo_list = undo_list;
1656 }
1657 *undo_listp = undo_list;
1658 return 0;
1659}
1660
1661static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1662{
1663 struct sem_undo *un;
1664
1665 list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1666 if (un->semid == semid)
1667 return un;
1668 }
1669 return NULL;
1670}
1671
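/*
 * lookup_undo - find the undo structure for @semid and move it to the
 * front of ulp->list_proc, so that repeated semop() calls on the same
 * array find it first.
 */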
1672static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1673{
1674 struct sem_undo *un;
1675
1676 assert_spin_locked(&ulp->lock);
1677
1678 un = __lookup_undo(ulp, semid);
1679 if (un) {
1680 list_del_rcu(&un->list_proc);
1681 list_add_rcu(&un->list_proc, &ulp->list_proc);
1682 }
1683 return un;
1684}
1685
1686/**
1687 * find_alloc_undo - lookup (and if not present create) undo array
1688 * @ns: namespace
1689 * @semid: semaphore array id
1690 *
1691 * The function looks up (and if not present creates) the undo structure.
1692 * The size of the undo structure depends on the size of the semaphore
1693 * array, thus the alloc path is not that straightforward.
1694 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
1695 * performs a rcu_read_lock().
1696 */
1697static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1698{
1699 struct sem_array *sma;
1700 struct sem_undo_list *ulp;
1701 struct sem_undo *un, *new;
1702 int nsems, error;
1703
1704 error = get_undo_list(&ulp);
1705 if (error)
1706 return ERR_PTR(error);
1707
1708 rcu_read_lock();
1709 spin_lock(&ulp->lock);
1710 un = lookup_undo(ulp, semid);
1711 spin_unlock(&ulp->lock);
1712 if (likely(un != NULL))
1713 goto out;
1714
1715 /* no undo structure around - allocate one. */
1716 /* step 1: figure out the size of the semaphore array */
1717 sma = sem_obtain_object_check(ns, semid);
1718 if (IS_ERR(sma)) {
1719 rcu_read_unlock();
1720 return ERR_CAST(sma);
1721 }
1722
1723 nsems = sma->sem_nsems;
1724 if (!ipc_rcu_getref(sma)) {
1725 rcu_read_unlock();
1726 un = ERR_PTR(-EIDRM);
1727 goto out;
1728 }
1729 rcu_read_unlock();
1730
1731 /* step 2: allocate new undo structure */
1732 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1733 if (!new) {
1734 ipc_rcu_putref(sma, ipc_rcu_free);
1735 return ERR_PTR(-ENOMEM);
1736 }
1737
1738 /* step 3: Acquire the lock on semaphore array */
1739 rcu_read_lock();
1740 sem_lock_and_putref(sma);
1741 if (!ipc_valid_object(&sma->sem_perm)) {
1742 sem_unlock(sma, -1);
1743 rcu_read_unlock();
1744 kfree(new);
1745 un = ERR_PTR(-EIDRM);
1746 goto out;
1747 }
1748 spin_lock(&ulp->lock);
1749
1750 /*
1751 * step 4: check for races: did someone else allocate the undo struct?
1752 */
1753 un = lookup_undo(ulp, semid);
1754 if (un) {
1755 kfree(new);
1756 goto success;
1757 }
1758 /* step 5: initialize & link new undo structure */
1759 new->semadj = (short *) &new[1];
1760 new->ulp = ulp;
1761 new->semid = semid;
1762 assert_spin_locked(&ulp->lock);
1763 list_add_rcu(&new->list_proc, &ulp->list_proc);
1764 ipc_assert_locked_object(&sma->sem_perm);
1765 list_add(&new->list_id, &sma->list_id);
1766 un = new;
1767
1768success:
1769 spin_unlock(&ulp->lock);
1770 sem_unlock(sma, -1);
1771out:
1772 return un;
1773}
1774
1775
1776/**
1777 * get_queue_result - retrieve the result code from sem_queue
1778 * @q: Pointer to queue structure
1779 *
1780 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
1781 * q->status, then we must loop until the value is replaced with the final
1782 * value: This may happen if a task is woken up by an unrelated event (e.g.
1783 * signal) and in parallel the task is woken up by another task because it got
1784 * the requested semaphores.
1785 *
1786 * The function can be called with or without holding the semaphore spinlock.
1787 */
1788static int get_queue_result(struct sem_queue *q)
1789{
1790 int error;
1791
1792 error = q->status;
1793 while (unlikely(error == IN_WAKEUP)) {
1794 cpu_relax();
1795 error = q->status;
1796 }
1797
1798 return error;
1799}
1800
1801SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1802 unsigned, nsops, const struct timespec __user *, timeout)
1803{
1804 int error = -EINVAL;
1805 struct sem_array *sma;
1806 struct sembuf fast_sops[SEMOPM_FAST];
1807 struct sembuf *sops = fast_sops, *sop;
1808 struct sem_undo *un;
1809 int undos = 0, alter = 0, max, locknum;
1810 struct sem_queue queue;
1811 unsigned long jiffies_left = 0;
1812 struct ipc_namespace *ns;
1813 struct list_head tasks;
1814
1815 ns = current->nsproxy->ipc_ns;
1816
1817 if (nsops < 1 || semid < 0)
1818 return -EINVAL;
1819 if (nsops > ns->sc_semopm)
1820 return -E2BIG;
1821 if (nsops > SEMOPM_FAST) {
1822 sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
1823 if (sops == NULL)
1824 return -ENOMEM;
1825 }
1826 if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
1827 error = -EFAULT;
1828 goto out_free;
1829 }
1830 if (timeout) {
1831 struct timespec _timeout;
1832 if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
1833 error = -EFAULT;
1834 goto out_free;
1835 }
1836 if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
1837 _timeout.tv_nsec >= 1000000000L) {
1838 error = -EINVAL;
1839 goto out_free;
1840 }
1841 jiffies_left = timespec_to_jiffies(&_timeout);
1842 }
1843 max = 0;
1844 for (sop = sops; sop < sops + nsops; sop++) {
1845 if (sop->sem_num >= max)
1846 max = sop->sem_num;
1847 if (sop->sem_flg & SEM_UNDO)
1848 undos = 1;
1849 if (sop->sem_op != 0)
1850 alter = 1;
1851 }
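	/*
	 * At this point:
	 *	max   - highest semaphore index touched by the operations
	 *	undos - at least one operation has SEM_UNDO set
	 *	alter - at least one operation modifies a semaphore value
	 */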
1852
1853 INIT_LIST_HEAD(&tasks);
1854
1855 if (undos) {
1856 /* On success, find_alloc_undo takes the rcu_read_lock */
1857 un = find_alloc_undo(ns, semid);
1858 if (IS_ERR(un)) {
1859 error = PTR_ERR(un);
1860 goto out_free;
1861 }
1862 } else {
1863 un = NULL;
1864 rcu_read_lock();
1865 }
1866
1867 sma = sem_obtain_object_check(ns, semid);
1868 if (IS_ERR(sma)) {
1869 rcu_read_unlock();
1870 error = PTR_ERR(sma);
1871 goto out_free;
1872 }
1873
1874 error = -EFBIG;
1875 if (max >= sma->sem_nsems)
1876 goto out_rcu_wakeup;
1877
1878 error = -EACCES;
1879 if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
1880 goto out_rcu_wakeup;
1881
1882 error = security_sem_semop(sma, sops, nsops, alter);
1883 if (error)
1884 goto out_rcu_wakeup;
1885
1886 error = -EIDRM;
1887 locknum = sem_lock(sma, sops, nsops);
1888 /*
1889 * We eventually might perform the following check in a lockless
1890 * fashion, considering ipc_valid_object() locking constraints.
1891 * If nsops == 1 and there is no contention for sem_perm.lock, then
1892 * only a per-semaphore lock is held and it's OK to proceed with the
1893 * check below. More details on the fine grained locking scheme
1894 * entangled here and why it's RMID race safe on comments at sem_lock()
1895 */
1896 if (!ipc_valid_object(&sma->sem_perm))
1897 goto out_unlock_free;
1898 /*
1899 * semid identifiers are not unique - find_alloc_undo may have
1900 * allocated an undo structure, it was invalidated by an RMID
1901 * and now a new array received the same id. Check and fail.
1902 * This case can be detected checking un->semid. The existence of
1903 * "un" itself is guaranteed by rcu.
1904 */
1905 if (un && un->semid == -1)
1906 goto out_unlock_free;
1907
1908 queue.sops = sops;
1909 queue.nsops = nsops;
1910 queue.undo = un;
1911 queue.pid = task_tgid_vnr(current);
1912 queue.alter = alter;
1913
1914 error = perform_atomic_semop(sma, &queue);
1915 if (error == 0) {
1916 /* If the operation was successful, then do
1917 * the required updates.
1918 */
1919 if (alter)
1920 do_smart_update(sma, sops, nsops, 1, &tasks);
1921 else
1922 set_semotime(sma, sops);
1923 }
1924 if (error <= 0)
1925 goto out_unlock_free;
1926
1927 /* We need to sleep on this operation, so we put the current
1928 * task into the pending queue and go to sleep.
1929 */
1930
1931 if (nsops == 1) {
1932 struct sem *curr;
1933 curr = &sma->sem_base[sops->sem_num];
1934
1935 if (alter) {
1936 if (sma->complex_count) {
1937 list_add_tail(&queue.list,
1938 &sma->pending_alter);
1939 } else {
1940
1941 list_add_tail(&queue.list,
1942 &curr->pending_alter);
1943 }
1944 } else {
1945 list_add_tail(&queue.list, &curr->pending_const);
1946 }
1947 } else {
1948 if (!sma->complex_count)
1949 merge_queues(sma);
1950
1951 if (alter)
1952 list_add_tail(&queue.list, &sma->pending_alter);
1953 else
1954 list_add_tail(&queue.list, &sma->pending_const);
1955
1956 sma->complex_count++;
1957 }
1958
1959 queue.status = -EINTR;
1960 queue.sleeper = current;
1961
1962sleep_again:
1963 __set_current_state(TASK_INTERRUPTIBLE);
1964 sem_unlock(sma, locknum);
1965 rcu_read_unlock();
1966
1967 if (timeout)
1968 jiffies_left = schedule_timeout(jiffies_left);
1969 else
1970 schedule();
1971
1972 error = get_queue_result(&queue);
1973
1974 if (error != -EINTR) {
1975 /* fast path: update_queue already obtained all requested
1976 * resources.
1977 * Perform a smp_mb(): User space could assume that semop()
1978 * is a memory barrier: Without the mb(), the cpu could
1979 * speculatively read in user space stale data that was
1980 * overwritten by the previous owner of the semaphore.
1981 */
1982 smp_mb();
1983
1984 goto out_free;
1985 }
1986
1987 rcu_read_lock();
1988 sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
1989
1990 /*
1991 * Wait until it's guaranteed that no wake_up_sem_queue_do() is ongoing.
1992 */
1993 error = get_queue_result(&queue);
1994
1995 /*
1996 * Array removed? If yes, leave without sem_unlock().
1997 */
1998 if (IS_ERR(sma)) {
1999 rcu_read_unlock();
2000 goto out_free;
2001 }
2002
2003
2004 /*
2005 * If queue.status != -EINTR we are woken up by another process.
2006 * Leave without unlink_queue(), but with sem_unlock().
2007 */
2008 if (error != -EINTR)
2009 goto out_unlock_free;
2010
2011 /*
2012 * If an interrupt occurred we have to clean up the queue
2013 */
2014 if (timeout && jiffies_left == 0)
2015 error = -EAGAIN;
2016
2017 /*
2018 * If the wakeup was spurious, just retry
2019 */
2020 if (error == -EINTR && !signal_pending(current))
2021 goto sleep_again;
2022
2023 unlink_queue(sma, &queue);
2024
2025out_unlock_free:
2026 sem_unlock(sma, locknum);
2027out_rcu_wakeup:
2028 rcu_read_unlock();
2029 wake_up_sem_queue_do(&tasks);
2030out_free:
2031 if (sops != fast_sops)
2032 kfree(sops);
2033 return error;
2034}
2035
2036SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2037 unsigned, nsops)
2038{
2039 return sys_semtimedop(semid, tsops, nsops, NULL);
2040}
2041
2042/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2043 * parent and child tasks.
2044 */
2045
2046int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2047{
2048 struct sem_undo_list *undo_list;
2049 int error;
2050
2051 if (clone_flags & CLONE_SYSVSEM) {
2052 error = get_undo_list(&undo_list);
2053 if (error)
2054 return error;
2055 atomic_inc(&undo_list->refcnt);
2056 tsk->sysvsem.undo_list = undo_list;
2057 } else
2058 tsk->sysvsem.undo_list = NULL;
2059
2060 return 0;
2061}
2062
2063/*
2064 * add semadj values to semaphores, free undo structures.
2065 * undo structures are not freed when semaphore arrays are destroyed
2066 * so some of them may be out of date.
2067 * IMPLEMENTATION NOTE: There is some confusion over whether the
2068 * set of adjustments that needs to be done should be done in an atomic
2069 * manner or not. That is, if we are attempting to decrement the semval
2070 * should we queue up and wait until we can do so legally?
2071 * The original implementation attempted to do this (queue and wait).
2072 * The current implementation does not do so. The POSIX standard
2073 * and SVID should be consulted to determine what behavior is mandated.
2074 */
2075void exit_sem(struct task_struct *tsk)
2076{
2077 struct sem_undo_list *ulp;
2078
2079 ulp = tsk->sysvsem.undo_list;
2080 if (!ulp)
2081 return;
2082 tsk->sysvsem.undo_list = NULL;
2083
2084 if (!atomic_dec_and_test(&ulp->refcnt))
2085 return;
2086
2087 for (;;) {
2088 struct sem_array *sma;
2089 struct sem_undo *un;
2090 struct list_head tasks;
2091 int semid, i;
2092
2093 rcu_read_lock();
2094 un = list_entry_rcu(ulp->list_proc.next,
2095 struct sem_undo, list_proc);
2096 if (&un->list_proc == &ulp->list_proc) {
2097 /*
2098 * We must wait for freeary() before freeing this ulp,
2099 * in case we raced with the last sem_undo. There is a small
2100 * window where we exit while freeary() hasn't yet
2101 * finished unlocking the sem_undo_list.
2102 */
2103 spin_unlock_wait(&ulp->lock);
2104 rcu_read_unlock();
2105 break;
2106 }
2107 spin_lock(&ulp->lock);
2108 semid = un->semid;
2109 spin_unlock(&ulp->lock);
2110
2111 /* exit_sem raced with IPC_RMID, nothing to do */
2112 if (semid == -1) {
2113 rcu_read_unlock();
2114 continue;
2115 }
2116
2117 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2118 /* exit_sem raced with IPC_RMID, nothing to do */
2119 if (IS_ERR(sma)) {
2120 rcu_read_unlock();
2121 continue;
2122 }
2123
2124 sem_lock(sma, NULL, -1);
2125 /* exit_sem raced with IPC_RMID, nothing to do */
2126 if (!ipc_valid_object(&sma->sem_perm)) {
2127 sem_unlock(sma, -1);
2128 rcu_read_unlock();
2129 continue;
2130 }
2131 un = __lookup_undo(ulp, semid);
2132 if (un == NULL) {
2133 /* exit_sem raced with IPC_RMID+semget() that created
2134 * exactly the same semid. Nothing to do.
2135 */
2136 sem_unlock(sma, -1);
2137 rcu_read_unlock();
2138 continue;
2139 }
2140
2141 /* remove un from the linked lists */
2142 ipc_assert_locked_object(&sma->sem_perm);
2143 list_del(&un->list_id);
2144
2145 /* we are the last process using this ulp, acquiring ulp->lock
2146 * isn't required. Besides that, we are also protected against
2147 * IPC_RMID as we hold sma->sem_perm lock now
2148 */
2149 list_del_rcu(&un->list_proc);
2150
2151 /* perform adjustments registered in un */
2152 for (i = 0; i < sma->sem_nsems; i++) {
2153 struct sem *semaphore = &sma->sem_base[i];
2154 if (un->semadj[i]) {
2155 semaphore->semval += un->semadj[i];
2156 /*
2157 * Range checks of the new semaphore value,
2158 * not defined by SUS:
2159 * - Some unices ignore the undo entirely
2160 * (e.g. HP UX 11i 11.22, Tru64 V5.1)
2161 * - some cap the value (e.g. FreeBSD caps
2162 * at 0, but doesn't enforce SEMVMX)
2163 *
2164 * Linux caps the semaphore value, both at 0
2165 * and at SEMVMX.
2166 *
2167 * Manfred <manfred@colorfullife.com>
2168 */
2169 if (semaphore->semval < 0)
2170 semaphore->semval = 0;
2171 if (semaphore->semval > SEMVMX)
2172 semaphore->semval = SEMVMX;
2173 semaphore->sempid = task_tgid_vnr(current);
2174 }
2175 }
2176 /* maybe some queued-up processes were waiting for this */
2177 INIT_LIST_HEAD(&tasks);
2178 do_smart_update(sma, NULL, 0, 1, &tasks);
2179 sem_unlock(sma, -1);
2180 rcu_read_unlock();
2181 wake_up_sem_queue_do(&tasks);
2182
2183 kfree_rcu(un, rcu);
2184 }
2185 kfree(ulp);
2186}
2187
2188#ifdef CONFIG_PROC_FS
2189static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2190{
2191 struct user_namespace *user_ns = seq_user_ns(s);
2192 struct sem_array *sma = it;
2193 time_t sem_otime;
2194
2195 /*
2196 * The proc interface isn't aware of sem_lock(), it calls
2197 * ipc_lock_object() directly (in sysvipc_find_ipc).
2198 * In order to stay compatible with sem_lock(), we must wait until
2199 * all simple semop() calls have left their critical regions.
2200 */
2201 sem_wait_array(sma);
2202
2203 sem_otime = get_semotime(sma);
2204
2205 seq_printf(s,
2206 "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
2207 sma->sem_perm.key,
2208 sma->sem_perm.id,
2209 sma->sem_perm.mode,
2210 sma->sem_nsems,
2211 from_kuid_munged(user_ns, sma->sem_perm.uid),
2212 from_kgid_munged(user_ns, sma->sem_perm.gid),
2213 from_kuid_munged(user_ns, sma->sem_perm.cuid),
2214 from_kgid_munged(user_ns, sma->sem_perm.cgid),
2215 sem_otime,
2216 sma->sem_ctime);
2217
2218 return 0;
2219}
2220#endif