/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *   Thus: Perfect SMP scaling between independent semaphore arrays.
 *         If multiple semaphores in one array are used, then cache line
 *         thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semncnt() and
 *   count_semzcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare(),
 *   wake_up_sem_queue_do())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - The synchronization between wake-ups due to a timeout/signal and a
 *   wake-up due to a completed semaphore operation is achieved by using an
 *   intermediate state (IN_WAKEUP).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and per-semaphore list (stored in the array). This allows to achieve FIFO
 *   ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
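
/*
 * Illustrative user-space sketch (not part of the kernel build): a single
 * semop() call that atomically decrements two semaphores of one set.
 * Either both operations succeed together or the caller sleeps, which is
 * the multi-sop behavior described above.  Error handling is minimal.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	int take_both(int semid)
 *	{
 *		struct sembuf ops[2] = {
 *			{ .sem_num = 0, .sem_op = -1, .sem_flg = 0 },
 *			{ .sem_num = 1, .sem_op = -1, .sem_flg = 0 },
 *		};
 *
 *		return semop(semid, ops, 2);	(0 on success, -1 + errno)
 *	}
 */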

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>
#include "util.h"

/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	int	sempid;		/* pid of last operation */
	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
	struct list_head pending_alter; /* pending single-sop operations */
					/* that alter the semaphore */
	struct list_head pending_const; /* pending single-sop operations */
					/* that do not alter the semaphore */
	time_t	sem_otime;	/* candidate for sem_otime */
} ____cacheline_aligned_in_smp;

/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	int			pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	int			nsops;	 /* number of operations */
	int			alter;	 /* does *sops alter the array? */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	atomic_t		refcnt;
	spinlock_t		lock;
	struct list_head	list_proc;
};


#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * Locking:
 *	sem_undo.id_next,
 *	sem_array.complex_count,
 *	sem_array.pending{_alter,_const},
 *	sem_array.sem_undo: global sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 *
 *	sem_array.sem_base[i].pending_{const,alter}:
 *		global or semaphore sem_lock() for read/write
 */

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]

void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif

void __init sem_init(void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}

 198
 199/**
 200 * unmerge_queues - unmerge queues, if possible.
 201 * @sma: semaphore array
 202 *
 203 * The function unmerges the wait queues if complex_count is 0.
 204 * It must be called prior to dropping the global semaphore array lock.
 205 */
 206static void unmerge_queues(struct sem_array *sma)
 207{
 208	struct sem_queue *q, *tq;
 209
 210	/* complex operations still around? */
 211	if (sma->complex_count)
 212		return;
 213	/*
 214	 * We will switch back to simple mode.
 215	 * Move all pending operation back into the per-semaphore
 216	 * queues.
 217	 */
 218	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
 219		struct sem *curr;
 220		curr = &sma->sem_base[q->sops[0].sem_num];
 221
 222		list_add_tail(&q->list, &curr->pending_alter);
 223	}
 224	INIT_LIST_HEAD(&sma->pending_alter);
 225}
/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
	int i;
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;

		list_splice_init(&sem->pending_alter, &sma->pending_alter);
	}
}

static void sem_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct sem_array *sma = ipc_rcu_to_struct(p);

	security_sem_free(sma);
	ipc_rcu_free(head);
}

/*
 * Wait until all currently ongoing simple ops have completed.
 * Caller must own sem_perm.lock.
 * New simple ops cannot start, because simple ops first check
 * that a) sem_perm.lock is free and b) complex_count is 0.
 */
static void sem_wait_array(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->complex_count)  {
		/* The thread that increased sma->complex_count waited on
		 * all sem->lock locks. Thus we don't need to wait again.
		 */
		return;
	}

	for (i = 0; i < sma->sem_nsems; i++) {
		sem = sma->sem_base + i;
		spin_unlock_wait(&sem->lock);
	}
}

/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* And wait until all simple ops that are processed
		 * right now have dropped their locks.
		 */
		sem_wait_array(sma);
		return -1;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * The rules are:
	 * - optimized locking is possible if no complex operation
	 *   is either enqueued or processed right now.
	 * - The test for enqueued complex ops is simple:
	 *      sma->complex_count != 0
	 * - Testing for complex ops that are processed right now is
	 *   a bit more difficult. Complex ops acquire the full lock
	 *   and first wait until the running simple ops have completed.
	 *   (see above)
	 *   Thus: If we own a simple lock and the global lock is free
	 *	and complex_count is now 0, then it will stay 0 and
	 *	thus just locking sem->lock is sufficient.
	 */
	sem = sma->sem_base + sops->sem_num;

	if (sma->complex_count == 0) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* Then check that the global lock is free */
		if (!spin_is_locked(&sma->sem_perm.lock)) {
			/* spin_is_locked() is not a memory barrier */
			smp_mb();

			/* Now repeat the test of complex_count:
			 * It can't change anymore until we drop sem->lock.
			 * Thus: if it is now 0, then it will stay 0.
			 */
			if (sma->complex_count == 0) {
				/* fast path successful! */
				return sops->sem_num;
			}
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->complex_count == 0) {
		/* False alarm:
		 * There is no complex operation, thus we can switch
		 * back to the fast path.
		 */
		spin_lock(&sem->lock);
		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/* Not a false alarm, thus complete the sequence for a
		 * full lock.
		 */
		sem_wait_array(sma);
		return -1;
	}
}

static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == -1) {
		unmerge_queues(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = sma->sem_base + locknum;
		spin_unlock(&sem->lock);
	}
}

/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
			int id, struct sembuf *sops, int nsops, int *locknum)
{
	struct kern_ipc_perm *ipcp;
	struct sem_array *sma;

	ipcp = ipc_obtain_object(&sem_ids(ns), id);
	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);
	*locknum = sem_lock(sma, sops, nsops);

	/* ipc_rmid() may have already freed the ID while sem_lock
	 * was spinning: verify that the structure is still valid
	 */
	if (ipc_valid_object(ipcp))
		return container_of(ipcp, struct sem_array, sem_perm);

	sem_unlock(sma, *locknum);
	return ERR_PTR(-EINVAL);
}

static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(sma, ipc_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from the pending list
 *	* setting queue.status to IN_WAKEUP
 *	  This is the notification for the blocked thread that a
 *	  result value is imminent.
 *	* call wake_up_process
 *	* set queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *	* if it's IN_WAKEUP, then it must wait until the value changes
 *	* if it's not -EINTR, then the operation was completed by
 *	  update_queue. semtimedop can return queue.status without
 *	  performing any operation on the sem array.
 *	* otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 *
 */
#define IN_WAKEUP	1

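/*
 * Illustrative sketch (not part of the build) of the waker's side of the
 * protocol above, condensed from wake_up_sem_queue_prepare() and
 * wake_up_sem_queue_do() below: the final status is published only after
 * wake_up_process(), with IN_WAKEUP bridging the gap:
 *
 *	q->status = IN_WAKEUP;		(stage 1: result is imminent)
 *	wake_up_process(q->sleeper);
 *	smp_wmb();
 *	q->status = error;		(stage 2: final value, q may vanish)
 */
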
/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	size = sizeof(*sma) + nsems * sizeof(struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma)
		return -ENOMEM;

	memset(sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma, ipc_rcu_free);
		return retval;
	}

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		ipc_rcu_putref(sma, sem_rcu_free);
		return id;
	}
	ns->used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
		INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
		spin_lock_init(&sma->sem_base[i].lock);
	}

	sma->complex_count = 0;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}


/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}

/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops sem_ops;
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_ops.getnew = newary;
	sem_ops.associate = sem_security;
	sem_ops.more_checks = sem_more_checks;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}

/**
 * perform_atomic_semop - Perform (if possible) a semaphore operation
 * @sma: semaphore array
 * @sops: array with operations that should be checked
 * @nsops: number of operations
 * @un: undo array
 * @pid: pid that did the change
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Negative values are error codes.
 */
static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem *curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sem_base[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}

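/*
 * Illustrative user-space sketch (not part of the build): the IPC_NOWAIT
 * branch above is what turns "caller must sleep" into -EAGAIN.  A
 * non-blocking try-lock on semaphore 0 of a set could look like this:
 *
 *	struct sembuf op = {
 *		.sem_num = 0,
 *		.sem_op  = -1,
 *		.sem_flg = IPC_NOWAIT,
 *	};
 *
 *	if (semop(semid, &op, 1) == -1 && errno == EAGAIN)
 *		;	(the semaphore was busy, try again later)
 */
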
/** wake_up_sem_queue_prepare(q, error): Prepare wake-up
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
	if (list_empty(pt)) {
		/*
		 * Hold preempt off so that we don't get preempted and have the
		 * wakee busy-wait until we're scheduled back on.
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	q->pid = error;

	list_add_tail(&q->list, pt);
}

/**
 * wake_up_sem_queue_do - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up.
 * The function is called without any locks held, thus the semaphore array
 * could be destroyed already and the tasks can disappear as soon as the
 * status is set to the actual return code.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, list) {
		wake_up_process(q->sleeper);
		/* q can disappear immediately after writing q->status. */
		smp_wmb();
		q->status = q->pid;
	}
	if (did_something)
		preempt_enable();
}

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}

/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decremented the value - thus they won't proceed either.
	 */
	return 0;
}

/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
				struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sem_base[semnum].pending_const;

	walk = pending_list->next;
	while (walk != pending_list) {
		int error;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		error = perform_atomic_semop(sma, q->sops, q->nsops,
						 q->undo, q->pid);

		if (error <= 0) {
			/* operation completed, remove from queue & wakeup */
			unlink_queue(sma, q);

			wake_up_sem_queue_prepare(pt, q, error);
			if (error == 0)
				semop_completed = 1;
		}
	}
	return semop_completed;
}

/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @pt: list head of the tasks that must be woken up.
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
					int nsops, struct list_head *pt)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sem_base[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, pt);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sem_base[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, pt);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, pt);

	return semop_completed;
}


/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sem_base[semnum].pending_alter;

again:
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, restart;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per-semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(pt, q, error);
		if (restart)
			goto again;
	}
	return semop_completed;
}

/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
	if (sops == NULL) {
		sma->sem_base[0].sem_otime = get_seconds();
	} else {
		sma->sem_base[sops[0].sem_num].sem_otime =
							get_seconds();
	}
}

/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_sem_queue_do(@pt).
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			int otime, struct list_head *pt)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, pt);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, pt);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decreases.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops won't be able to run: If the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							sops[i].sem_num, pt);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}

/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt(struct sem_array *sma, ushort semnum)
{
	int semncnt;
	struct sem_queue *q;

	semncnt = 0;
	list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
		struct sembuf *sops = q->sops;
		BUG_ON(sops->sem_num != semnum);
		if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
			semncnt++;
	}

	list_for_each_entry(q, &sma->pending_alter, list) {
		struct sembuf *sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}

static int count_semzcnt(struct sem_array *sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue *q;

	semzcnt = 0;
	list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
		struct sembuf *sops = q->sops;
		BUG_ON(sops->sem_num != semnum);
		if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
			semzcnt++;
	}

	list_for_each_entry(q, &sma->pending_const, list) {
		struct sembuf *sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}

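/*
 * Illustrative user-space sketch (not part of the build): these counts are
 * what semctl(GETNCNT)/semctl(GETZCNT) report.  Querying them for semaphore
 * 0 of a set could look like this:
 *
 *	int ncnt = semctl(semid, 0, GETNCNT); (waiting on semval nonzero)
 *	int zcnt = semctl(semid, 0, GETZCNT); (waiting on semval zero)
 */
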
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	struct list_head tasks;
	int i;

	/* Free the existing undo structures for this semaphore set.  */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	INIT_LIST_HEAD(&tasks);
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_sem_queue_do(&tasks);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(sma, sem_rcu_free);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static time_t get_semotime(struct sem_array *sma)
{
	int i;
	time_t res;

	res = sma->sem_base[0].sem_otime;
	for (i = 1; i < sma->sem_nsems; i++) {
		time_t to = sma->sem_base[i].sem_otime;

		if (to > res)
			res = to;
	}
	return res;
}

static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, void __user *p)
{
	int err;
	struct sem_array *sma;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo, 0, sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rwsem);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rwsem);
		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id = 0;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == SEM_STAT) {
			sma = sem_obtain_object(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
			id = sma->sem_perm.id;
		} else {
			sma = sem_obtain_object_check(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = get_semotime(sma);
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		rcu_read_unlock();
		if (copy_semid_to_user(p, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	rcu_read_unlock();
	return err;
}

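/*
 * Illustrative user-space sketch (not part of the build): reading the
 * sem_otime/sem_ctime values assembled above via semctl(IPC_STAT).  SUSv3
 * requires the caller to define union semun itself:
 *
 *	union semun { int val; struct semid_ds *buf; unsigned short *array; };
 *
 *	struct semid_ds ds;
 *	union semun arg = { .buf = &ds };
 *
 *	if (semctl(semid, 0, IPC_STAT, arg) == 0)
 *		printf("nsems=%lu otime=%ld\n",
 *		       (unsigned long)ds.sem_nsems, (long)ds.sem_otime);
 */
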
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		unsigned long arg)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err;
	struct list_head tasks;
	int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
	/* big-endian 64bit */
	val = arg >> 32;
#else
	/* 32bit or little-endian 64bit */
	val = arg;
#endif

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(sma, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	curr = &sma->sem_base[semnum];

	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	curr->sempid = task_tgid_vnr(current);
	sma->sem_ctime = get_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &tasks);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
	return 0;
}

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_rcu_wakeup;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			if (!ipc_rcu_getref(sma)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(sma)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(sma, ipc_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];

		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}
	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma, semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma, semnum);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if (sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}

static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, void __user *p)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if (cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
	}

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
				      &semid64.sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64.sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}

SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	int version;
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		return semctl_nolock(ns, semid, cmd, version, p);
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_RMID:
	case IPC_SET:
		return semctl_down(ns, semid, cmd, version, p);
	default:
		return -EINVAL;
	}
}

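/*
 * Illustrative user-space sketch (not part of the build): dispatching into
 * semctl_setval() above via SETVAL.  The fourth argument arrives here as an
 * unsigned long, which is why user space traditionally wraps it in the
 * caller-defined union semun:
 *
 *	union semun { int val; struct semid_ds *buf; unsigned short *array; };
 *
 *	union semun arg = { .val = 1 };
 *
 *	if (semctl(semid, 0, SETVAL, arg) == -1)
 *		perror("semctl(SETVAL)");
 */
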
/* If the task doesn't already have an undo_list, then allocate one
 * here.  We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}

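/*
 * Illustrative user-space sketch (not part of the build): the undo list
 * tracked here is what makes SEM_UNDO adjustments happen at exit.  If a
 * child decrements with SEM_UNDO and dies, the kernel re-applies the
 * adjustment, so the parent sees the original value again:
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *
 *	if (fork() == 0) {
 *		semop(semid, &op, 1);	(semval drops by one)
 *		_exit(0);		(exit restores semval)
 *	}
 */
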
static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}

/**
 * find_alloc_undo - lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs an rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	if (!ipc_rcu_getref(sma)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(sma, ipc_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}


/**
 * get_queue_result - retrieve the result code from sem_queue
 * @q: Pointer to queue structure
 *
 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
 * q->status, then we must loop until the value is replaced with the final
 * value: This may happen if a task is woken up by an unrelated event (e.g.
 * signal) and in parallel the task is woken up by another task because it got
 * the requested semaphores.
 *
 * The function can be called with or without holding the semaphore spinlock.
 */
static int get_queue_result(struct sem_queue *q)
{
	int error;

	error = q->status;
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = q->status;
	}

	return error;
}

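/*
 * Illustrative user-space sketch (not part of the build): semtimedop() is
 * the timed variant of semop() handled below; once the timeout expires the
 * pending operation fails with EAGAIN:
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	if (semtimedop(semid, &op, 1, &ts) == -1 && errno == EAGAIN)
 *		;	(gave up after one second)
 */
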
1762SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1763		unsigned, nsops, const struct timespec __user *, timeout)
1764{
1765	int error = -EINVAL;
1766	struct sem_array *sma;
1767	struct sembuf fast_sops[SEMOPM_FAST];
1768	struct sembuf *sops = fast_sops, *sop;
1769	struct sem_undo *un;
1770	int undos = 0, alter = 0, max, locknum;
 
1771	struct sem_queue queue;
1772	unsigned long jiffies_left = 0;
1773	struct ipc_namespace *ns;
1774	struct list_head tasks;
1775
1776	ns = current->nsproxy->ipc_ns;
1777
1778	if (nsops < 1 || semid < 0)
1779		return -EINVAL;
1780	if (nsops > ns->sc_semopm)
1781		return -E2BIG;
1782	if (nsops > SEMOPM_FAST) {
1783		sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
1784		if (sops == NULL)
1785			return -ENOMEM;
1786	}
 
1787	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
1788		error =  -EFAULT;
1789		goto out_free;
1790	}
 
1791	if (timeout) {
1792		struct timespec _timeout;
1793		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
1794			error = -EFAULT;
1795			goto out_free;
1796		}
1797		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
1798			_timeout.tv_nsec >= 1000000000L) {
1799			error = -EINVAL;
1800			goto out_free;
1801		}
1802		jiffies_left = timespec_to_jiffies(&_timeout);
1803	}
 
1804	max = 0;
1805	for (sop = sops; sop < sops + nsops; sop++) {
 
 
1806		if (sop->sem_num >= max)
1807			max = sop->sem_num;
1808		if (sop->sem_flg & SEM_UNDO)
1809			undos = 1;
1810		if (sop->sem_op != 0)
1811			alter = 1;
 
 
 
 
 
 
 
 
 
 
 
1812	}
1813
1814	INIT_LIST_HEAD(&tasks);
1815
1816	if (undos) {
1817		/* On success, find_alloc_undo takes the rcu_read_lock */
1818		un = find_alloc_undo(ns, semid);
1819		if (IS_ERR(un)) {
1820			error = PTR_ERR(un);
1821			goto out_free;
1822		}
1823	} else {
1824		un = NULL;
1825		rcu_read_lock();
1826	}
1827
1828	sma = sem_obtain_object_check(ns, semid);
1829	if (IS_ERR(sma)) {
1830		rcu_read_unlock();
1831		error = PTR_ERR(sma);
1832		goto out_free;
1833	}
1834
1835	error = -EFBIG;
1836	if (max >= sma->sem_nsems)
1837		goto out_rcu_wakeup;
 
 
1838
1839	error = -EACCES;
1840	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
1841		goto out_rcu_wakeup;
 
 
1842
1843	error = security_sem_semop(sma, sops, nsops, alter);
1844	if (error)
1845		goto out_rcu_wakeup;
 
 
1846
1847	error = -EIDRM;
1848	locknum = sem_lock(sma, sops, nsops);
1849	/*
1850	 * We eventually might perform the following check in a lockless
1851	 * fashion, considering ipc_valid_object() locking constraints.
1852	 * If nsops == 1 and there is no contention for sem_perm.lock, then
1853	 * only a per-semaphore lock is held and it's OK to proceed with the
1854	 * check below. More details on the fine-grained locking scheme used
1855	 * here, and why it is RMID race safe, are in the comments at sem_lock().
1856	 */
1857	if (!ipc_valid_object(&sma->sem_perm))
1858		goto out_unlock_free;
1859	/*
1860	 * semid identifiers are not unique - find_alloc_undo may have
1861	 * allocated an undo structure, it was invalidated by an RMID
1862	 * and now a new array received the same id. Check and fail.
1863	 * This case can be detected by checking un->semid. The existence of
1864	 * "un" itself is guaranteed by rcu.
1865	 */
1866	if (un && un->semid == -1)
1867		goto out_unlock_free;
1868
1869	error = perform_atomic_semop(sma, sops, nsops, un,
1870					task_tgid_vnr(current));
1871	if (error == 0) {
1872		/* If the operation was successful, then do
1873		 * the required updates.
1874		 */
1875		if (alter)
1876			do_smart_update(sma, sops, nsops, 1, &tasks);
1877		else
1878			set_semotime(sma, sops);
1879	}
1880	if (error <= 0)
1881		goto out_unlock_free;
1882
1883	/* We need to sleep on this operation, so we put the current
1884	 * task into the pending queue and go to sleep.
1885	 */
1886
1887	queue.sops = sops;
1888	queue.nsops = nsops;
1889	queue.undo = un;
1890	queue.pid = task_tgid_vnr(current);
1891	queue.alter = alter;
1892
1893	if (nsops == 1) {
1894		struct sem *curr;
1895		curr = &sma->sem_base[sops->sem_num];
1896
1897		if (alter) {
1898			if (sma->complex_count) {
1899				list_add_tail(&queue.list,
1900						&sma->pending_alter);
1901			} else {
1902
1903				list_add_tail(&queue.list,
1904						&curr->pending_alter);
1905			}
1906		} else {
1907			list_add_tail(&queue.list, &curr->pending_const);
1908		}
1909	} else {
1910		if (!sma->complex_count)
1911			merge_queues(sma);
1912
1913		if (alter)
1914			list_add_tail(&queue.list, &sma->pending_alter);
1915		else
1916			list_add_tail(&queue.list, &sma->pending_const);
1917
1918		sma->complex_count++;
1919	}
1920
1921	queue.status = -EINTR;
1922	queue.sleeper = current;
1923
1924sleep_again:
1925	current->state = TASK_INTERRUPTIBLE;
1926	sem_unlock(sma, locknum);
1927	rcu_read_unlock();
1928
1929	if (timeout)
1930		jiffies_left = schedule_timeout(jiffies_left);
1931	else
1932		schedule();
1933
1934	error = get_queue_result(&queue);
1935
1936	if (error != -EINTR) {
1937		/* fast path: update_queue already obtained all requested
1938		 * resources.
1939		 * Perform a smp_mb(): User space could assume that semop()
1940		 * is a memory barrier: Without the mb(), the cpu could
1941		 * speculatively read in user space stale data that was
1942		 * overwritten by the previous owner of the semaphore.
1943		 */
1944		smp_mb();
1945
1946		goto out_free;
1947	}
1948
1949	rcu_read_lock();
1950	sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
1951
1952	/*
1953	 * Wait until it's guaranteed that no wake_up_sem_queue_do() is ongoing.
1954	 */
1955	error = get_queue_result(&queue);
1956
1957	/*
1958	 * Array removed? If yes, leave without sem_unlock().
1959	 */
1960	if (IS_ERR(sma)) {
1961		rcu_read_unlock();
1962		goto out_free;
1963	}
1964
1965
1966	/*
1967	 * If queue.status != -EINTR, we were woken up by another process.
1968	 * Leave without unlink_queue(), but with sem_unlock().
1969	 */
1970	if (error != -EINTR)
1971		goto out_unlock_free;
1972
1973	/*
1974	 * If an interrupt occurred, we have to clean up the queue.
1975	 */
1976	if (timeout && jiffies_left == 0)
1977		error = -EAGAIN;
1978
1979	/*
1980	 * If the wakeup was spurious, just retry
1981	 */
1982	if (error == -EINTR && !signal_pending(current))
1983		goto sleep_again;
1984
1985	unlink_queue(sma, &queue);
1986
1987out_unlock_free:
1988	sem_unlock(sma, locknum);
1989out_rcu_wakeup:
1990	rcu_read_unlock();
1991	wake_up_sem_queue_do(&tasks);
1992out_free:
1993	if (sops != fast_sops)
1994		kfree(sops);
1995	return error;
1996}
1997
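/*
 * Illustrative user-space sketch (not part of this file): calling
 * semtimedop() with a bounded wait, which exercises the timeout path of
 * the syscall defined above. The semid value and the one-second timeout
 * are assumptions for the example.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/sem.h>
 *	#include <time.h>
 *	#include <stdio.h>
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	if (semtimedop(semid, &op, 1, &ts) == -1)
 *		perror("semtimedop");	// errno == EAGAIN once the timeout expires
 */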
1998SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
1999		unsigned, nsops)
2000{
2001	return sys_semtimedop(semid, tsops, nsops, NULL);
2002}
2003
2004/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2005 * parent and child tasks.
2006 */
2007
2008int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2009{
2010	struct sem_undo_list *undo_list;
2011	int error;
2012
2013	if (clone_flags & CLONE_SYSVSEM) {
2014		error = get_undo_list(&undo_list);
2015		if (error)
2016			return error;
2017		atomic_inc(&undo_list->refcnt);
2018		tsk->sysvsem.undo_list = undo_list;
2019	} else
2020		tsk->sysvsem.undo_list = NULL;
2021
2022	return 0;
2023}
2024
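/*
 * Illustrative sketch (not part of this file): a child created with
 * CLONE_SYSVSEM shares the parent's undo list, so SEM_UNDO adjustments
 * are applied once, by exit_sem() below, when the last sharer exits.
 * child_fn and child_stack are assumed to be set up by the caller.
 *
 *	pid = clone(child_fn, child_stack, CLONE_SYSVSEM | SIGCHLD, NULL);
 */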
2025/*
2026 * add semadj values to semaphores, free undo structures.
2027 * undo structures are not freed when semaphore arrays are destroyed
2028 * so some of them may be out of date.
2029 * IMPLEMENTATION NOTE: There is some confusion over whether the
2030 * set of adjustments should be applied in an atomic manner or not.
2031 * That is, if we are attempting to decrement the semval
2032 * should we queue up and wait until we can do so legally?
2033 * The original implementation attempted to do this (queue and wait).
2034 * The current implementation does not do so. The POSIX standard
2035 * and SVID should be consulted to determine what behavior is mandated.
2036 */
2037void exit_sem(struct task_struct *tsk)
2038{
2039	struct sem_undo_list *ulp;
2040
2041	ulp = tsk->sysvsem.undo_list;
2042	if (!ulp)
2043		return;
2044	tsk->sysvsem.undo_list = NULL;
2045
2046	if (!atomic_dec_and_test(&ulp->refcnt))
2047		return;
2048
2049	for (;;) {
2050		struct sem_array *sma;
2051		struct sem_undo *un;
2052		struct list_head tasks;
2053		int semid, i;
2054
2055		rcu_read_lock();
2056		un = list_entry_rcu(ulp->list_proc.next,
2057				    struct sem_undo, list_proc);
2058		if (&un->list_proc == &ulp->list_proc)
2059			semid = -1;
2060		else
2061			semid = un->semid;
2062
2063		if (semid == -1) {
2064			rcu_read_unlock();
2065			break;
2066		}
2067
2068		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
2069		/* exit_sem raced with IPC_RMID, nothing to do */
2070		if (IS_ERR(sma)) {
2071			rcu_read_unlock();
2072			continue;
2073		}
2074
2075		sem_lock(sma, NULL, -1);
2076		/* exit_sem raced with IPC_RMID, nothing to do */
2077		if (!ipc_valid_object(&sma->sem_perm)) {
2078			sem_unlock(sma, -1);
2079			rcu_read_unlock();
2080			continue;
2081		}
2082		un = __lookup_undo(ulp, semid);
2083		if (un == NULL) {
2084			/* exit_sem raced with IPC_RMID+semget() that created
2085			 * exactly the same semid. Nothing to do.
2086			 */
2087			sem_unlock(sma, -1);
2088			rcu_read_unlock();
2089			continue;
2090		}
2091
2092		/* remove un from the linked lists */
2093		ipc_assert_locked_object(&sma->sem_perm);
2094		list_del(&un->list_id);
2095
2096		spin_lock(&ulp->lock);
2097		list_del_rcu(&un->list_proc);
2098		spin_unlock(&ulp->lock);
2099
2100		/* perform adjustments registered in un */
2101		for (i = 0; i < sma->sem_nsems; i++) {
2102			struct sem *semaphore = &sma->sem_base[i];
2103			if (un->semadj[i]) {
2104				semaphore->semval += un->semadj[i];
2105				/*
2106				 * Range checks of the new semaphore value,
2107				 * not defined by SUS:
2108				 * - Some unices ignore the undo entirely
2109				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2110				 * - some cap the value (e.g. FreeBSD caps
2111				 *   at 0, but doesn't enforce SEMVMX)
2112				 *
2113				 * Linux caps the semaphore value, both at 0
2114				 * and at SEMVMX.
2115				 *
2116				 *	Manfred <manfred@colorfullife.com>
2117				 */
2118				if (semaphore->semval < 0)
2119					semaphore->semval = 0;
2120				if (semaphore->semval > SEMVMX)
2121					semaphore->semval = SEMVMX;
2122				semaphore->sempid = task_tgid_vnr(current);
2123			}
2124		}
2125		/* maybe some queued-up processes were waiting for this */
2126		INIT_LIST_HEAD(&tasks);
2127		do_smart_update(sma, NULL, 0, 1, &tasks);
2128		sem_unlock(sma, -1);
2129		rcu_read_unlock();
2130		wake_up_sem_queue_do(&tasks);
2131
2132		kfree_rcu(un, rcu);
2133	}
2134	kfree(ulp);
2135}
2136
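/*
 * Worked example (illustrative): a task performs semop() with
 * sem_op = -1 and SEM_UNDO, so un->semadj[i] becomes +1. If the task
 * exits without releasing the semaphore, exit_sem() above adds the +1
 * back (semval 0 -> 1), clamping the result to the range 0..SEMVMX.
 */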
2137#ifdef CONFIG_PROC_FS
2138static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2139{
2140	struct user_namespace *user_ns = seq_user_ns(s);
2141	struct sem_array *sma = it;
2142	time_t sem_otime;
2143
2144	/*
2145	 * The proc interface isn't aware of sem_lock(), it calls
2146	 * ipc_lock_object() directly (in sysvipc_find_ipc).
2147	 * In order to stay compatible with sem_lock(), we must wait until
2148	 * all simple semop() calls have left their critical regions.
2149	 */
2150	sem_wait_array(sma);
2151
2152	sem_otime = get_semotime(sma);
2153
2154	return seq_printf(s,
2155			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
2156			  sma->sem_perm.key,
2157			  sma->sem_perm.id,
2158			  sma->sem_perm.mode,
2159			  sma->sem_nsems,
2160			  from_kuid_munged(user_ns, sma->sem_perm.uid),
2161			  from_kgid_munged(user_ns, sma->sem_perm.gid),
2162			  from_kuid_munged(user_ns, sma->sem_perm.cuid),
2163			  from_kgid_munged(user_ns, sma->sem_perm.cgid),
2164			  sem_otime,
2165			  sma->sem_ctime);
2166}
2167#endif
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/ipc/sem.c
   4 * Copyright (C) 1992 Krishna Balasubramanian
   5 * Copyright (C) 1995 Eric Schenk, Bruno Haible
   6 *
   7 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
   8 *
   9 * SMP-threaded, sysctl's added
  10 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
  11 * Enforced range limit on SEM_UNDO
  12 * (c) 2001 Red Hat Inc
  13 * Lockless wakeup
  14 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
  15 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
  16 * Further wakeup optimizations, documentation
  17 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
  18 *
  19 * support for audit of ipc object properties and permission changes
  20 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  21 *
  22 * namespaces support
  23 * OpenVZ, SWsoft Inc.
  24 * Pavel Emelianov <xemul@openvz.org>
  25 *
  26 * Implementation notes: (May 2010)
  27 * This file implements System V semaphores.
  28 *
  29 * User space visible behavior:
  30 * - FIFO ordering for semop() operations (just FIFO, not starvation
  31 *   protection)
  32 * - multiple semaphore operations that alter the same semaphore in
  33 *   one semop() are handled.
  34 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
  35 *   SETALL calls.
  36 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
  37 * - undo adjustments at process exit are limited to 0..SEMVMX.
  38 * - namespaces are supported.
  39 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
  40 *   to /proc/sys/kernel/sem.
  41 * - statistics about the usage are reported in /proc/sysvipc/sem.
  42 *
  43 * Internals:
  44 * - scalability:
  45 *   - all global variables are read-mostly.
  46 *   - semop() calls and semctl(RMID) are synchronized by RCU.
  47 *   - most operations do write operations (actually: spin_lock calls) to
  48 *     the per-semaphore array structure.
  49 *   Thus: Perfect SMP scaling between independent semaphore arrays.
  50 *         If multiple semaphores in one array are used, then cache line
  51 *         thrashing on the semaphore array spinlock will limit the scaling.
  52 * - semncnt and semzcnt are calculated on demand in count_semcnt()
  53 * - the task that performs a successful semop() scans the list of all
  54 *   sleeping tasks and completes any pending operations that can be fulfilled.
  55 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
  56 *   (see update_queue())
  57 * - To improve the scalability, the actual wake-up calls are performed after
  58 *   dropping all locks. (see wake_up_sem_queue_prepare())
  59 * - All work is done by the waker, the woken up task does not have to do
  60 *   anything - not even acquiring a lock or dropping a refcount.
  61 * - A woken up task may not even touch the semaphore array anymore, it may
  62 *   have been destroyed already by a semctl(RMID).
  63 * - UNDO values are stored in an array (one per process and per
  64 *   semaphore array, lazily allocated). For backwards compatibility, multiple
  65 *   modes for the UNDO variables are supported (per process, per thread)
  66 *   (see copy_semundo, CLONE_SYSVSEM)
  67 * - There are two lists of the pending operations: a per-array list
  68 *   and per-semaphore list (stored in the array). This allows to achieve FIFO
  69 *   ordering without always scanning all pending operations.
  70 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
  71 */
  72
  73#include <linux/compat.h>
  74#include <linux/slab.h>
  75#include <linux/spinlock.h>
  76#include <linux/init.h>
  77#include <linux/proc_fs.h>
  78#include <linux/time.h>
  79#include <linux/security.h>
  80#include <linux/syscalls.h>
  81#include <linux/audit.h>
  82#include <linux/capability.h>
  83#include <linux/seq_file.h>
  84#include <linux/rwsem.h>
  85#include <linux/nsproxy.h>
  86#include <linux/ipc_namespace.h>
  87#include <linux/sched/wake_q.h>
  88#include <linux/nospec.h>
  89#include <linux/rhashtable.h>
  90
  91#include <linux/uaccess.h>
  92#include "util.h"
  93
  94/* One semaphore structure for each semaphore in the system. */
  95struct sem {
  96	int	semval;		/* current value */
  97	/*
  98	 * PID of the process that last modified the semaphore. For
  99	 * Linux, specifically these are:
 100	 *  - semop
 101	 *  - semctl, via SETVAL and SETALL.
 102	 *  - at task exit when performing undo adjustments (see exit_sem).
 103	 */
 104	struct pid *sempid;
 105	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
 106	struct list_head pending_alter; /* pending single-sop operations */
 107					/* that alter the semaphore */
 108	struct list_head pending_const; /* pending single-sop operations */
 109					/* that do not alter the semaphore*/
 110	time64_t	 sem_otime;	/* candidate for sem_otime */
 111} ____cacheline_aligned_in_smp;
 112
 113/* One sem_array data structure for each set of semaphores in the system. */
 114struct sem_array {
 115	struct kern_ipc_perm	sem_perm;	/* permissions .. see ipc.h */
 116	time64_t		sem_ctime;	/* create/last semctl() time */
 117	struct list_head	pending_alter;	/* pending operations */
 118						/* that alter the array */
 119	struct list_head	pending_const;	/* pending complex operations */
 120						/* that do not alter semvals */
 121	struct list_head	list_id;	/* undo requests on this array */
 122	int			sem_nsems;	/* no. of semaphores in array */
 123	int			complex_count;	/* pending complex operations */
 124	unsigned int		use_global_lock;/* >0: global lock required */
 125
 126	struct sem		sems[];
 127} __randomize_layout;
 128
 129/* One queue for each sleeping process in the system. */
 130struct sem_queue {
 131	struct list_head	list;	 /* queue of pending operations */
 132	struct task_struct	*sleeper; /* this process */
 133	struct sem_undo		*undo;	 /* undo structure */
 134	struct pid		*pid;	 /* process id of requesting process */
 135	int			status;	 /* completion status of operation */
 136	struct sembuf		*sops;	 /* array of pending operations */
 137	struct sembuf		*blocking; /* the operation that blocked */
 138	int			nsops;	 /* number of operations */
 139	bool			alter;	 /* does *sops alter the array? */
 140	bool                    dupsop;	 /* sops on more than one sem_num */
 141};
 142
 143/* Each task has a list of undo requests. They are executed automatically
 144 * when the process exits.
 145 */
 146struct sem_undo {
 147	struct list_head	list_proc;	/* per-process list: *
 148						 * all undos from one process
 149						 * rcu protected */
 150	struct rcu_head		rcu;		/* rcu struct for sem_undo */
 151	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
 152	struct list_head	list_id;	/* per semaphore array list:
 153						 * all undos for one array */
 154	int			semid;		/* semaphore set identifier */
 155	short			*semadj;	/* array of adjustments */
 156						/* one per semaphore */
 157};
 158
 159/* sem_undo_list controls shared access to the list of sem_undo structures
 160 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
 161 */
 162struct sem_undo_list {
 163	refcount_t		refcnt;
 164	spinlock_t		lock;
 165	struct list_head	list_proc;
 166};
 167
 168
 169#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])
 170
 171static int newary(struct ipc_namespace *, struct ipc_params *);
 172static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
 173#ifdef CONFIG_PROC_FS
 174static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 175#endif
 176
 177#define SEMMSL_FAST	256 /* 512 bytes on stack */
 178#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
 179
 180/*
 181 * Switching from the mode suitable for simple ops
 182 * to the mode for complex ops is costly. Therefore:
 183 * use some hysteresis
 184 */
 185#define USE_GLOBAL_LOCK_HYSTERESIS	10
 186
 187/*
 188 * Locking:
 189 * a) global sem_lock() for read/write
 190 *	sem_undo.id_next,
 191 *	sem_array.complex_count,
 192 *	sem_array.pending{_alter,_const},
 193 *	sem_array.sem_undo
 194 *
 195 * b) global or semaphore sem_lock() for read/write:
 196 *	sem_array.sems[i].pending_{const,alter}:
 197 *
 198 * c) special:
 199 *	sem_undo_list.list_proc:
 200 *	* undo_list->lock for write
 201 *	* rcu for read
 202 *	use_global_lock:
 203 *	* global sem_lock() for write
 204 *	* either local or global sem_lock() for read.
 205 *
 206 * Memory ordering:
 207 * Most ordering is enforced by using spin_lock() and spin_unlock().
 208 *
 209 * Exceptions:
 210 * 1) use_global_lock: (SEM_BARRIER_1)
 211 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
 212 * using smp_store_release(): Immediately after setting it to 0,
 213 * a simple op can start.
 214 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
 215 * smp_load_acquire().
 216 * Setting it from 0 to non-zero must be ordered with regards to
 217 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 218 * is inside a spin_lock() and after a write from 0 to non-zero a
 219 * spin_lock()+spin_unlock() is done.
 220 * To prevent the compiler/cpu temporarily writing 0 to use_global_lock,
 221 * READ_ONCE()/WRITE_ONCE() is used.
 222 *
 223 * 2) queue.status: (SEM_BARRIER_2)
 224 * Initialization is done while holding sem_lock(), so no further barrier is
 225 * required.
 226 * Setting it to a result code is a RELEASE, this is ensured by both a
 227 * smp_store_release() (for case a) and while holding sem_lock()
 228 * (for case b).
 229 * The ACQUIRE when reading the result code without holding sem_lock() is
 230 * achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep().
 231 * (case a above).
 232 * Reading the result code while holding sem_lock() needs no further barriers,
 233 * the locks inside sem_lock() enforce ordering (case b above)
 234 *
 235 * 3) current->state:
 236 * current->state is set to TASK_INTERRUPTIBLE while holding sem_lock().
 237 * The wakeup is handled using the wake_q infrastructure. wake_q wakeups may
 238 * happen immediately after calling wake_q_add. As wake_q_add_safe() is called
 239 * when holding sem_lock(), no further barriers are required.
 240 *
 241 * See also ipc/mqueue.c for more details on the covered races.
 242 */
 243
 244#define sc_semmsl	sem_ctls[0]
 245#define sc_semmns	sem_ctls[1]
 246#define sc_semopm	sem_ctls[2]
 247#define sc_semmni	sem_ctls[3]
 248
 249void sem_init_ns(struct ipc_namespace *ns)
 250{
 251	ns->sc_semmsl = SEMMSL;
 252	ns->sc_semmns = SEMMNS;
 253	ns->sc_semopm = SEMOPM;
 254	ns->sc_semmni = SEMMNI;
 255	ns->used_sems = 0;
 256	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
 257}
 258
 259#ifdef CONFIG_IPC_NS
 260void sem_exit_ns(struct ipc_namespace *ns)
 261{
 262	free_ipcs(ns, &sem_ids(ns), freeary);
 263	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 264	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
 265}
 266#endif
 267
 268void __init sem_init(void)
 269{
 270	sem_init_ns(&init_ipc_ns);
 271	ipc_init_proc_interface("sysvipc/sem",
 272				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
 273				IPC_SEM_IDS, sysvipc_sem_proc_show);
 274}
 275
 276/**
 277 * unmerge_queues - unmerge queues, if possible.
 278 * @sma: semaphore array
 279 *
 280 * The function unmerges the wait queues if complex_count is 0.
 281 * It must be called prior to dropping the global semaphore array lock.
 282 */
 283static void unmerge_queues(struct sem_array *sma)
 284{
 285	struct sem_queue *q, *tq;
 286
 287	/* complex operations still around? */
 288	if (sma->complex_count)
 289		return;
 290	/*
 291	 * We will switch back to simple mode.
 292	 * Move all pending operation back into the per-semaphore
 293	 * queues.
 294	 */
 295	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
 296		struct sem *curr;
 297		curr = &sma->sems[q->sops[0].sem_num];
 298
 299		list_add_tail(&q->list, &curr->pending_alter);
 300	}
 301	INIT_LIST_HEAD(&sma->pending_alter);
 302}
 303
 304/**
 305 * merge_queues - merge single semop queues into global queue
 306 * @sma: semaphore array
 307 *
 308 * This function merges all per-semaphore queues into the global queue.
 309 * It is necessary to achieve FIFO ordering for the pending single-sop
 310 * operations when a multi-semop operation must sleep.
 311 * Only the alter operations must be moved, the const operations can stay.
 312 */
 313static void merge_queues(struct sem_array *sma)
 314{
 315	int i;
 316	for (i = 0; i < sma->sem_nsems; i++) {
 317		struct sem *sem = &sma->sems[i];
 318
 319		list_splice_init(&sem->pending_alter, &sma->pending_alter);
 320	}
 321}
 322
 323static void sem_rcu_free(struct rcu_head *head)
 324{
 325	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
 326	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
 327
 328	security_sem_free(&sma->sem_perm);
 329	kvfree(sma);
 330}
 331
 332/*
 333 * Enter the mode suitable for non-simple operations:
 334 * Caller must own sem_perm.lock.
 335 */
 336static void complexmode_enter(struct sem_array *sma)
 337{
 338	int i;
 339	struct sem *sem;
 340
 341	if (sma->use_global_lock > 0) {
 342		/*
 343		 * We are already in global lock mode.
 344		 * Nothing to do, just reset the
 345		 * counter until we return to simple mode.
 346		 */
 347		WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
 348		return;
 349	}
 350	WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
 351
 352	for (i = 0; i < sma->sem_nsems; i++) {
 353		sem = &sma->sems[i];
 354		spin_lock(&sem->lock);
 355		spin_unlock(&sem->lock);
 356	}
 357}
 358
 359/*
 360 * Try to leave the mode that disallows simple operations:
 361 * Caller must own sem_perm.lock.
 362 */
 363static void complexmode_tryleave(struct sem_array *sma)
 364{
 365	if (sma->complex_count)  {
 366		/* Complex ops are sleeping.
 367		 * We must stay in complex mode
 368		 */
 369		return;
 370	}
 371	if (sma->use_global_lock == 1) {
 372
 373		/* See SEM_BARRIER_1 for purpose/pairing */
 374		smp_store_release(&sma->use_global_lock, 0);
 375	} else {
 376		WRITE_ONCE(sma->use_global_lock,
 377				sma->use_global_lock-1);
 378	}
 379}
 380
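/*
 * Illustrative trace (not part of this file): after a complex operation,
 * use_global_lock is USE_GLOBAL_LOCK_HYSTERESIS (10). Each call to
 * complexmode_tryleave() with no complex ops pending decrements it
 * (9, 8, ..., 1); only the final step from 1 to 0 is a release store
 * that re-enables the per-semaphore fast path in sem_lock() below.
 */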
 381#define SEM_GLOBAL_LOCK	(-1)
 382/*
 383 * If the request contains only one semaphore operation, and there are
 384 * no complex transactions pending, lock only the semaphore involved.
 385 * Otherwise, lock the entire semaphore array, since we either have
 386 * multiple semaphores in our own semops, or we need to look at
 387 * semaphores from other pending complex operations.
 388 */
 389static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 390			      int nsops)
 391{
 392	struct sem *sem;
 393	int idx;
 394
 395	if (nsops != 1) {
 396		/* Complex operation - acquire a full lock */
 397		ipc_lock_object(&sma->sem_perm);
 398
 399		/* Prevent parallel simple ops */
 400		complexmode_enter(sma);
 401		return SEM_GLOBAL_LOCK;
 402	}
 403
 404	/*
 405	 * Only one semaphore affected - try to optimize locking.
 406	 * Optimized locking is possible if no complex operation
 407	 * is either enqueued or processed right now.
 408	 *
 409	 * Both facts are tracked by use_global_lock.
 410	 */
 411	idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
 412	sem = &sma->sems[idx];
 413
 414	/*
 415	 * Initial check for use_global_lock. Just an optimization,
 416	 * no locking, no memory barrier.
 417	 */
 418	if (!READ_ONCE(sma->use_global_lock)) {
 419		/*
 420		 * It appears that no complex operation is around.
 421		 * Acquire the per-semaphore lock.
 422		 */
 423		spin_lock(&sem->lock);
 424
 425		/* see SEM_BARRIER_1 for purpose/pairing */
 426		if (!smp_load_acquire(&sma->use_global_lock)) {
 427			/* fast path successful! */
 428			return sops->sem_num;
 429		}
 430		spin_unlock(&sem->lock);
 431	}
 432
 433	/* slow path: acquire the full lock */
 434	ipc_lock_object(&sma->sem_perm);
 435
 436	if (sma->use_global_lock == 0) {
 437		/*
 438		 * The use_global_lock mode ended while we waited for
 439		 * sma->sem_perm.lock. Thus we must switch to locking
 440		 * with sem->lock.
 441		 * Unlike in the fast path, there is no need to recheck
 442		 * sma->use_global_lock after we have acquired sem->lock:
 443		 * We own sma->sem_perm.lock, thus use_global_lock cannot
 444		 * change.
 445		 */
 446		spin_lock(&sem->lock);
 447
 448		ipc_unlock_object(&sma->sem_perm);
 449		return sops->sem_num;
 450	} else {
 451		/*
 452		 * Not a false alarm, thus continue to use the global lock
 453		 * mode. No need for complexmode_enter(), this was done by
 454		 * the caller that has set use_global_lock to non-zero.
 455		 */
 456		return SEM_GLOBAL_LOCK;
 457	}
 458}
 459
 460static inline void sem_unlock(struct sem_array *sma, int locknum)
 461{
 462	if (locknum == SEM_GLOBAL_LOCK) {
 463		unmerge_queues(sma);
 464		complexmode_tryleave(sma);
 465		ipc_unlock_object(&sma->sem_perm);
 466	} else {
 467		struct sem *sem = &sma->sems[locknum];
 468		spin_unlock(&sem->lock);
 469	}
 470}
 471
 472/*
 473 * sem_lock_(check_) routines are called in the paths where the rwsem
 474 * is not held.
 475 *
 476 * The caller holds the RCU read lock.
 477 */
 478static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
 479{
 480	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
 481
 482	if (IS_ERR(ipcp))
 483		return ERR_CAST(ipcp);
 484
 485	return container_of(ipcp, struct sem_array, sem_perm);
 486}
 487
 488static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
 489							int id)
 490{
 491	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
 492
 493	if (IS_ERR(ipcp))
 494		return ERR_CAST(ipcp);
 495
 496	return container_of(ipcp, struct sem_array, sem_perm);
 497}
 498
 499static inline void sem_lock_and_putref(struct sem_array *sma)
 500{
 501	sem_lock(sma, NULL, -1);
 502	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 503}
 504
 505static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 506{
 507	ipc_rmid(&sem_ids(ns), &s->sem_perm);
 508}
 509
 510static struct sem_array *sem_alloc(size_t nsems)
 511{
 512	struct sem_array *sma;
 513
 514	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
 515		return NULL;
 516
 517	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
 518	if (unlikely(!sma))
 519		return NULL;
 520
 521	return sma;
 522}
 523
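/*
 * Note (illustrative): struct_size(sma, sems, nsems) evaluates to
 * sizeof(struct sem_array) + nsems * sizeof(struct sem) and saturates on
 * overflow; the INT_MAX check in sem_alloc() above additionally bounds
 * nsems before the multiplication.
 */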
 524/**
 525 * newary - Create a new semaphore set
 526 * @ns: namespace
 527 * @params: ptr to the structure that contains key, semflg and nsems
 528 *
 529 * Called with sem_ids.rwsem held (as a writer)
 530 */
 531static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 532{
 533	int retval;
 534	struct sem_array *sma;
 535	key_t key = params->key;
 536	int nsems = params->u.nsems;
 537	int semflg = params->flg;
 538	int i;
 539
 540	if (!nsems)
 541		return -EINVAL;
 542	if (ns->used_sems + nsems > ns->sc_semmns)
 543		return -ENOSPC;
 544
 545	sma = sem_alloc(nsems);
 546	if (!sma)
 547		return -ENOMEM;
 548
 549	sma->sem_perm.mode = (semflg & S_IRWXUGO);
 550	sma->sem_perm.key = key;
 551
 552	sma->sem_perm.security = NULL;
 553	retval = security_sem_alloc(&sma->sem_perm);
 554	if (retval) {
 555		kvfree(sma);
 556		return retval;
 557	}
 558
 559	for (i = 0; i < nsems; i++) {
 560		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
 561		INIT_LIST_HEAD(&sma->sems[i].pending_const);
 562		spin_lock_init(&sma->sems[i].lock);
 563	}
 564
 565	sma->complex_count = 0;
 566	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 567	INIT_LIST_HEAD(&sma->pending_alter);
 568	INIT_LIST_HEAD(&sma->pending_const);
 569	INIT_LIST_HEAD(&sma->list_id);
 570	sma->sem_nsems = nsems;
 571	sma->sem_ctime = ktime_get_real_seconds();
 572
 573	/* ipc_addid() locks sma upon success. */
 574	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
 575	if (retval < 0) {
 576		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 577		return retval;
 578	}
 579	ns->used_sems += nsems;
 580
 581	sem_unlock(sma, -1);
 582	rcu_read_unlock();
 583
 584	return sma->sem_perm.id;
 585}
 586
 587
 588/*
 589 * Called with sem_ids.rwsem and ipcp locked.
 590 */
 591static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
 592{
 593	struct sem_array *sma;
 594
 595	sma = container_of(ipcp, struct sem_array, sem_perm);
 596	if (params->u.nsems > sma->sem_nsems)
 597		return -EINVAL;
 598
 599	return 0;
 600}
 601
 602long ksys_semget(key_t key, int nsems, int semflg)
 603{
 604	struct ipc_namespace *ns;
 605	static const struct ipc_ops sem_ops = {
 606		.getnew = newary,
 607		.associate = security_sem_associate,
 608		.more_checks = sem_more_checks,
 609	};
 610	struct ipc_params sem_params;
 611
 612	ns = current->nsproxy->ipc_ns;
 613
 614	if (nsems < 0 || nsems > ns->sc_semmsl)
 615		return -EINVAL;
 616
 617	sem_params.key = key;
 618	sem_params.flg = semflg;
 619	sem_params.u.nsems = nsems;
 620
 621	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 622}
 623
 624SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 625{
 626	return ksys_semget(key, nsems, semflg);
 627}
 628
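/*
 * Illustrative user-space sketch (not part of this file): create or look
 * up a set of three semaphores via the syscall defined above. The key
 * value is an assumption for the example.
 *
 *	#include <sys/sem.h>
 *	#include <stdio.h>
 *
 *	int semid = semget((key_t)0x1234, 3, IPC_CREAT | 0600);
 *	if (semid == -1)
 *		perror("semget");
 */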
 629/**
 630 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 631 *                               operations on a given array.
 632 * @sma: semaphore array
 633 * @q: struct sem_queue that describes the operation
 634 *
 635 * Whether the caller blocks is determined by the value
 636 * of the semaphore operation (sem_op):
 637 *
 638 *  (1) >0 never blocks.
 639 *  (2)  0 (wait-for-zero operation): blocks while semval is non-zero.
 640 *  (3) <0 blocks if the decrement would take semval below zero.
 641 *
 642 * Returns 0 if the operation was possible.
 643 * Returns 1 if the operation is impossible, the caller must sleep.
 644 * Returns <0 for error codes.
 645 */
 646static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 647{
 648	int result, sem_op, nsops;
 649	struct pid *pid;
 650	struct sembuf *sop;
 651	struct sem *curr;
 652	struct sembuf *sops;
 653	struct sem_undo *un;
 654
 655	sops = q->sops;
 656	nsops = q->nsops;
 657	un = q->undo;
 658
 659	for (sop = sops; sop < sops + nsops; sop++) {
 660		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
 661		curr = &sma->sems[idx];
 662		sem_op = sop->sem_op;
 663		result = curr->semval;
 664
 665		if (!sem_op && result)
 666			goto would_block;
 667
 668		result += sem_op;
 669		if (result < 0)
 670			goto would_block;
 671		if (result > SEMVMX)
 672			goto out_of_range;
 673
 674		if (sop->sem_flg & SEM_UNDO) {
 675			int undo = un->semadj[sop->sem_num] - sem_op;
 676			/* Exceeding the undo range is an error. */
 677			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 678				goto out_of_range;
 679			un->semadj[sop->sem_num] = undo;
 680		}
 681
 682		curr->semval = result;
 683	}
 684
 685	sop--;
 686	pid = q->pid;
 687	while (sop >= sops) {
 688		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
 689		sop--;
 690	}
 691
 692	return 0;
 693
 694out_of_range:
 695	result = -ERANGE;
 696	goto undo;
 697
 698would_block:
 699	q->blocking = sop;
 700
 701	if (sop->sem_flg & IPC_NOWAIT)
 702		result = -EAGAIN;
 703	else
 704		result = 1;
 705
 706undo:
 707	sop--;
 708	while (sop >= sops) {
 709		sem_op = sop->sem_op;
 710		sma->sems[sop->sem_num].semval -= sem_op;
 711		if (sop->sem_flg & SEM_UNDO)
 712			un->semadj[sop->sem_num] += sem_op;
 713		sop--;
 714	}
 715
 716	return result;
 717}
 718
 719static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 720{
 721	int result, sem_op, nsops;
 722	struct sembuf *sop;
 723	struct sem *curr;
 724	struct sembuf *sops;
 725	struct sem_undo *un;
 726
 727	sops = q->sops;
 728	nsops = q->nsops;
 729	un = q->undo;
 730
 731	if (unlikely(q->dupsop))
 732		return perform_atomic_semop_slow(sma, q);
 733
 734	/*
 735	 * We scan the semaphore set twice, first to ensure that the entire
 736	 * operation can succeed, therefore avoiding any pointless writes
 737	 * to shared memory and having to undo such changes in order to block
 738	 * until the operations can go through.
 739	 */
 740	for (sop = sops; sop < sops + nsops; sop++) {
 741		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
 742
 743		curr = &sma->sems[idx];
 744		sem_op = sop->sem_op;
 745		result = curr->semval;
 746
 747		if (!sem_op && result)
 748			goto would_block; /* wait-for-zero */
 749
 750		result += sem_op;
 751		if (result < 0)
 752			goto would_block;
 753
 754		if (result > SEMVMX)
 755			return -ERANGE;
 756
 757		if (sop->sem_flg & SEM_UNDO) {
 758			int undo = un->semadj[sop->sem_num] - sem_op;
 759
 760			/* Exceeding the undo range is an error. */
 761			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 762				return -ERANGE;
 763		}
 764	}
 765
 766	for (sop = sops; sop < sops + nsops; sop++) {
 767		curr = &sma->sems[sop->sem_num];
 768		sem_op = sop->sem_op;
 769		result = curr->semval;
 770
 771		if (sop->sem_flg & SEM_UNDO) {
 772			int undo = un->semadj[sop->sem_num] - sem_op;
 773
 774			un->semadj[sop->sem_num] = undo;
 775		}
 776		curr->semval += sem_op;
 777		ipc_update_pid(&curr->sempid, q->pid);
 778	}
 779
 780	return 0;
 781
 782would_block:
 783	q->blocking = sop;
 784	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
 785}
 786
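/*
 * Worked example (illustrative): for semval = 2 and sem_op = -3 the scan
 * above computes result = -1 < 0, so perform_atomic_semop() leaves the
 * array untouched and returns 1 (the caller must sleep), or -EAGAIN if
 * the blocking sop carries IPC_NOWAIT.
 */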
 787static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
 788					     struct wake_q_head *wake_q)
 789{
 790	struct task_struct *sleeper;
 791
 792	sleeper = get_task_struct(q->sleeper);
 793
 794	/* see SEM_BARRIER_2 for purpose/pairing */
 795	smp_store_release(&q->status, error);
 796
 797	wake_q_add_safe(wake_q, sleeper);
 798}
 799
 800static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
 801{
 802	list_del(&q->list);
 803	if (q->nsops > 1)
 804		sma->complex_count--;
 805}
 806
 807/** check_restart(sma, q)
 808 * @sma: semaphore array
 809 * @q: the operation that just completed
 810 *
 811 * update_queue is O(N^2) when it restarts scanning the whole queue of
 812 * waiting operations. Therefore this function checks if the restart is
 813 * really necessary. It is called after a previously waiting operation
 814 * modified the array.
 815 * Note that wait-for-zero operations are handled without restart.
 816 */
 817static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
 818{
 819	/* pending complex alter operations are too difficult to analyse */
 820	if (!list_empty(&sma->pending_alter))
 821		return 1;
 822
 823	/* we were a sleeping complex operation. Too difficult */
 824	if (q->nsops > 1)
 825		return 1;
 826
 827	/* It is impossible that someone waits for the new value:
 828	 * - complex operations always restart.
 829	 * - wait-for-zero are handled separately.
 830	 * - q is a previously sleeping simple operation that
 831	 *   altered the array. It must be a decrement, because
 832	 *   simple increments never sleep.
 833	 * - If there are older (higher priority) decrements
 834	 *   in the queue, then they have observed the original
 835	 *   semval value and couldn't proceed. The operation
 836 *   decremented the value - thus they won't proceed either.
 837	 */
 838	return 0;
 839}
 840
 841/**
 842 * wake_const_ops - wake up non-alter tasks
 843 * @sma: semaphore array.
 844 * @semnum: semaphore that was modified.
 845 * @wake_q: lockless wake-queue head.
 846 *
 847 * wake_const_ops must be called after a semaphore in a semaphore array
 848 * was set to 0. If complex const operations are pending, wake_const_ops must
 849 * be called with semnum = -1, as well as with the number of each modified
 850 * semaphore.
 851 * The tasks that must be woken up are added to @wake_q. The return code
 852 * is stored in q->status.
 853 * The function returns 1 if at least one operation was completed successfully.
 854 */
 855static int wake_const_ops(struct sem_array *sma, int semnum,
 856			  struct wake_q_head *wake_q)
 857{
 858	struct sem_queue *q, *tmp;
 859	struct list_head *pending_list;
 860	int semop_completed = 0;
 861
 862	if (semnum == -1)
 863		pending_list = &sma->pending_const;
 864	else
 865		pending_list = &sma->sems[semnum].pending_const;
 866
 867	list_for_each_entry_safe(q, tmp, pending_list, list) {
 868		int error = perform_atomic_semop(sma, q);
 869
 870		if (error > 0)
 871			continue;
 872		/* operation completed, remove from queue & wakeup */
 873		unlink_queue(sma, q);
 874
 875		wake_up_sem_queue_prepare(q, error, wake_q);
 876		if (error == 0)
 877			semop_completed = 1;
 878	}
 879
 880	return semop_completed;
 881}
 882
 883/**
 884 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 885 * @sma: semaphore array
 886 * @sops: operations that were performed
 887 * @nsops: number of operations
 888 * @wake_q: lockless wake-queue head
 889 *
 890 * Checks all required queues for wait-for-zero operations, based
 891 * on the actual changes that were performed on the semaphore array.
 892 * The function returns 1 if at least one operation was completed successfully.
 893 */
 894static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 895				int nsops, struct wake_q_head *wake_q)
 896{
 897	int i;
 898	int semop_completed = 0;
 899	int got_zero = 0;
 900
 901	/* first: the per-semaphore queues, if known */
 902	if (sops) {
 903		for (i = 0; i < nsops; i++) {
 904			int num = sops[i].sem_num;
 905
 906			if (sma->sems[num].semval == 0) {
 907				got_zero = 1;
 908				semop_completed |= wake_const_ops(sma, num, wake_q);
 909			}
 910		}
 911	} else {
 912		/*
 913		 * No sops means modified semaphores not known.
 914		 * Assume all were changed.
 915		 */
 916		for (i = 0; i < sma->sem_nsems; i++) {
 917			if (sma->sems[i].semval == 0) {
 918				got_zero = 1;
 919				semop_completed |= wake_const_ops(sma, i, wake_q);
 920			}
 921		}
 922	}
 923	/*
 924	 * If one of the modified semaphores got 0,
 925	 * then check the global queue, too.
 926	 */
 927	if (got_zero)
 928		semop_completed |= wake_const_ops(sma, -1, wake_q);
 929
 930	return semop_completed;
 931}
 932
 933
 934/**
 935 * update_queue - look for tasks that can be completed.
 936 * @sma: semaphore array.
 937 * @semnum: semaphore that was modified.
 938 * @wake_q: lockless wake-queue head.
 939 *
 940 * update_queue must be called after a semaphore in a semaphore array
 941 * was modified. If multiple semaphores were modified, update_queue must
 942 * be called with semnum = -1, as well as with the number of each modified
 943 * semaphore.
 944 * The tasks that must be woken up are added to @wake_q. The return code
 945 * is stored in q->status.
 946 * The function internally checks if const operations can now succeed.
 947 *
 948 * The function returns 1 if at least one semop was completed successfully.
 949 */
 950static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
 951{
 952	struct sem_queue *q, *tmp;
 953	struct list_head *pending_list;
 954	int semop_completed = 0;
 955
 956	if (semnum == -1)
 957		pending_list = &sma->pending_alter;
 958	else
 959		pending_list = &sma->sems[semnum].pending_alter;
 960
 961again:
 962	list_for_each_entry_safe(q, tmp, pending_list, list) {
 963		int error, restart;
 964
 965		/* If we are scanning the single-sop, per-semaphore list of
 966		 * one semaphore and that semaphore is 0, then it is not
 967		 * necessary to scan further: simple increments
 968		 * that affect only one entry succeed immediately and cannot
 969		 * be in the per-semaphore pending queue, and decrements
 970		 * cannot be successful if the value is already 0.
 971		 */
 972		if (semnum != -1 && sma->sems[semnum].semval == 0)
 973			break;
 974
 975		error = perform_atomic_semop(sma, q);
 976
 977		/* Does q->sleeper still need to sleep? */
 978		if (error > 0)
 979			continue;
 980
 981		unlink_queue(sma, q);
 982
 983		if (error) {
 984			restart = 0;
 985		} else {
 986			semop_completed = 1;
 987			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
 988			restart = check_restart(sma, q);
 989		}
 990
 991		wake_up_sem_queue_prepare(q, error, wake_q);
 992		if (restart)
 993			goto again;
 994	}
 995	return semop_completed;
 996}
 997
 998/**
 999 * set_semotime - set sem_otime
1000 * @sma: semaphore array
1001 * @sops: operations that modified the array, may be NULL
1002 *
1003 * sem_otime is replicated to avoid cache line thrashing.
1004 * This function sets one instance to the current time.
1005 */
1006static void set_semotime(struct sem_array *sma, struct sembuf *sops)
1007{
1008	if (sops == NULL) {
1009		sma->sems[0].sem_otime = ktime_get_real_seconds();
1010	} else {
1011		sma->sems[sops[0].sem_num].sem_otime =
1012						ktime_get_real_seconds();
1013	}
1014}
1015
1016/**
1017 * do_smart_update - optimized update_queue
1018 * @sma: semaphore array
1019 * @sops: operations that were performed
1020 * @nsops: number of operations
1021 * @otime: force setting otime
1022 * @wake_q: lockless wake-queue head
1023 *
1024 * do_smart_update() does the required calls to update_queue and wakeup_zero,
1025 * based on the actual changes that were performed on the semaphore array.
1026 * Note that the function does not do the actual wake-up: the caller is
1027 * responsible for calling wake_up_q().
1028 * It is safe to perform this call after dropping all locks.
1029 */
1030static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
1031			    int otime, struct wake_q_head *wake_q)
1032{
1033	int i;
1034
1035	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);
1036
1037	if (!list_empty(&sma->pending_alter)) {
1038		/* semaphore array uses the global queue - just process it. */
1039		otime |= update_queue(sma, -1, wake_q);
1040	} else {
1041		if (!sops) {
1042			/*
1043			 * No sops, thus the modified semaphores are not
1044			 * known. Check all.
1045			 */
1046			for (i = 0; i < sma->sem_nsems; i++)
1047				otime |= update_queue(sma, i, wake_q);
1048		} else {
1049			/*
1050			 * Check the semaphores that were increased:
1051			 * - No complex ops, thus all sleeping ops are
1052			 *   decreases.
1053			 * - if we decreased the value, then any sleeping
1054			 *   semaphore ops won't be able to run: If the
1055			 *   previous value was too small, then the new
1056			 *   value will be too small, too.
1057			 */
1058			for (i = 0; i < nsops; i++) {
1059				if (sops[i].sem_op > 0) {
1060					otime |= update_queue(sma,
1061							      sops[i].sem_num, wake_q);
1062				}
1063			}
1064		}
1065	}
1066	if (otime)
1067		set_semotime(sma, sops);
1068}
1069
1070/*
1071 * check_qop: Test if a queued operation sleeps on the semaphore semnum
1072 */
1073static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
1074			bool count_zero)
1075{
1076	struct sembuf *sop = q->blocking;
1077
1078	/*
1079	 * Linux always (since 0.99.10) reported a task as sleeping on all
1080	 * semaphores. This violates SUS, therefore it was changed to the
1081	 * standard compliant behavior.
1082	 * Give the administrators a chance to notice that an application
1083	 * might misbehave because it relies on the Linux behavior.
1084	 */
1085	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
1086			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
1087			current->comm, task_pid_nr(current));
1088
1089	if (sop->sem_num != semnum)
1090		return 0;
1091
1092	if (count_zero && sop->sem_op == 0)
1093		return 1;
1094	if (!count_zero && sop->sem_op < 0)
1095		return 1;
1096
1097	return 0;
1098}
1099
1100/* The following counts are associated to each semaphore:
1101 *   semncnt        number of tasks waiting on semval being nonzero
1102 *   semzcnt        number of tasks waiting on semval being zero
1103 *
1104 * By definition, a task waits only on the semaphore of the first semop
1105 * that cannot proceed, even if additional operations would block, too.
1106 */
1107static int count_semcnt(struct sem_array *sma, ushort semnum,
1108			bool count_zero)
1109{
1110	struct list_head *l;
1111	struct sem_queue *q;
1112	int semcnt;
1113
1114	semcnt = 0;
1115	/* First: check the simple operations. They are easy to evaluate */
1116	if (count_zero)
1117		l = &sma->sems[semnum].pending_const;
1118	else
1119		l = &sma->sems[semnum].pending_alter;
1120
1121	list_for_each_entry(q, l, list) {
1122		/* all tasks on a per-semaphore list sleep on exactly
1123		 * that semaphore
1124		 */
1125		semcnt++;
1126	}
1127
1128	/* Then: check the complex operations. */
1129	list_for_each_entry(q, &sma->pending_alter, list) {
1130		semcnt += check_qop(sma, semnum, q, count_zero);
1131	}
1132	if (count_zero) {
1133		list_for_each_entry(q, &sma->pending_const, list) {
1134			semcnt += check_qop(sma, semnum, q, count_zero);
1135		}
1136	}
1137	return semcnt;
1138}
1139
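/*
 * Illustrative user-space sketch (not part of this file): reading the
 * counts computed by count_semcnt() above via semctl(). semid is an
 * assumption for the example.
 *
 *	#include <sys/sem.h>
 *
 *	int ncnt = semctl(semid, 0, GETNCNT);	// tasks waiting for semval to rise
 *	int zcnt = semctl(semid, 0, GETZCNT);	// tasks waiting for semval == 0
 */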
1140/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1141 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
1142 * remains locked on exit.
1143 */
1144static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1145{
1146	struct sem_undo *un, *tu;
1147	struct sem_queue *q, *tq;
1148	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1149	int i;
1150	DEFINE_WAKE_Q(wake_q);
1151
1152	/* Free the existing undo structures for this semaphore set.  */
1153	ipc_assert_locked_object(&sma->sem_perm);
1154	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1155		list_del(&un->list_id);
1156		spin_lock(&un->ulp->lock);
1157		un->semid = -1;
1158		list_del_rcu(&un->list_proc);
1159		spin_unlock(&un->ulp->lock);
1160		kvfree_rcu(un, rcu);
1161	}
1162
1163	/* Wake up all pending processes and let them fail with EIDRM. */
1164	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1165		unlink_queue(sma, q);
1166		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1167	}
1168
1169	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1170		unlink_queue(sma, q);
1171		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1172	}
1173	for (i = 0; i < sma->sem_nsems; i++) {
1174		struct sem *sem = &sma->sems[i];
1175		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
1176			unlink_queue(sma, q);
1177			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1178		}
1179		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
1180			unlink_queue(sma, q);
1181			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1182		}
1183		ipc_update_pid(&sem->sempid, NULL);
1184	}
1185
1186	/* Remove the semaphore set from the IDR */
1187	sem_rmid(ns, sma);
1188	sem_unlock(sma, -1);
1189	rcu_read_unlock();
1190
1191	wake_up_q(&wake_q);
1192	ns->used_sems -= sma->sem_nsems;
1193	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1194}
1195
1196static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
1197{
1198	switch (version) {
1199	case IPC_64:
1200		return copy_to_user(buf, in, sizeof(*in));
1201	case IPC_OLD:
1202	    {
1203		struct semid_ds out;
1204
1205		memset(&out, 0, sizeof(out));
1206
1207		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
1208
1209		out.sem_otime	= in->sem_otime;
1210		out.sem_ctime	= in->sem_ctime;
1211		out.sem_nsems	= in->sem_nsems;
1212
1213		return copy_to_user(buf, &out, sizeof(out));
1214	    }
1215	default:
1216		return -EINVAL;
1217	}
1218}
1219
1220static time64_t get_semotime(struct sem_array *sma)
1221{
1222	int i;
1223	time64_t res;
1224
1225	res = sma->sems[0].sem_otime;
1226	for (i = 1; i < sma->sem_nsems; i++) {
1227		time64_t to = sma->sems[i].sem_otime;
1228
1229		if (to > res)
1230			res = to;
1231	}
1232	return res;
1233}
1234
1235static int semctl_stat(struct ipc_namespace *ns, int semid,
1236			 int cmd, struct semid64_ds *semid64)
1237{
1238	struct sem_array *sma;
1239	time64_t semotime;
1240	int err;
1241
1242	memset(semid64, 0, sizeof(*semid64));
1243
1244	rcu_read_lock();
1245	if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
1246		sma = sem_obtain_object(ns, semid);
1247		if (IS_ERR(sma)) {
1248			err = PTR_ERR(sma);
1249			goto out_unlock;
1250		}
1251	} else { /* IPC_STAT */
1252		sma = sem_obtain_object_check(ns, semid);
1253		if (IS_ERR(sma)) {
1254			err = PTR_ERR(sma);
1255			goto out_unlock;
1256		}
1257	}
1258
1259	/* see comment for SHM_STAT_ANY */
1260	if (cmd == SEM_STAT_ANY)
1261		audit_ipc_obj(&sma->sem_perm);
1262	else {
1263		err = -EACCES;
1264		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1265			goto out_unlock;
1266	}
1267
1268	err = security_sem_semctl(&sma->sem_perm, cmd);
1269	if (err)
1270		goto out_unlock;
1271
1272	ipc_lock_object(&sma->sem_perm);
1273
1274	if (!ipc_valid_object(&sma->sem_perm)) {
1275		ipc_unlock_object(&sma->sem_perm);
1276		err = -EIDRM;
1277		goto out_unlock;
1278	}
1279
1280	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
1281	semotime = get_semotime(sma);
1282	semid64->sem_otime = semotime;
1283	semid64->sem_ctime = sma->sem_ctime;
1284#ifndef CONFIG_64BIT
1285	semid64->sem_otime_high = semotime >> 32;
1286	semid64->sem_ctime_high = sma->sem_ctime >> 32;
1287#endif
1288	semid64->sem_nsems = sma->sem_nsems;
1289
1290	if (cmd == IPC_STAT) {
1291		/*
1292		 * As defined in SUS:
1293		 * Return 0 on success
1294		 */
1295		err = 0;
1296	} else {
1297		/*
1298		 * SEM_STAT and SEM_STAT_ANY (both Linux specific)
1299		 * Return the full id, including the sequence number
1300		 */
1301		err = sma->sem_perm.id;
1302	}
1303	ipc_unlock_object(&sma->sem_perm);
1304out_unlock:
1305	rcu_read_unlock();
1306	return err;
1307}
1308
1309static int semctl_info(struct ipc_namespace *ns, int semid,
1310			 int cmd, void __user *p)
1311{
1312	struct seminfo seminfo;
1313	int max_idx;
1314	int err;
1315
1316	err = security_sem_semctl(NULL, cmd);
1317	if (err)
1318		return err;
1319
1320	memset(&seminfo, 0, sizeof(seminfo));
1321	seminfo.semmni = ns->sc_semmni;
1322	seminfo.semmns = ns->sc_semmns;
1323	seminfo.semmsl = ns->sc_semmsl;
1324	seminfo.semopm = ns->sc_semopm;
1325	seminfo.semvmx = SEMVMX;
1326	seminfo.semmnu = SEMMNU;
1327	seminfo.semmap = SEMMAP;
1328	seminfo.semume = SEMUME;
1329	down_read(&sem_ids(ns).rwsem);
1330	if (cmd == SEM_INFO) {
1331		seminfo.semusz = sem_ids(ns).in_use;
1332		seminfo.semaem = ns->used_sems;
1333	} else {
1334		seminfo.semusz = SEMUSZ;
1335		seminfo.semaem = SEMAEM;
1336	}
1337	max_idx = ipc_get_maxidx(&sem_ids(ns));
1338	up_read(&sem_ids(ns).rwsem);
1339	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1340		return -EFAULT;
1341	return (max_idx < 0) ? 0 : max_idx;
1342}
1343
1344static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1345		int val)
1346{
1347	struct sem_undo *un;
1348	struct sem_array *sma;
1349	struct sem *curr;
1350	int err;
1351	DEFINE_WAKE_Q(wake_q);
1352
1353	if (val > SEMVMX || val < 0)
1354		return -ERANGE;
1355
1356	rcu_read_lock();
1357	sma = sem_obtain_object_check(ns, semid);
1358	if (IS_ERR(sma)) {
1359		rcu_read_unlock();
1360		return PTR_ERR(sma);
1361	}
1362
1363	if (semnum < 0 || semnum >= sma->sem_nsems) {
1364		rcu_read_unlock();
1365		return -EINVAL;
1366	}
1367
1368
1369	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1370		rcu_read_unlock();
1371		return -EACCES;
1372	}
1373
1374	err = security_sem_semctl(&sma->sem_perm, SETVAL);
1375	if (err) {
1376		rcu_read_unlock();
1377		return -EACCES;
1378	}
1379
1380	sem_lock(sma, NULL, -1);
1381
1382	if (!ipc_valid_object(&sma->sem_perm)) {
1383		sem_unlock(sma, -1);
1384		rcu_read_unlock();
1385		return -EIDRM;
1386	}
1387
1388	semnum = array_index_nospec(semnum, sma->sem_nsems);
1389	curr = &sma->sems[semnum];
1390
1391	ipc_assert_locked_object(&sma->sem_perm);
1392	list_for_each_entry(un, &sma->list_id, list_id)
1393		un->semadj[semnum] = 0;
1394
1395	curr->semval = val;
1396	ipc_update_pid(&curr->sempid, task_tgid(current));
1397	sma->sem_ctime = ktime_get_real_seconds();
1398	/* maybe some queued-up processes were waiting for this */
1399	do_smart_update(sma, NULL, 0, 0, &wake_q);
1400	sem_unlock(sma, -1);
1401	rcu_read_unlock();
1402	wake_up_q(&wake_q);
1403	return 0;
1404}
1405
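/*
 * Illustrative user-space sketch (not part of this file): initializing a
 * semaphore after semget(), which takes the SETVAL path handled above.
 * On Linux the caller defines union semun itself; semid is an assumption.
 *
 *	#include <sys/sem.h>
 *	#include <stdio.h>
 *
 *	union semun { int val; } arg = { .val = 1 };
 *	if (semctl(semid, 0, SETVAL, arg) == -1)
 *		perror("semctl SETVAL");
 */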
1406static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1407		int cmd, void __user *p)
1408{
1409	struct sem_array *sma;
1410	struct sem *curr;
1411	int err, nsems;
1412	ushort fast_sem_io[SEMMSL_FAST];
1413	ushort *sem_io = fast_sem_io;
1414	DEFINE_WAKE_Q(wake_q);
1415
1416	rcu_read_lock();
1417	sma = sem_obtain_object_check(ns, semid);
1418	if (IS_ERR(sma)) {
1419		rcu_read_unlock();
1420		return PTR_ERR(sma);
1421	}
1422
1423	nsems = sma->sem_nsems;
1424
1425	err = -EACCES;
1426	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1427		goto out_rcu_wakeup;
1428
1429	err = security_sem_semctl(&sma->sem_perm, cmd);
1430	if (err)
1431		goto out_rcu_wakeup;
1432
1433	err = -EACCES;
1434	switch (cmd) {
1435	case GETALL:
1436	{
1437		ushort __user *array = p;
1438		int i;
1439
1440		sem_lock(sma, NULL, -1);
1441		if (!ipc_valid_object(&sma->sem_perm)) {
1442			err = -EIDRM;
1443			goto out_unlock;
1444		}
1445		if (nsems > SEMMSL_FAST) {
1446			if (!ipc_rcu_getref(&sma->sem_perm)) {
1447				err = -EIDRM;
1448				goto out_unlock;
1449			}
1450			sem_unlock(sma, -1);
1451			rcu_read_unlock();
1452			sem_io = kvmalloc_array(nsems, sizeof(ushort),
1453						GFP_KERNEL);
1454			if (sem_io == NULL) {
1455				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1456				return -ENOMEM;
1457			}
1458
1459			rcu_read_lock();
1460			sem_lock_and_putref(sma);
1461			if (!ipc_valid_object(&sma->sem_perm)) {
1462				err = -EIDRM;
1463				goto out_unlock;
1464			}
1465		}
1466		for (i = 0; i < sma->sem_nsems; i++)
1467			sem_io[i] = sma->sems[i].semval;
1468		sem_unlock(sma, -1);
1469		rcu_read_unlock();
1470		err = 0;
1471		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1472			err = -EFAULT;
1473		goto out_free;
1474	}
1475	case SETALL:
1476	{
1477		int i;
1478		struct sem_undo *un;
1479
1480		if (!ipc_rcu_getref(&sma->sem_perm)) {
1481			err = -EIDRM;
1482			goto out_rcu_wakeup;
1483		}
1484		rcu_read_unlock();
1485
1486		if (nsems > SEMMSL_FAST) {
1487			sem_io = kvmalloc_array(nsems, sizeof(ushort),
1488						GFP_KERNEL);
1489			if (sem_io == NULL) {
1490				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1491				return -ENOMEM;
1492			}
1493		}
1494
1495		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
1496			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1497			err = -EFAULT;
1498			goto out_free;
1499		}
1500
1501		for (i = 0; i < nsems; i++) {
1502			if (sem_io[i] > SEMVMX) {
1503				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1504				err = -ERANGE;
1505				goto out_free;
1506			}
1507		}
1508		rcu_read_lock();
1509		sem_lock_and_putref(sma);
1510		if (!ipc_valid_object(&sma->sem_perm)) {
1511			err = -EIDRM;
1512			goto out_unlock;
1513		}
1514
1515		for (i = 0; i < nsems; i++) {
1516			sma->sems[i].semval = sem_io[i];
1517			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
1518		}
1519
1520		ipc_assert_locked_object(&sma->sem_perm);
1521		list_for_each_entry(un, &sma->list_id, list_id) {
1522			for (i = 0; i < nsems; i++)
1523				un->semadj[i] = 0;
1524		}
1525		sma->sem_ctime = ktime_get_real_seconds();
1526		/* maybe some queued-up processes were waiting for this */
1527		do_smart_update(sma, NULL, 0, 0, &wake_q);
1528		err = 0;
1529		goto out_unlock;
1530	}
1531	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1532	}
1533	err = -EINVAL;
1534	if (semnum < 0 || semnum >= nsems)
1535		goto out_rcu_wakeup;
1536
1537	sem_lock(sma, NULL, -1);
1538	if (!ipc_valid_object(&sma->sem_perm)) {
1539		err = -EIDRM;
1540		goto out_unlock;
1541	}
1542
1543	semnum = array_index_nospec(semnum, nsems);
1544	curr = &sma->sems[semnum];
1545
1546	switch (cmd) {
1547	case GETVAL:
1548		err = curr->semval;
1549		goto out_unlock;
1550	case GETPID:
1551		err = pid_vnr(curr->sempid);
1552		goto out_unlock;
1553	case GETNCNT:
1554		err = count_semcnt(sma, semnum, 0);
1555		goto out_unlock;
1556	case GETZCNT:
1557		err = count_semcnt(sma, semnum, 1);
1558		goto out_unlock;
1559	}
1560
1561out_unlock:
1562	sem_unlock(sma, -1);
1563out_rcu_wakeup:
1564	rcu_read_unlock();
1565	wake_up_q(&wake_q);
1566out_free:
1567	if (sem_io != fast_sem_io)
1568		kvfree(sem_io);
1569	return err;
1570}
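
/*
 * Usage sketch (user space, hedged): exercising the GETALL/SETALL
 * branches of semctl_main() above, assuming a three-semaphore set.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int main(void)
{
	unsigned short vals[3] = { 1, 0, 2 };
	union semun arg;
	int i, semid = semget(IPC_PRIVATE, 3, IPC_CREAT | 0600);

	if (semid < 0) {
		perror("semget");
		exit(1);
	}
	arg.array = vals;
	/* SETALL: all values written under one sem_lock(), semadj cleared */
	if (semctl(semid, 0, SETALL, arg) < 0)
		perror("semctl(SETALL)");
	/* GETALL: a snapshot of all semvals copied out in one call */
	if (semctl(semid, 0, GETALL, arg) < 0)
		perror("semctl(GETALL)");
	for (i = 0; i < 3; i++)
		printf("sem %d = %hu\n", i, vals[i]);
	semctl(semid, 0, IPC_RMID);
	return 0;
}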
1571
1572static inline unsigned long
1573copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1574{
1575	switch (version) {
1576	case IPC_64:
1577		if (copy_from_user(out, buf, sizeof(*out)))
1578			return -EFAULT;
1579		return 0;
1580	case IPC_OLD:
1581	    {
1582		struct semid_ds tbuf_old;
1583
1584		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1585			return -EFAULT;
1586
1587		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
1588		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
1589		out->sem_perm.mode	= tbuf_old.sem_perm.mode;
1590
1591		return 0;
1592	    }
1593	default:
1594		return -EINVAL;
1595	}
1596}
1597
1598/*
1599 * This function handles some semctl commands which require the rwsem
1600 * to be held in write mode.
1601 * NOTE: no locks must be held, the rwsem is taken inside this function.
1602 */
1603static int semctl_down(struct ipc_namespace *ns, int semid,
1604		       int cmd, struct semid64_ds *semid64)
1605{
1606	struct sem_array *sma;
1607	int err;
1608	struct kern_ipc_perm *ipcp;
1609
1610	down_write(&sem_ids(ns).rwsem);
1611	rcu_read_lock();
1612
1613	ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
1614				      &semid64->sem_perm, 0);
1615	if (IS_ERR(ipcp)) {
1616		err = PTR_ERR(ipcp);
1617		goto out_unlock1;
1618	}
1619
1620	sma = container_of(ipcp, struct sem_array, sem_perm);
1621
1622	err = security_sem_semctl(&sma->sem_perm, cmd);
1623	if (err)
1624		goto out_unlock1;
1625
1626	switch (cmd) {
1627	case IPC_RMID:
1628		sem_lock(sma, NULL, -1);
1629		/* freeary unlocks the ipc object and rcu */
1630		freeary(ns, ipcp);
1631		goto out_up;
1632	case IPC_SET:
1633		sem_lock(sma, NULL, -1);
1634		err = ipc_update_perm(&semid64->sem_perm, ipcp);
1635		if (err)
1636			goto out_unlock0;
1637		sma->sem_ctime = ktime_get_real_seconds();
1638		break;
1639	default:
1640		err = -EINVAL;
1641		goto out_unlock1;
1642	}
1643
1644out_unlock0:
1645	sem_unlock(sma, -1);
1646out_unlock1:
1647	rcu_read_unlock();
1648out_up:
1649	up_write(&sem_ids(ns).rwsem);
1650	return err;
1651}
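
/*
 * Usage sketch (user space, hedged): the IPC_SET/IPC_RMID commands
 * served by semctl_down() above.  Read the perms with IPC_STAT,
 * tighten the mode with IPC_SET, then remove the set with IPC_RMID.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int main(void)
{
	struct semid_ds ds;
	union semun arg = { .buf = &ds };
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0666);

	if (semid < 0) {
		perror("semget");
		exit(1);
	}
	if (semctl(semid, 0, IPC_STAT, arg) < 0)
		perror("semctl(IPC_STAT)");
	ds.sem_perm.mode = 0600;	/* only uid/gid/mode are settable */
	if (semctl(semid, 0, IPC_SET, arg) < 0)	/* also updates sem_ctime */
		perror("semctl(IPC_SET)");
	if (semctl(semid, 0, IPC_RMID) < 0)	/* the freeary() path */
		perror("semctl(IPC_RMID)");
	return 0;
}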
1652
1653static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
1654{
1655	struct ipc_namespace *ns;
1656	void __user *p = (void __user *)arg;
1657	struct semid64_ds semid64;
1658	int err;
1659
1660	if (semid < 0)
1661		return -EINVAL;
1662
1663	ns = current->nsproxy->ipc_ns;
1664
1665	switch (cmd) {
1666	case IPC_INFO:
1667	case SEM_INFO:
1668		return semctl_info(ns, semid, cmd, p);
1669	case IPC_STAT:
1670	case SEM_STAT:
1671	case SEM_STAT_ANY:
1672		err = semctl_stat(ns, semid, cmd, &semid64);
1673		if (err < 0)
1674			return err;
1675		if (copy_semid_to_user(p, &semid64, version))
1676			err = -EFAULT;
1677		return err;
1678	case GETALL:
1679	case GETVAL:
1680	case GETPID:
1681	case GETNCNT:
1682	case GETZCNT:
1683	case SETALL:
1684		return semctl_main(ns, semid, semnum, cmd, p);
1685	case SETVAL: {
1686		int val;
1687#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1688		/* big-endian 64bit */
1689		val = arg >> 32;
1690#else
1691		/* 32bit or little-endian 64bit */
1692		val = arg;
1693#endif
1694		return semctl_setval(ns, semid, semnum, val);
1695	}
1696	case IPC_SET:
1697		if (copy_semid_from_user(&semid64, p, version))
1698			return -EFAULT;
1699		fallthrough;
1700	case IPC_RMID:
1701		return semctl_down(ns, semid, cmd, &semid64);
1702	default:
1703		return -EINVAL;
1704	}
1705}
1706
1707SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1708{
1709	return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
1710}
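
/*
 * Illustration (user space, hedged): how SETVAL's value travels in the
 * raw "unsigned long arg" slot decoded by ksys_semctl() above.  Assumes
 * an architecture with a native semctl syscall (e.g. x86-64); on
 * big-endian 64-bit kernels the value is taken from the upper 32 bits
 * instead, a detail the C library normally hides from applications.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/syscall.h>

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (semid < 0) {
		perror("semget");
		exit(1);
	}
	/* Native syscall: version is implicitly IPC_64, val rides in arg. */
	if (syscall(SYS_semctl, semid, 0, SETVAL, 7L) < 0)
		perror("semctl raw");
	printf("value now %d\n",
	       (int)syscall(SYS_semctl, semid, 0, GETVAL, 0L));
	syscall(SYS_semctl, semid, 0, IPC_RMID, 0L);
	return 0;
}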
1711
1712#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
1713long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
1714{
1715	int version = ipc_parse_version(&cmd);
1716
1717	return ksys_semctl(semid, semnum, cmd, arg, version);
1718}
1719
1720SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1721{
1722	return ksys_old_semctl(semid, semnum, cmd, arg);
1723}
1724#endif
1725
1726#ifdef CONFIG_COMPAT
1727
1728struct compat_semid_ds {
1729	struct compat_ipc_perm sem_perm;
1730	old_time32_t sem_otime;
1731	old_time32_t sem_ctime;
1732	compat_uptr_t sem_base;
1733	compat_uptr_t sem_pending;
1734	compat_uptr_t sem_pending_last;
1735	compat_uptr_t undo;
1736	unsigned short sem_nsems;
1737};
1738
1739static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
1740					int version)
1741{
1742	memset(out, 0, sizeof(*out));
1743	if (version == IPC_64) {
1744		struct compat_semid64_ds __user *p = buf;
1745		return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
1746	} else {
1747		struct compat_semid_ds __user *p = buf;
1748		return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
1749	}
1750}
1751
1752static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
1753					int version)
1754{
1755	if (version == IPC_64) {
1756		struct compat_semid64_ds v;
1757		memset(&v, 0, sizeof(v));
1758		to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
1759		v.sem_otime	 = lower_32_bits(in->sem_otime);
1760		v.sem_otime_high = upper_32_bits(in->sem_otime);
1761		v.sem_ctime	 = lower_32_bits(in->sem_ctime);
1762		v.sem_ctime_high = upper_32_bits(in->sem_ctime);
1763		v.sem_nsems = in->sem_nsems;
1764		return copy_to_user(buf, &v, sizeof(v));
1765	} else {
1766		struct compat_semid_ds v;
1767		memset(&v, 0, sizeof(v));
1768		to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
1769		v.sem_otime = in->sem_otime;
1770		v.sem_ctime = in->sem_ctime;
1771		v.sem_nsems = in->sem_nsems;
1772		return copy_to_user(buf, &v, sizeof(v));
1773	}
1774}
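
/*
 * Worked illustration (hedged sketch): how a 32-bit (compat) reader
 * reassembles the 64-bit sem_otime that copy_compat_semid_to_user()
 * splits into low/high halves above.  The values are arbitrary; only
 * the arithmetic mirrors lower_32_bits()/upper_32_bits().
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sem_otime = 0x00000010;	/* lower_32_bits(t) */
	uint32_t sem_otime_high = 0x00000001;	/* upper_32_bits(t) */
	uint64_t t = ((uint64_t)sem_otime_high << 32) | sem_otime;

	/* 0x100000010 = 4294967312, a post-2038 time64_t */
	printf("sem_otime = %llu\n", (unsigned long long)t);
	return 0;
}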
1775
1776static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
1777{
1778	void __user *p = compat_ptr(arg);
1779	struct ipc_namespace *ns;
1780	struct semid64_ds semid64;
1781	int err;
1782
1783	ns = current->nsproxy->ipc_ns;
1784
1785	if (semid < 0)
1786		return -EINVAL;
1787
1788	switch (cmd & (~IPC_64)) {
1789	case IPC_INFO:
1790	case SEM_INFO:
1791		return semctl_info(ns, semid, cmd, p);
1792	case IPC_STAT:
1793	case SEM_STAT:
1794	case SEM_STAT_ANY:
1795		err = semctl_stat(ns, semid, cmd, &semid64);
1796		if (err < 0)
1797			return err;
1798		if (copy_compat_semid_to_user(p, &semid64, version))
1799			err = -EFAULT;
1800		return err;
1801	case GETVAL:
1802	case GETPID:
1803	case GETNCNT:
1804	case GETZCNT:
1805	case GETALL:
1806	case SETALL:
1807		return semctl_main(ns, semid, semnum, cmd, p);
1808	case SETVAL:
1809		return semctl_setval(ns, semid, semnum, arg);
1810	case IPC_SET:
1811		if (copy_compat_semid_from_user(&semid64, p, version))
1812			return -EFAULT;
1813		fallthrough;
1814	case IPC_RMID:
1815		return semctl_down(ns, semid, cmd, &semid64);
1816	default:
1817		return -EINVAL;
1818	}
1819}
1820
1821COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
1822{
1823	return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
1824}
1825
1826#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
1827long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
1828{
1829	int version = compat_ipc_parse_version(&cmd);
1830
1831	return compat_ksys_semctl(semid, semnum, cmd, arg, version);
1832}
1833
1834COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
1835{
1836	return compat_ksys_old_semctl(semid, semnum, cmd, arg);
1837}
1838#endif
1839#endif
1840
1841/* If the task doesn't already have an undo_list, then allocate one
1842 * here.  We guarantee there is only one thread using this undo list,
1843 * and current is THE ONE.
1844 *
1845 * If this allocation and assignment succeeds, but later
1846 * portions of this code fail, there is no need to free the sem_undo_list.
1847 * Just let it stay associated with the task, and it'll be freed later
1848 * at exit time.
1849 *
1850 * This can block, so callers must hold no locks.
1851 */
1852static inline int get_undo_list(struct sem_undo_list **undo_listp)
1853{
1854	struct sem_undo_list *undo_list;
1855
1856	undo_list = current->sysvsem.undo_list;
1857	if (!undo_list) {
1858		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1859		if (undo_list == NULL)
1860			return -ENOMEM;
1861		spin_lock_init(&undo_list->lock);
1862		refcount_set(&undo_list->refcnt, 1);
1863		INIT_LIST_HEAD(&undo_list->list_proc);
1864
1865		current->sysvsem.undo_list = undo_list;
1866	}
1867	*undo_listp = undo_list;
1868	return 0;
1869}
1870
1871static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1872{
1873	struct sem_undo *un;
1874
1875	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
1876				spin_is_locked(&ulp->lock)) {
1877		if (un->semid == semid)
1878			return un;
1879	}
1880	return NULL;
1881}
1882
1883static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1884{
1885	struct sem_undo *un;
1886
1887	assert_spin_locked(&ulp->lock);
1888
1889	un = __lookup_undo(ulp, semid);
1890	if (un) {
1891		list_del_rcu(&un->list_proc);
1892		list_add_rcu(&un->list_proc, &ulp->list_proc);
1893	}
1894	return un;
1895}
1896
1897/**
1898 * find_alloc_undo - lookup (and if not present create) undo array
1899 * @ns: namespace
1900 * @semid: semaphore array id
1901 *
1902 * The function looks up (and if not present creates) the undo structure.
1903 * The size of the undo structure depends on the size of the semaphore
1904 * array, thus the alloc path is not that straightforward.
1905 * Lifetime rules: sem_undo is rcu-protected; on success, the function
1906 * returns with rcu_read_lock() held.
1907 */
1908static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1909{
1910	struct sem_array *sma;
1911	struct sem_undo_list *ulp;
1912	struct sem_undo *un, *new;
1913	int nsems, error;
1914
1915	error = get_undo_list(&ulp);
1916	if (error)
1917		return ERR_PTR(error);
1918
1919	rcu_read_lock();
1920	spin_lock(&ulp->lock);
1921	un = lookup_undo(ulp, semid);
1922	spin_unlock(&ulp->lock);
1923	if (likely(un != NULL))
1924		goto out;
1925
1926	/* no undo structure around - allocate one. */
1927	/* step 1: figure out the size of the semaphore array */
1928	sma = sem_obtain_object_check(ns, semid);
1929	if (IS_ERR(sma)) {
1930		rcu_read_unlock();
1931		return ERR_CAST(sma);
1932	}
1933
1934	nsems = sma->sem_nsems;
1935	if (!ipc_rcu_getref(&sma->sem_perm)) {
1936		rcu_read_unlock();
1937		un = ERR_PTR(-EIDRM);
1938		goto out;
1939	}
1940	rcu_read_unlock();
1941
1942	/* step 2: allocate new undo structure */
1943	new = kvzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems,
1944		       GFP_KERNEL);
1945	if (!new) {
1946		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1947		return ERR_PTR(-ENOMEM);
1948	}
1949
1950	/* step 3: Acquire the lock on the semaphore array */
1951	rcu_read_lock();
1952	sem_lock_and_putref(sma);
1953	if (!ipc_valid_object(&sma->sem_perm)) {
1954		sem_unlock(sma, -1);
1955		rcu_read_unlock();
1956		kvfree(new);
1957		un = ERR_PTR(-EIDRM);
1958		goto out;
1959	}
1960	spin_lock(&ulp->lock);
1961
1962	/*
1963	 * step 4: check for races: did someone else allocate the undo struct?
1964	 */
1965	un = lookup_undo(ulp, semid);
1966	if (un) {
1967		kvfree(new);
1968		goto success;
1969	}
1970	/* step 5: initialize & link new undo structure */
1971	new->semadj = (short *) &new[1];
1972	new->ulp = ulp;
1973	new->semid = semid;
1974	assert_spin_locked(&ulp->lock);
1975	list_add_rcu(&new->list_proc, &ulp->list_proc);
1976	ipc_assert_locked_object(&sma->sem_perm);
1977	list_add(&new->list_id, &sma->list_id);
1978	un = new;
1979
1980success:
1981	spin_unlock(&ulp->lock);
1982	sem_unlock(sma, -1);
1983out:
1984	return un;
1985}
1986
1987static long do_semtimedop(int semid, struct sembuf __user *tsops,
1988		unsigned nsops, const struct timespec64 *timeout)
1989{
1990	int error = -EINVAL;
1991	struct sem_array *sma;
1992	struct sembuf fast_sops[SEMOPM_FAST];
1993	struct sembuf *sops = fast_sops, *sop;
1994	struct sem_undo *un;
1995	int max, locknum;
1996	bool undos = false, alter = false, dupsop = false;
1997	struct sem_queue queue;
1998	unsigned long dup = 0, jiffies_left = 0;
1999	struct ipc_namespace *ns;
2000
2001	ns = current->nsproxy->ipc_ns;
2002
2003	if (nsops < 1 || semid < 0)
2004		return -EINVAL;
2005	if (nsops > ns->sc_semopm)
2006		return -E2BIG;
2007	if (nsops > SEMOPM_FAST) {
2008		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
2009		if (sops == NULL)
2010			return -ENOMEM;
2011	}
2012
2013	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
2014		error = -EFAULT;
2015		goto out_free;
2016	}
2017
2018	if (timeout) {
2019		if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
2020			timeout->tv_nsec >= 1000000000L) {
2021			error = -EINVAL;
2022			goto out_free;
2023		}
2024		jiffies_left = timespec64_to_jiffies(timeout);
2025	}
2026
2027	max = 0;
2028	for (sop = sops; sop < sops + nsops; sop++) {
2029		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
2030
2031		if (sop->sem_num >= max)
2032			max = sop->sem_num;
2033		if (sop->sem_flg & SEM_UNDO)
2034			undos = true;
2035		if (dup & mask) {
2036			/*
2037			 * There was a previous alter access that appears
2038			 * to have accessed the same semaphore, thus use
2039			 * the dupsop logic. "appears", because the detection
2040			 * can only check % BITS_PER_LONG.
2041			 */
2042			dupsop = true;
2043		}
2044		if (sop->sem_op != 0) {
2045			alter = true;
2046			dup |= mask;
2047		}
2048	}
2049
2050	if (undos) {
2051		/* On success, find_alloc_undo takes the rcu_read_lock */
2052		un = find_alloc_undo(ns, semid);
2053		if (IS_ERR(un)) {
2054			error = PTR_ERR(un);
2055			goto out_free;
2056		}
2057	} else {
2058		un = NULL;
2059		rcu_read_lock();
2060	}
2061
2062	sma = sem_obtain_object_check(ns, semid);
2063	if (IS_ERR(sma)) {
2064		rcu_read_unlock();
2065		error = PTR_ERR(sma);
2066		goto out_free;
2067	}
2068
2069	error = -EFBIG;
2070	if (max >= sma->sem_nsems) {
2071		rcu_read_unlock();
2072		goto out_free;
2073	}
2074
2075	error = -EACCES;
2076	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
2077		rcu_read_unlock();
2078		goto out_free;
2079	}
2080
2081	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
2082	if (error) {
2083		rcu_read_unlock();
2084		goto out_free;
2085	}
2086
2087	error = -EIDRM;
2088	locknum = sem_lock(sma, sops, nsops);
2089	/*
2090	 * We eventually might perform the following check in a lockless
2091	 * fashion, considering ipc_valid_object() locking constraints.
2092	 * If nsops == 1 and there is no contention for sem_perm.lock, then
2093	 * only a per-semaphore lock is held and it's OK to proceed with the
2094	 * check below. More details on the fine-grained locking scheme
2095	 * entangled here, and why it is RMID race safe, are in the comments at sem_lock().
2096	 */
2097	if (!ipc_valid_object(&sma->sem_perm))
2098		goto out_unlock_free;
2099	/*
2100	 * semid identifiers are not unique - find_alloc_undo may have
2101	 * allocated an undo structure, it was invalidated by an RMID
2102	 * and now a new array has received the same id. Check and fail.
2103	 * This case can be detected by checking un->semid. The existence of
2104	 * "un" itself is guaranteed by rcu.
2105	 */
2106	if (un && un->semid == -1)
2107		goto out_unlock_free;
2108
2109	queue.sops = sops;
2110	queue.nsops = nsops;
2111	queue.undo = un;
2112	queue.pid = task_tgid(current);
2113	queue.alter = alter;
2114	queue.dupsop = dupsop;
2115
2116	error = perform_atomic_semop(sma, &queue);
2117	if (error == 0) { /* non-blocking successful path */
2118		DEFINE_WAKE_Q(wake_q);
2119
2120		/*
2121		 * If the operation was successful, then do
2122		 * the required updates.
2123		 */
2124		if (alter)
2125			do_smart_update(sma, sops, nsops, 1, &wake_q);
2126		else
2127			set_semotime(sma, sops);
2128
2129		sem_unlock(sma, locknum);
2130		rcu_read_unlock();
2131		wake_up_q(&wake_q);
2132
2133		goto out_free;
2134	}
2135	if (error < 0) /* non-blocking error path */
2136		goto out_unlock_free;
2137
2138	/*
2139	 * We need to sleep on this operation, so we put the current
2140	 * task into the pending queue and go to sleep.
2141	 */
2142	if (nsops == 1) {
2143		struct sem *curr;
2144		int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
2145		curr = &sma->sems[idx];
2146
2147		if (alter) {
2148			if (sma->complex_count) {
2149				list_add_tail(&queue.list,
2150						&sma->pending_alter);
2151			} else {
2152
2153				list_add_tail(&queue.list,
2154						&curr->pending_alter);
2155			}
2156		} else {
2157			list_add_tail(&queue.list, &curr->pending_const);
2158		}
2159	} else {
2160		if (!sma->complex_count)
2161			merge_queues(sma);
2162
2163		if (alter)
2164			list_add_tail(&queue.list, &sma->pending_alter);
2165		else
2166			list_add_tail(&queue.list, &sma->pending_const);
2167
2168		sma->complex_count++;
2169	}
2170
2171	do {
2172		/* memory ordering ensured by the lock in sem_lock() */
2173		WRITE_ONCE(queue.status, -EINTR);
2174		queue.sleeper = current;
2175
2176		/* memory ordering is ensured by the lock in sem_lock() */
2177		__set_current_state(TASK_INTERRUPTIBLE);
2178		sem_unlock(sma, locknum);
2179		rcu_read_unlock();
2180
2181		if (timeout)
2182			jiffies_left = schedule_timeout(jiffies_left);
2183		else
2184			schedule();
2185
2186		/*
2187		 * fastpath: the semop has completed. Whether it succeeded or
2188		 * failed is, from the syscall's point of view, irrelevant at
2189		 * this point; we're done.
2190		 *
2191		 * We _do_ care, nonetheless, about being awoken by a signal or
2192		 * spuriously.  queue.status is checked again in the slowpath
2193		 * (i.e. after taking sem_lock), so that we can detect
2194		 * scenarios where we were awakened externally, during the
2195		 * window between wake_q_add() and wake_up_q().
2196		 */
2197		error = READ_ONCE(queue.status);
2198		if (error != -EINTR) {
2199			/* see SEM_BARRIER_2 for purpose/pairing */
2200			smp_acquire__after_ctrl_dep();
2201			goto out_free;
2202		}
2203
2204		rcu_read_lock();
2205		locknum = sem_lock(sma, sops, nsops);
2206
2207		if (!ipc_valid_object(&sma->sem_perm))
2208			goto out_unlock_free;
2209
2210		/*
2211		 * No barrier is necessary: we are protected by sem_lock().
2212		 */
2213		error = READ_ONCE(queue.status);
2214
2215		/*
2216		 * If queue.status != -EINTR, we were woken up by another process.
2217		 * Leave without unlink_queue(), but with sem_unlock().
2218		 */
2219		if (error != -EINTR)
2220			goto out_unlock_free;
2221
2222		/*
2223		 * If an interrupt occurred we have to clean up the queue.
2224		 */
2225		if (timeout && jiffies_left == 0)
2226			error = -EAGAIN;
2227	} while (error == -EINTR && !signal_pending(current)); /* spurious */
2228
2229	unlink_queue(sma, &queue);
2230
2231out_unlock_free:
2232	sem_unlock(sma, locknum);
2233	rcu_read_unlock();
2234out_free:
2235	if (sops != fast_sops)
2236		kvfree(sops);
2237	return error;
2238}
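
/*
 * Usage sketch (user space, hedged): the blocking path of
 * do_semtimedop() above, driven via semtimedop(2).  Linux initializes
 * new semaphores to 0, so the decrement below must block; the one-second
 * wait then fails with EAGAIN, matching the
 * "timeout && jiffies_left == 0" branch.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	struct sembuf sop = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (semid < 0) {
		perror("semget");
		exit(1);
	}
	if (semtimedop(semid, &sop, 1, &ts) < 0 && errno == EAGAIN)
		printf("timed out, as expected\n");
	semctl(semid, 0, IPC_RMID);
	return 0;
}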
2239
2240long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2241		     unsigned int nsops, const struct __kernel_timespec __user *timeout)
2242{
2243	if (timeout) {
2244		struct timespec64 ts;
2245		if (get_timespec64(&ts, timeout))
2246			return -EFAULT;
2247		return do_semtimedop(semid, tsops, nsops, &ts);
2248	}
2249	return do_semtimedop(semid, tsops, nsops, NULL);
2250}
2251
2252SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
2253		unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
2254{
2255	return ksys_semtimedop(semid, tsops, nsops, timeout);
2256}
2257
2258#ifdef CONFIG_COMPAT_32BIT_TIME
2259long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2260			    unsigned int nsops,
2261			    const struct old_timespec32 __user *timeout)
2262{
2263	if (timeout) {
2264		struct timespec64 ts;
2265		if (get_old_timespec32(&ts, timeout))
2266			return -EFAULT;
2267		return do_semtimedop(semid, tsems, nsops, &ts);
2268	}
2269	return do_semtimedop(semid, tsems, nsops, NULL);
2270}
2271
2272SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
2273		       unsigned int, nsops,
2274		       const struct old_timespec32 __user *, timeout)
2275{
2276	return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
2277}
2278#endif
2279
2280SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2281		unsigned, nsops)
2282{
2283	return do_semtimedop(semid, tsops, nsops, NULL);
2284}
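
/*
 * Usage sketch (user space, hedged): the classic P()/V() pair on top of
 * the semop(2) entry point above, i.e. do_semtimedop() with no timeout.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/sem.h>

static void op(int semid, short delta)
{
	struct sembuf sop = { .sem_num = 0, .sem_op = delta, .sem_flg = 0 };

	if (semop(semid, &sop, 1) < 0) {
		perror("semop");
		exit(1);
	}
}

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (semid < 0) {
		perror("semget");
		exit(1);
	}
	op(semid, 1);	/* V: non-blocking fast path, may wake waiters */
	op(semid, -1);	/* P: succeeds immediately, the value is 1 */
	semctl(semid, 0, IPC_RMID);
	return 0;
}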
2285
2286/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2287 * parent and child tasks.
2288 */
2289
2290int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2291{
2292	struct sem_undo_list *undo_list;
2293	int error;
2294
2295	if (clone_flags & CLONE_SYSVSEM) {
2296		error = get_undo_list(&undo_list);
2297		if (error)
2298			return error;
2299		refcount_inc(&undo_list->refcnt);
2300		tsk->sysvsem.undo_list = undo_list;
2301	} else
2302		tsk->sysvsem.undo_list = NULL;
2303
2304	return 0;
2305}
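
/*
 * Sketch (user space, hedged): CLONE_SYSVSEM in practice.  Threads
 * created by pthread_create() share one sem_undo_list (glibc passes
 * CLONE_SYSVSEM to clone()), so a SEM_UNDO adjustment made in a thread
 * survives that thread's exit: the shared list's refcount stays above
 * zero, and the undo is only applied when the last user exits.
 * Build with -pthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/sem.h>

static int semid;

static void *worker(void *unused)
{
	struct sembuf sop = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };

	(void)unused;
	/* The semadj lands in the undo list shared with main(). */
	if (semop(semid, &sop, 1) < 0)
		perror("semop");
	return NULL;
}

int main(void)
{
	pthread_t t;

	semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	if (semid < 0) {
		perror("semget");
		exit(1);
	}
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(&t, NULL);
	/* Prints 1: the thread's exit did not apply the shared undo. */
	printf("value after thread exit: %d\n", semctl(semid, 0, GETVAL));
	semctl(semid, 0, IPC_RMID);
	return 0;
}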
2306
2307/*
2308 * add semadj values to semaphores, free undo structures.
2309 * undo structures are not freed when semaphore arrays are destroyed
2310 * so some of them may be out of date.
2311 * IMPLEMENTATION NOTE: There is some confusion over whether the
2312 * set of adjustments should be applied atomically or not. That is,
2313 * if we are attempting to decrement the semval, should we queue up
2314 * and wait until we can do so legally?
2315 * The original implementation attempted to do this (queue and wait).
2316 * The current implementation does not do so. The POSIX standard
2317 * and SVID should be consulted to determine what behavior is mandated.
2318 */
2319void exit_sem(struct task_struct *tsk)
2320{
2321	struct sem_undo_list *ulp;
2322
2323	ulp = tsk->sysvsem.undo_list;
2324	if (!ulp)
2325		return;
2326	tsk->sysvsem.undo_list = NULL;
2327
2328	if (!refcount_dec_and_test(&ulp->refcnt))
2329		return;
2330
2331	for (;;) {
2332		struct sem_array *sma;
2333		struct sem_undo *un;
2334		int semid, i;
2335		DEFINE_WAKE_Q(wake_q);
2336
2337		cond_resched();
2338
2339		rcu_read_lock();
2340		un = list_entry_rcu(ulp->list_proc.next,
2341				    struct sem_undo, list_proc);
2342		if (&un->list_proc == &ulp->list_proc) {
2343			/*
2344			 * We must wait for freeary() before freeing this ulp,
2345			 * in case we raced with the last sem_undo. There is a
2346			 * small window in which we could exit before freeary()
2347			 * has finished unlocking the sem_undo_list.
2348			 */
2349			spin_lock(&ulp->lock);
2350			spin_unlock(&ulp->lock);
2351			rcu_read_unlock();
2352			break;
2353		}
2354		spin_lock(&ulp->lock);
2355		semid = un->semid;
2356		spin_unlock(&ulp->lock);
2357
2358		/* exit_sem raced with IPC_RMID, nothing to do */
2359		if (semid == -1) {
2360			rcu_read_unlock();
2361			continue;
2362		}
2363
2364		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2365		/* exit_sem raced with IPC_RMID, nothing to do */
2366		if (IS_ERR(sma)) {
2367			rcu_read_unlock();
2368			continue;
2369		}
2370
2371		sem_lock(sma, NULL, -1);
2372		/* exit_sem raced with IPC_RMID, nothing to do */
2373		if (!ipc_valid_object(&sma->sem_perm)) {
2374			sem_unlock(sma, -1);
2375			rcu_read_unlock();
2376			continue;
2377		}
2378		un = __lookup_undo(ulp, semid);
2379		if (un == NULL) {
2380			/* exit_sem raced with IPC_RMID+semget() that created
2381			 * exactly the same semid. Nothing to do.
2382			 */
2383			sem_unlock(sma, -1);
2384			rcu_read_unlock();
2385			continue;
2386		}
2387
2388		/* remove un from the linked lists */
2389		ipc_assert_locked_object(&sma->sem_perm);
2390		list_del(&un->list_id);
2391
2392		spin_lock(&ulp->lock);
2393		list_del_rcu(&un->list_proc);
2394		spin_unlock(&ulp->lock);
2395
2396		/* perform adjustments registered in un */
2397		for (i = 0; i < sma->sem_nsems; i++) {
2398			struct sem *semaphore = &sma->sems[i];
2399			if (un->semadj[i]) {
2400				semaphore->semval += un->semadj[i];
2401				/*
2402				 * Range checks of the new semaphore value,
2403				 * not defined by SUS (the Single UNIX Specification):
2404				 * - Some unices ignore the undo entirely
2405				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2406				 * - some cap the value (e.g. FreeBSD caps
2407				 *   at 0, but doesn't enforce SEMVMX)
2408				 *
2409				 * Linux caps the semaphore value, both at 0
2410				 * and at SEMVMX.
2411				 *
2412				 *	Manfred <manfred@colorfullife.com>
2413				 */
2414				if (semaphore->semval < 0)
2415					semaphore->semval = 0;
2416				if (semaphore->semval > SEMVMX)
2417					semaphore->semval = SEMVMX;
2418				ipc_update_pid(&semaphore->sempid, task_tgid(current));
2419			}
2420		}
2421		/* maybe some queued-up processes were waiting for this */
2422		do_smart_update(sma, NULL, 0, 1, &wake_q);
2423		sem_unlock(sma, -1);
2424		rcu_read_unlock();
2425		wake_up_q(&wake_q);
2426
2427		kvfree_rcu(un, rcu);
2428	}
2429	kfree(ulp);
2430}
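
/*
 * Demonstration sketch (user space, hedged): the exit-time semadj
 * rollback performed by exit_sem() above.  The child raises the
 * semaphore with SEM_UNDO and exits; the parent then sees the original
 * value again, the -2 adjustment having been applied at the child's exit.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (semid < 0) {
		perror("semget");
		exit(1);
	}
	if (fork() == 0) {
		struct sembuf sop =
			{ .sem_num = 0, .sem_op = 2, .sem_flg = SEM_UNDO };

		if (semop(semid, &sop, 1) < 0)	/* records semadj = -2 */
			perror("semop");
		_exit(0);			/* exit_sem() adds it back */
	}
	wait(NULL);
	/* Prints 0: the child's +2 was rolled back when it exited. */
	printf("value after child exit: %d\n", semctl(semid, 0, GETVAL));
	semctl(semid, 0, IPC_RMID);
	return 0;
}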
2431
2432#ifdef CONFIG_PROC_FS
2433static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2434{
2435	struct user_namespace *user_ns = seq_user_ns(s);
2436	struct kern_ipc_perm *ipcp = it;
2437	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
2438	time64_t sem_otime;
2439
2440	/*
2441	 * The proc interface isn't aware of sem_lock(), it calls
2442	 * ipc_lock_object(), i.e. spin_lock(&sma->sem_perm.lock).
2443	 * (in sysvipc_find_ipc)
2444	 * In order to stay compatible with sem_lock(), we must
2445	 * enter / leave complex_mode.
2446	 */
2447	complexmode_enter(sma);
2448
2449	sem_otime = get_semotime(sma);
2450
2451	seq_printf(s,
2452		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
2453		   sma->sem_perm.key,
2454		   sma->sem_perm.id,
2455		   sma->sem_perm.mode,
2456		   sma->sem_nsems,
2457		   from_kuid_munged(user_ns, sma->sem_perm.uid),
2458		   from_kgid_munged(user_ns, sma->sem_perm.gid),
2459		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
2460		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
2461		   sem_otime,
2462		   sma->sem_ctime);
2463
2464	complexmode_tryleave(sma);
2465
2466	return 0;
2467}
2468#endif
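
/*
 * Reader sketch (user space, hedged): per the seq_printf() in
 * sysvipc_sem_proc_show() above, the columns of /proc/sysvipc/sem are,
 * in order: key, semid, perms, nsems, uid, gid, cuid, cgid, otime,
 * ctime.  A trivial dump:
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/sysvipc/sem", "r");

	if (!f) {
		perror("/proc/sysvipc/sem");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}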