ipc/sem.c (Linux v4.10.11)
   1/*
   2 * linux/ipc/sem.c
   3 * Copyright (C) 1992 Krishna Balasubramanian
   4 * Copyright (C) 1995 Eric Schenk, Bruno Haible
   5 *
   6 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
   7 *
   8 * SMP-threaded, sysctl's added
   9 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
  10 * Enforced range limit on SEM_UNDO
  11 * (c) 2001 Red Hat Inc
  12 * Lockless wakeup
  13 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
  14 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
  15 * Further wakeup optimizations, documentation
  16 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
  17 *
  18 * support for audit of ipc object properties and permission changes
  19 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  20 *
  21 * namespaces support
  22 * OpenVZ, SWsoft Inc.
  23 * Pavel Emelianov <xemul@openvz.org>
  24 *
  25 * Implementation notes: (May 2010)
  26 * This file implements System V semaphores.
  27 *
  28 * User space visible behavior:
  29 * - FIFO ordering for semop() operations (just FIFO, not starvation
  30 *   protection)
  31 * - multiple semaphore operations that alter the same semaphore in
  32 *   one semop() are handled.
  33 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
  34 *   SETALL calls.
  35 * - two Linux-specific semctl() commands: SEM_STAT, SEM_INFO.
  36 * - undo adjustments at process exit are limited to 0..SEMVMX.
  37 * - namespaces are supported.
  38 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
  39 *   to /proc/sys/kernel/sem.
  40 * - statistics about the usage are reported in /proc/sysvipc/sem.
  41 *
  42 * Internals:
  43 * - scalability:
  44 *   - all global variables are read-mostly.
  45 *   - semop() calls and semctl(RMID) are synchronized by RCU.
  46 *   - most operations perform write operations (actually: spin_lock calls) to
  47 *     the per-semaphore array structure.
  48 *   Thus: Perfect SMP scaling between independent semaphore arrays.
  49 *         If multiple semaphores in one array are used, then cache line
  50 *         thrashing on the semaphore array spinlock will limit the scaling.
  51 * - semncnt and semzcnt are calculated on demand in count_semcnt()
  52 * - the task that performs a successful semop() scans the list of all
  53 *   sleeping tasks and completes any pending operations that can be fulfilled.
  54 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
  55 *   (see update_queue())
  56 * - To improve the scalability, the actual wake-up calls are performed after
  57 *   dropping all locks. (see wake_up_sem_queue_prepare())
  58 * - All work is done by the waker, the woken up task does not have to do
  59 *   anything - not even acquiring a lock or dropping a refcount.
  60 * - A woken up task may not even touch the semaphore array anymore, it may
  61 *   have been destroyed already by a semctl(RMID).
  62 * - UNDO values are stored in an array (one per process and per
  63 *   semaphore array, lazily allocated). For backwards compatibility, multiple
  64 *   modes for the UNDO variables are supported (per process, per thread)
  65 *   (see copy_semundo, CLONE_SYSVSEM)
  66 * - There are two lists of the pending operations: a per-array list
  67 *   and a per-semaphore list (stored in the array). This makes it possible to
  68 *   achieve FIFO ordering without always scanning all pending operations.
  69 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
  70 */
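/*
 * Illustrative userspace sketch of the multi-sop atomicity described above
 * (not part of this file; error handling omitted for brevity):
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	int main(void)
 *	{
 *		int id = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
 *		struct sembuf up[2] = {
 *			{ .sem_num = 0, .sem_op = 1, .sem_flg = 0 },
 *			{ .sem_num = 1, .sem_op = 1, .sem_flg = 0 },
 *		};
 *		struct sembuf down[2] = {
 *			{ .sem_num = 0, .sem_op = -1, .sem_flg = 0 },
 *			{ .sem_num = 1, .sem_op = -1, .sem_flg = 0 },
 *		};
 *
 *		semop(id, up, 2);	// both semaphores go 0 -> 1 atomically
 *		semop(id, down, 2);	// both go 1 -> 0, or the caller sleeps
 *		semctl(id, 0, IPC_RMID);
 *		return 0;
 *	}
 */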
  71
  72#include <linux/slab.h>
  73#include <linux/spinlock.h>
  74#include <linux/init.h>
  75#include <linux/proc_fs.h>
  76#include <linux/time.h>
  77#include <linux/security.h>
  78#include <linux/syscalls.h>
  79#include <linux/audit.h>
  80#include <linux/capability.h>
  81#include <linux/seq_file.h>
  82#include <linux/rwsem.h>
  83#include <linux/nsproxy.h>
  84#include <linux/ipc_namespace.h>
  85
  86#include <linux/uaccess.h>
  87#include "util.h"
  88
  89/* One semaphore structure for each semaphore in the system. */
  90struct sem {
  91	int	semval;		/* current value */
  92	/*
  93	 * PID of the process that last modified the semaphore. For
  94	 * Linux, specifically these are:
  95	 *  - semop
  96	 *  - semctl, via SETVAL and SETALL.
  97	 *  - at task exit when performing undo adjustments (see exit_sem).
  98	 */
  99	int	sempid;
 100	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
 101	struct list_head pending_alter; /* pending single-sop operations */
 102					/* that alter the semaphore */
 103	struct list_head pending_const; /* pending single-sop operations */
 104					/* that do not alter the semaphore*/
 105	time_t	sem_otime;	/* candidate for sem_otime */
 106} ____cacheline_aligned_in_smp;
 107
 108/* One queue for each sleeping process in the system. */
 109struct sem_queue {
 110	struct list_head	list;	 /* queue of pending operations */
 111	struct task_struct	*sleeper; /* this process */
 112	struct sem_undo		*undo;	 /* undo structure */
 113	int			pid;	 /* process id of requesting process */
 114	int			status;	 /* completion status of operation */
 115	struct sembuf		*sops;	 /* array of pending operations */
 116	struct sembuf		*blocking; /* the operation that blocked */
 117	int			nsops;	 /* number of operations */
 118	bool			alter;	 /* does *sops alter the array? */
 119	bool                    dupsop;	 /* sops on more than one sem_num */
 120};
 121
 122/* Each task has a list of undo requests. They are executed automatically
 123 * when the process exits.
 124 */
 125struct sem_undo {
 126	struct list_head	list_proc;	/* per-process list: *
 127						 * all undos from one process
 128						 * rcu protected */
 129	struct rcu_head		rcu;		/* rcu struct for sem_undo */
 130	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
 131	struct list_head	list_id;	/* per semaphore array list:
 132						 * all undos for one array */
 133	int			semid;		/* semaphore set identifier */
 134	short			*semadj;	/* array of adjustments */
 135						/* one per semaphore */
 136};
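/*
 * Worked example of the semadj bookkeeping (illustrative; "id" is assumed
 * to name an existing semaphore set):
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = 2, .sem_flg = SEM_UNDO };
 *	semop(id, &op, 1);	// semval += 2, this task's semadj[0] becomes -2
 *
 * If the task exits without reversing the operation, exit_sem() applies
 * the adjustment: semval += semadj[0], i.e. semval drops back by 2.
 */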
 137
 138/* sem_undo_list controls shared access to the list of sem_undo structures
 139 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
 140 */
 141struct sem_undo_list {
 142	atomic_t		refcnt;
 143	spinlock_t		lock;
 144	struct list_head	list_proc;
 145};
 146
 147
 148#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])
 149
 150#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)
 151
 152static int newary(struct ipc_namespace *, struct ipc_params *);
 153static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
 154#ifdef CONFIG_PROC_FS
 155static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 156#endif
 157
 158#define SEMMSL_FAST	256 /* 512 bytes on stack */
 159#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
 160
 161/*
 162 * Locking:
 163 * a) global sem_lock() for read/write
 164 *	sem_undo.id_next,
 165 *	sem_array.complex_count,
 166 *	sem_array.complex_mode
 167 *	sem_array.pending{_alter,_const},
 168 *	sem_array.sem_undo
 169 *
 170 * b) global or semaphore sem_lock() for read/write:
 171 *	sem_array.sem_base[i].pending_{const,alter}:
 172 *	sem_array.complex_mode (for read)
 173 *
 174 * c) special:
 175 *	sem_undo_list.list_proc:
 176 *	* undo_list->lock for write
 177 *	* rcu for read
 178 */
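/*
 * Lock selection sketch (illustrative; it mirrors the decision that
 * sem_lock() below makes):
 *
 *	semop() with nsops == 1 and complex_mode not set
 *		-> spin_lock(&sma->sem_base[sem_num].lock)	(per-semaphore)
 *	semop() with nsops > 1, or complex_mode set
 *		-> ipc_lock_object(&sma->sem_perm)		(global)
 */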
 179
 180#define sc_semmsl	sem_ctls[0]
 181#define sc_semmns	sem_ctls[1]
 182#define sc_semopm	sem_ctls[2]
 183#define sc_semmni	sem_ctls[3]
 184
 185void sem_init_ns(struct ipc_namespace *ns)
 186{
 187	ns->sc_semmsl = SEMMSL;
 188	ns->sc_semmns = SEMMNS;
 189	ns->sc_semopm = SEMOPM;
 190	ns->sc_semmni = SEMMNI;
 191	ns->used_sems = 0;
 192	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
 193}
 194
 195#ifdef CONFIG_IPC_NS
 196void sem_exit_ns(struct ipc_namespace *ns)
 197{
 198	free_ipcs(ns, &sem_ids(ns), freeary);
 199	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 200}
 201#endif
 202
 203void __init sem_init(void)
 204{
 205	sem_init_ns(&init_ipc_ns);
 206	ipc_init_proc_interface("sysvipc/sem",
 207				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
 208				IPC_SEM_IDS, sysvipc_sem_proc_show);
 209}
 210
 211/**
 212 * unmerge_queues - unmerge queues, if possible.
 213 * @sma: semaphore array
 214 *
 215 * The function unmerges the wait queues if complex_count is 0.
 216 * It must be called prior to dropping the global semaphore array lock.
 217 */
 218static void unmerge_queues(struct sem_array *sma)
 219{
 220	struct sem_queue *q, *tq;
 221
 222	/* complex operations still around? */
 223	if (sma->complex_count)
 224		return;
 225	/*
 226	 * We will switch back to simple mode.
 227	 * Move all pending operation back into the per-semaphore
 228	 * queues.
 229	 */
 230	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
 231		struct sem *curr;
 232		curr = &sma->sem_base[q->sops[0].sem_num];
 233
 234		list_add_tail(&q->list, &curr->pending_alter);
 235	}
 236	INIT_LIST_HEAD(&sma->pending_alter);
 237}
 238
 239/**
 240 * merge_queues - merge single semop queues into global queue
 241 * @sma: semaphore array
 242 *
 243 * This function merges all per-semaphore queues into the global queue.
 244 * It is necessary to achieve FIFO ordering for the pending single-sop
 245 * operations when a multi-semop operation must sleep.
 246 * Only the alter operations must be moved, the const operations can stay.
 247 */
 248static void merge_queues(struct sem_array *sma)
 249{
 250	int i;
 251	for (i = 0; i < sma->sem_nsems; i++) {
 252		struct sem *sem = sma->sem_base + i;
 253
 254		list_splice_init(&sem->pending_alter, &sma->pending_alter);
 255	}
 256}
 257
 258static void sem_rcu_free(struct rcu_head *head)
 259{
 260	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
 261	struct sem_array *sma = ipc_rcu_to_struct(p);
 262
 263	security_sem_free(sma);
 264	ipc_rcu_free(head);
 265}
 266
 267/*
 268 * Enter the mode suitable for non-simple operations:
 269 * Caller must own sem_perm.lock.
 270 */
 271static void complexmode_enter(struct sem_array *sma)
 272{
 273	int i;
 274	struct sem *sem;
 275
 276	if (sma->complex_mode)  {
 277		/* We are already in complex_mode. Nothing to do */
 278		return;
 279	}
 280
 281	/* We need a full barrier after setting complex_mode:
 282	 * The write to complex_mode must be visible
 283	 * before we read the first sem->lock spinlock state.
 284	 */
 285	smp_store_mb(sma->complex_mode, true);
 286
 287	for (i = 0; i < sma->sem_nsems; i++) {
 288		sem = sma->sem_base + i;
 289		spin_unlock_wait(&sem->lock);
 290	}
 291	/*
 292	 * spin_unlock_wait() is not a memory barrier, it is only a
 293	 * control barrier. The code must pair with spin_unlock(&sem->lock),
 294	 * thus just the control barrier is insufficient.
 295	 *
 296	 * smp_rmb() is sufficient, as writes cannot pass the control barrier.
 297	 */
 298	smp_rmb();
 299}
 300
 301/*
 302 * Try to leave the mode that disallows simple operations:
 303 * Caller must own sem_perm.lock.
 304 */
 305static void complexmode_tryleave(struct sem_array *sma)
 306{
 307	if (sma->complex_count)  {
 308		/* Complex ops are sleeping.
 309		 * We must stay in complex mode
 310		 */
 311		return;
 312	}
 313	/*
 314	 * Immediately after setting complex_mode to false,
 315	 * a simple op can start. Thus: all memory writes
 316	 * performed by the current operation must be visible
 317	 * before we set complex_mode to false.
 318	 */
 319	smp_store_release(&sma->complex_mode, false);
 320}
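/*
 * Race sketch for the barrier pairing above (illustrative): a simple op on
 * CPU 1 and complexmode_enter() on CPU 0 may interleave, but they cannot
 * both proceed under their respective fast assumptions:
 *
 *	CPU 0 (complex op)                 CPU 1 (simple op)
 *	smp_store_mb(complex_mode, true)   spin_lock(&sem->lock)
 *	spin_unlock_wait(&sem->lock)       smp_mb()
 *	                                   read complex_mode
 *
 * Either CPU 1 observes complex_mode == true and falls back to the global
 * lock, or CPU 0 observes the held per-semaphore lock and waits for it.
 */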
 321
 322#define SEM_GLOBAL_LOCK	(-1)
 323/*
 324 * If the request contains only one semaphore operation, and there are
 325 * no complex transactions pending, lock only the semaphore involved.
 326 * Otherwise, lock the entire semaphore array, since we either have
 327 * multiple semaphores in our own semops, or we need to look at
 328 * semaphores from other pending complex operations.
 329 */
 330static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 331			      int nsops)
 332{
 333	struct sem *sem;
 334
 335	if (nsops != 1) {
 336		/* Complex operation - acquire a full lock */
 337		ipc_lock_object(&sma->sem_perm);
 338
 339		/* Prevent parallel simple ops */
 340		complexmode_enter(sma);
 341		return SEM_GLOBAL_LOCK;
 342	}
 343
 344	/*
 345	 * Only one semaphore affected - try to optimize locking.
 346	 * Optimized locking is possible if no complex operation
 347	 * is either enqueued or processed right now.
 348	 *
 349	 * Both facts are tracked by complex_mode.
 350	 */
 351	sem = sma->sem_base + sops->sem_num;
 352
 353	/*
 354	 * Initial check for complex_mode. Just an optimization,
 355	 * no locking, no memory barrier.
 356	 */
 357	if (!sma->complex_mode) {
 358		/*
 359		 * It appears that no complex operation is around.
 360		 * Acquire the per-semaphore lock.
 361		 */
 362		spin_lock(&sem->lock);
 363
 364		/*
 365		 * See 51d7d5205d33
 366		 * ("powerpc: Add smp_mb() to arch_spin_is_locked()"):
 367		 * A full barrier is required: the write of sem->lock
 368		 * must be visible before the read is executed
 369		 */
 370		smp_mb();
 371
 372		if (!smp_load_acquire(&sma->complex_mode)) {
 373			/* fast path successful! */
 374			return sops->sem_num;
 375		}
 376		spin_unlock(&sem->lock);
 377	}
 378
 379	/* slow path: acquire the full lock */
 380	ipc_lock_object(&sma->sem_perm);
 381
 382	if (sma->complex_count == 0) {
 383		/* False alarm:
 384		 * There is no complex operation, thus we can switch
 385		 * back to the fast path.
 386		 */
 387		spin_lock(&sem->lock);
 388		ipc_unlock_object(&sma->sem_perm);
 389		return sops->sem_num;
 390	} else {
 391		/* Not a false alarm, thus complete the sequence for a
 392		 * full lock.
 393		 */
 394		complexmode_enter(sma);
 395		return SEM_GLOBAL_LOCK;
 396	}
 397}
 398
 399static inline void sem_unlock(struct sem_array *sma, int locknum)
 400{
 401	if (locknum == SEM_GLOBAL_LOCK) {
 402		unmerge_queues(sma);
 403		complexmode_tryleave(sma);
 404		ipc_unlock_object(&sma->sem_perm);
 405	} else {
 406		struct sem *sem = sma->sem_base + locknum;
 407		spin_unlock(&sem->lock);
 408	}
 409}
 410
 411/*
 412 * sem_lock_(check_) routines are called in the paths where the rwsem
 413 * is not held.
 414 *
 415 * The caller holds the RCU read lock.
 416 */
 417static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
 418{
 419	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
 420
 421	if (IS_ERR(ipcp))
 422		return ERR_CAST(ipcp);
 423
 424	return container_of(ipcp, struct sem_array, sem_perm);
 425}
 426
 427static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
 428							int id)
 429{
 430	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
 431
 432	if (IS_ERR(ipcp))
 433		return ERR_CAST(ipcp);
 434
 435	return container_of(ipcp, struct sem_array, sem_perm);
 436}
 437
 438static inline void sem_lock_and_putref(struct sem_array *sma)
 439{
 440	sem_lock(sma, NULL, -1);
 441	ipc_rcu_putref(sma, sem_rcu_free);
 442}
 443
 444static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 445{
 446	ipc_rmid(&sem_ids(ns), &s->sem_perm);
 447}
 448
 449/**
 450 * newary - Create a new semaphore set
 451 * @ns: namespace
 452 * @params: ptr to the structure that contains key, semflg and nsems
 453 *
 454 * Called with sem_ids.rwsem held (as a writer)
 455 */
 456static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 457{
 458	int id;
 459	int retval;
 460	struct sem_array *sma;
 461	int size;
 462	key_t key = params->key;
 463	int nsems = params->u.nsems;
 464	int semflg = params->flg;
 465	int i;
 466
 467	if (!nsems)
 468		return -EINVAL;
 469	if (ns->used_sems + nsems > ns->sc_semmns)
 470		return -ENOSPC;
 471
 472	size = sizeof(*sma) + nsems * sizeof(struct sem);
 473	sma = ipc_rcu_alloc(size);
 474	if (!sma)
 475		return -ENOMEM;
 476
 477	memset(sma, 0, size);
 478
 479	sma->sem_perm.mode = (semflg & S_IRWXUGO);
 480	sma->sem_perm.key = key;
 481
 482	sma->sem_perm.security = NULL;
 483	retval = security_sem_alloc(sma);
 484	if (retval) {
 485		ipc_rcu_putref(sma, ipc_rcu_free);
 486		return retval;
 487	}
 488
 489	sma->sem_base = (struct sem *) &sma[1];
 490
 491	for (i = 0; i < nsems; i++) {
 492		INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
 493		INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
 494		spin_lock_init(&sma->sem_base[i].lock);
 495	}
 496
 497	sma->complex_count = 0;
 498	sma->complex_mode = true; /* dropped by sem_unlock below */
 499	INIT_LIST_HEAD(&sma->pending_alter);
 500	INIT_LIST_HEAD(&sma->pending_const);
 501	INIT_LIST_HEAD(&sma->list_id);
 502	sma->sem_nsems = nsems;
 503	sma->sem_ctime = get_seconds();
 504
 505	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
 506	if (id < 0) {
 507		ipc_rcu_putref(sma, sem_rcu_free);
 508		return id;
 509	}
 510	ns->used_sems += nsems;
 511
 512	sem_unlock(sma, -1);
 513	rcu_read_unlock();
 514
 515	return sma->sem_perm.id;
 516}
 517
 518
 519/*
 520 * Called with sem_ids.rwsem and ipcp locked.
 521 */
 522static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
 523{
 524	struct sem_array *sma;
 525
 526	sma = container_of(ipcp, struct sem_array, sem_perm);
 527	return security_sem_associate(sma, semflg);
 528}
 529
 530/*
 531 * Called with sem_ids.rwsem and ipcp locked.
 532 */
 533static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
 534				struct ipc_params *params)
 535{
 536	struct sem_array *sma;
 537
 538	sma = container_of(ipcp, struct sem_array, sem_perm);
 539	if (params->u.nsems > sma->sem_nsems)
 540		return -EINVAL;
 541
 542	return 0;
 543}
 544
 545SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 546{
 547	struct ipc_namespace *ns;
 548	static const struct ipc_ops sem_ops = {
 549		.getnew = newary,
 550		.associate = sem_security,
 551		.more_checks = sem_more_checks,
 552	};
 553	struct ipc_params sem_params;
 554
 555	ns = current->nsproxy->ipc_ns;
 556
 557	if (nsems < 0 || nsems > ns->sc_semmsl)
 558		return -EINVAL;
 559
 560	sem_params.key = key;
 561	sem_params.flg = semflg;
 562	sem_params.u.nsems = nsems;
 563
 564	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 565}
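/*
 * Illustrative userspace view of the checks above (not part of this file;
 * the key 0x1234 is arbitrary): nsems must be non-zero when creating a set
 * and may not exceed SEMMSL, while an existing set may be opened with a
 * smaller nsems (sem_more_checks() only rejects requests for more).
 *
 *	int id   = semget(0x1234, 4, IPC_CREAT | 0600);	// create, 4 semaphores
 *	int same = semget(0x1234, 0, 0600);		// open, nsems == 0 is fine
 */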
 566
 567/**
 568 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 569 *                               operations on a given array.
 570 * @sma: semaphore array
 571 * @q: struct sem_queue that describes the operation
 572 *
 573 * Whether the caller blocks is determined by the value
 574 * indicated by the semaphore operation (sem_op):
 575 *
 576 *  (1) >0 never blocks.
 577 *  (2)  0 (wait-for-zero operation): blocks if semval is non-zero.
 578 *  (3) <0 blocks if it would decrement semval to a value smaller than zero.
 579 *
 580 * Returns 0 if the operation was possible.
 581 * Returns 1 if the operation is impossible, the caller must sleep.
 582 * Returns <0 for error codes.
 583 */
 584static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 585{
 586	int result, sem_op, nsops, pid;
 587	struct sembuf *sop;
 588	struct sem *curr;
 589	struct sembuf *sops;
 590	struct sem_undo *un;
 591
 592	sops = q->sops;
 593	nsops = q->nsops;
 594	un = q->undo;
 595
 596	for (sop = sops; sop < sops + nsops; sop++) {
 597		curr = sma->sem_base + sop->sem_num;
 598		sem_op = sop->sem_op;
 599		result = curr->semval;
 600
 601		if (!sem_op && result)
 602			goto would_block;
 603
 604		result += sem_op;
 605		if (result < 0)
 606			goto would_block;
 607		if (result > SEMVMX)
 608			goto out_of_range;
 609
 610		if (sop->sem_flg & SEM_UNDO) {
 611			int undo = un->semadj[sop->sem_num] - sem_op;
 612			/* Exceeding the undo range is an error. */
 613			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 614				goto out_of_range;
 615			un->semadj[sop->sem_num] = undo;
 616		}
 617
 618		curr->semval = result;
 619	}
 620
 621	sop--;
 622	pid = q->pid;
 623	while (sop >= sops) {
 624		sma->sem_base[sop->sem_num].sempid = pid;
 625		sop--;
 626	}
 627
 628	return 0;
 629
 630out_of_range:
 631	result = -ERANGE;
 632	goto undo;
 633
 634would_block:
 635	q->blocking = sop;
 636
 637	if (sop->sem_flg & IPC_NOWAIT)
 638		result = -EAGAIN;
 639	else
 640		result = 1;
 641
 642undo:
 643	sop--;
 644	while (sop >= sops) {
 645		sem_op = sop->sem_op;
 646		sma->sem_base[sop->sem_num].semval -= sem_op;
 647		if (sop->sem_flg & SEM_UNDO)
 648			un->semadj[sop->sem_num] += sem_op;
 649		sop--;
 650	}
 651
 652	return result;
 653}
 654
 655static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 656{
 657	int result, sem_op, nsops;
 658	struct sembuf *sop;
 659	struct sem *curr;
 660	struct sembuf *sops;
 661	struct sem_undo *un;
 662
 663	sops = q->sops;
 664	nsops = q->nsops;
 665	un = q->undo;
 666
 667	if (unlikely(q->dupsop))
 668		return perform_atomic_semop_slow(sma, q);
 669
 670	/*
 671	 * We scan the semaphore set twice, first to ensure that the entire
 672	 * operation can succeed, therefore avoiding any pointless writes
 673	 * to shared memory and having to undo such changes in order to block
 674	 * until the operations can go through.
 675	 */
 676	for (sop = sops; sop < sops + nsops; sop++) {
 677		curr = sma->sem_base + sop->sem_num;
 678		sem_op = sop->sem_op;
 679		result = curr->semval;
 680
 681		if (!sem_op && result)
 682			goto would_block; /* wait-for-zero */
 683
 684		result += sem_op;
 685		if (result < 0)
 686			goto would_block;
 687
 688		if (result > SEMVMX)
 689			return -ERANGE;
 690
 691		if (sop->sem_flg & SEM_UNDO) {
 692			int undo = un->semadj[sop->sem_num] - sem_op;
 693
 694			/* Exceeding the undo range is an error. */
 695			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 696				return -ERANGE;
 697		}
 698	}
 699
 700	for (sop = sops; sop < sops + nsops; sop++) {
 701		curr = sma->sem_base + sop->sem_num;
 702		sem_op = sop->sem_op;
 703		result = curr->semval;
 704
 705		if (sop->sem_flg & SEM_UNDO) {
 706			int undo = un->semadj[sop->sem_num] - sem_op;
 707
 708			un->semadj[sop->sem_num] = undo;
 709		}
 710		curr->semval += sem_op;
 711		curr->sempid = q->pid;
 712	}
 713
 714	return 0;
 715
 716would_block:
 717	q->blocking = sop;
 718	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
 719}
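/*
 * Example of a request that forces the _slow variant (illustrative): both
 * sops touch semaphore 0, so q->dupsop is set and perform_atomic_semop()
 * routes the request through the rollback-capable slow path above.
 *
 *	struct sembuf sops[2] = {
 *		{ .sem_num = 0, .sem_op = -1, .sem_flg = 0 },
 *		{ .sem_num = 0, .sem_op = 1, .sem_flg = 0 },
 *	};
 */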
 720
 721static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
 722					     struct wake_q_head *wake_q)
 723{
 724	wake_q_add(wake_q, q->sleeper);
 725	/*
 726	 * Rely on the above implicit barrier, such that we can
 727	 * ensure that we hold a reference to the task before setting
 728	 * q->status. Otherwise we could race with do_exit if the
 729	 * task is awoken by an external event before calling
 730	 * wake_up_process().
 731	 */
 732	WRITE_ONCE(q->status, error);
 733}
 734
 735static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
 736{
 737	list_del(&q->list);
 738	if (q->nsops > 1)
 739		sma->complex_count--;
 740}
 741
 742/** check_restart(sma, q)
 743 * @sma: semaphore array
 744 * @q: the operation that just completed
 745 *
 746 * update_queue is O(N^2) when it restarts scanning the whole queue of
 747 * waiting operations. Therefore this function checks if the restart is
 748 * really necessary. It is called after a previously waiting operation
 749 * modified the array.
 750 * Note that wait-for-zero operations are handled without restart.
 751 */
 752static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
 753{
 754	/* pending complex alter operations are too difficult to analyse */
 755	if (!list_empty(&sma->pending_alter))
 756		return 1;
 757
 758	/* we were a sleeping complex operation. Too difficult */
 759	if (q->nsops > 1)
 760		return 1;
 761
 762	/* It is impossible that someone waits for the new value:
 763	 * - complex operations always restart.
 764 *   - wait-for-zero are handled separately.
 765	 * - q is a previously sleeping simple operation that
 766	 *   altered the array. It must be a decrement, because
 767	 *   simple increments never sleep.
 768	 * - If there are older (higher priority) decrements
 769	 *   in the queue, then they have observed the original
 770	 *   semval value and couldn't proceed. The operation
 771 *   decremented the value - thus they won't proceed either.
 772	 */
 773	return 0;
 774}
 775
 776/**
 777 * wake_const_ops - wake up non-alter tasks
 778 * @sma: semaphore array.
 779 * @semnum: semaphore that was modified.
 780 * @wake_q: lockless wake-queue head.
 781 *
 782 * wake_const_ops must be called after a semaphore in a semaphore array
 783 * was set to 0. If complex const operations are pending, wake_const_ops must
 784 * be called with semnum = -1, as well as with the number of each modified
 785 * semaphore.
 786 * The tasks that must be woken up are added to @wake_q. The return code
 787 * is stored in q->status.
 788 * The function returns 1 if at least one operation was completed successfully.
 789 */
 790static int wake_const_ops(struct sem_array *sma, int semnum,
 791			  struct wake_q_head *wake_q)
 792{
 793	struct sem_queue *q, *tmp;
 794	struct list_head *pending_list;
 795	int semop_completed = 0;
 796
 797	if (semnum == -1)
 798		pending_list = &sma->pending_const;
 799	else
 800		pending_list = &sma->sem_base[semnum].pending_const;
 801
 802	list_for_each_entry_safe(q, tmp, pending_list, list) {
 803		int error = perform_atomic_semop(sma, q);
 804
 805		if (error > 0)
 806			continue;
 807		/* operation completed, remove from queue & wakeup */
 808		unlink_queue(sma, q);
 809
 810		wake_up_sem_queue_prepare(q, error, wake_q);
 811		if (error == 0)
 812			semop_completed = 1;
 813	}
 814
 815	return semop_completed;
 816}
 817
 818/**
 819 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 820 * @sma: semaphore array
 821 * @sops: operations that were performed
 822 * @nsops: number of operations
 823 * @wake_q: lockless wake-queue head
 824 *
 825 * Checks all required queues for wait-for-zero operations, based
 826 * on the actual changes that were performed on the semaphore array.
 827 * The function returns 1 if at least one operation was completed successfully.
 828 */
 829static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 830				int nsops, struct wake_q_head *wake_q)
 831{
 832	int i;
 833	int semop_completed = 0;
 834	int got_zero = 0;
 835
 836	/* first: the per-semaphore queues, if known */
 837	if (sops) {
 838		for (i = 0; i < nsops; i++) {
 839			int num = sops[i].sem_num;
 840
 841			if (sma->sem_base[num].semval == 0) {
 842				got_zero = 1;
 843				semop_completed |= wake_const_ops(sma, num, wake_q);
 844			}
 845		}
 846	} else {
 847		/*
 848		 * No sops means the modified semaphores are not known.
 849		 * Assume all were changed.
 850		 */
 851		for (i = 0; i < sma->sem_nsems; i++) {
 852			if (sma->sem_base[i].semval == 0) {
 853				got_zero = 1;
 854				semop_completed |= wake_const_ops(sma, i, wake_q);
 855			}
 856		}
 857	}
 858	/*
 859	 * If one of the modified semaphores got 0,
 860	 * then check the global queue, too.
 861	 */
 862	if (got_zero)
 863		semop_completed |= wake_const_ops(sma, -1, wake_q);
 864
 865	return semop_completed;
 866}
 867
 868
 869/**
 870 * update_queue - look for tasks that can be completed.
 871 * @sma: semaphore array.
 872 * @semnum: semaphore that was modified.
 873 * @wake_q: lockless wake-queue head.
 874 *
 875 * update_queue must be called after a semaphore in a semaphore array
 876 * was modified. If multiple semaphores were modified, update_queue must
 877 * be called with semnum = -1, as well as with the number of each modified
 878 * semaphore.
 879 * The tasks that must be woken up are added to @wake_q. The return code
 880 * is stored in q->status.
 881 * The function internally checks if const operations can now succeed.
 882 *
 883 * The function returns 1 if at least one semop was completed successfully.
 884 */
 885static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
 886{
 887	struct sem_queue *q, *tmp;
 888	struct list_head *pending_list;
 889	int semop_completed = 0;
 890
 891	if (semnum == -1)
 892		pending_list = &sma->pending_alter;
 893	else
 894		pending_list = &sma->sem_base[semnum].pending_alter;
 895
 896again:
 897	list_for_each_entry_safe(q, tmp, pending_list, list) {
 898		int error, restart;
 899
 900		/* If we are scanning the single-sop, per-semaphore list of
 901		 * one semaphore and that semaphore is 0, then it is not
 902		 * necessary to scan further: simple increments
 903		 * that affect only one entry succeed immediately and cannot
 904		 * be in the per-semaphore pending queue, and decrements
 905		 * cannot be successful if the value is already 0.
 906		 */
 907		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
 908			break;
 909
 910		error = perform_atomic_semop(sma, q);
 911
 912		/* Does q->sleeper still need to sleep? */
 913		if (error > 0)
 914			continue;
 915
 916		unlink_queue(sma, q);
 917
 918		if (error) {
 919			restart = 0;
 920		} else {
 921			semop_completed = 1;
 922			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
 923			restart = check_restart(sma, q);
 924		}
 925
 926		wake_up_sem_queue_prepare(q, error, wake_q);
 927		if (restart)
 928			goto again;
 929	}
 930	return semop_completed;
 931}
 932
 933/**
 934 * set_semotime - set sem_otime
 935 * @sma: semaphore array
 936 * @sops: operations that modified the array, may be NULL
 937 *
 938 * sem_otime is replicated to avoid cache line thrashing.
 939 * This function sets one instance to the current time.
 940 */
 941static void set_semotime(struct sem_array *sma, struct sembuf *sops)
 942{
 943	if (sops == NULL) {
 944		sma->sem_base[0].sem_otime = get_seconds();
 945	} else {
 946		sma->sem_base[sops[0].sem_num].sem_otime =
 947							get_seconds();
 948	}
 949}
 950
 951/**
 952 * do_smart_update - optimized update_queue
 953 * @sma: semaphore array
 954 * @sops: operations that were performed
 955 * @nsops: number of operations
 956 * @otime: force setting otime
 957 * @wake_q: lockless wake-queue head
 958 *
 959 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 960 * based on the actual changes that were performed on the semaphore array.
 961 * Note that the function does not do the actual wake-up: the caller is
 962 * responsible for calling wake_up_q().
 963 * It is safe to perform this call after dropping all locks.
 964 */
 965static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
 966			    int otime, struct wake_q_head *wake_q)
 967{
 968	int i;
 969
 970	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);
 971
 972	if (!list_empty(&sma->pending_alter)) {
 973		/* semaphore array uses the global queue - just process it. */
 974		otime |= update_queue(sma, -1, wake_q);
 975	} else {
 976		if (!sops) {
 977			/*
 978			 * No sops, thus the modified semaphores are not
 979			 * known. Check all.
 980			 */
 981			for (i = 0; i < sma->sem_nsems; i++)
 982				otime |= update_queue(sma, i, wake_q);
 983		} else {
 984			/*
 985			 * Check the semaphores that were increased:
 986			 * - No complex ops, thus all sleeping ops are
 987			 *   decrements.
 988			 * - if we decreased the value, then any sleeping
 989			 *   semaphore ops won't be able to run: If the
 990			 *   previous value was too small, then the new
 991			 *   value will be too small, too.
 992			 */
 993			for (i = 0; i < nsops; i++) {
 994				if (sops[i].sem_op > 0) {
 995					otime |= update_queue(sma,
 996							      sops[i].sem_num, wake_q);
 997				}
 998			}
 999		}
1000	}
1001	if (otime)
1002		set_semotime(sma, sops);
1003}
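/*
 * Worked example (illustrative): after a semop() that performed
 * { .sem_num = 2, .sem_op = 1 } with the global queue empty,
 * do_smart_update() only needs to:
 *
 *	1. check wait-for-zero waiters on sem 2 (none can fire here, the
 *	   value just became non-zero), and
 *	2. call update_queue(sma, 2, wake_q), since only an increment can
 *	   unblock sleeping decrements on that semaphore.
 */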
1004
1005/*
1006 * check_qop: Test if a queued operation sleeps on the semaphore semnum
1007 */
1008static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
1009			bool count_zero)
1010{
1011	struct sembuf *sop = q->blocking;
1012
1013	/*
1014	 * Linux always (since 0.99.10) reported a task as sleeping on all
1015	 * semaphores. This violates SUS, therefore it was changed to the
1016	 * standard-compliant behavior.
1017	 * Give the administrators a chance to notice that an application
1018	 * might misbehave because it relies on the Linux behavior.
1019	 */
1020	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
1021			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
1022			current->comm, task_pid_nr(current));
1023
1024	if (sop->sem_num != semnum)
1025		return 0;
1026
1027	if (count_zero && sop->sem_op == 0)
1028		return 1;
1029	if (!count_zero && sop->sem_op < 0)
1030		return 1;
1031
1032	return 0;
1033}
1034
1035/* The following counts are associated with each semaphore:
1036 *   semncnt        number of tasks waiting on semval being nonzero
1037 *   semzcnt        number of tasks waiting on semval being zero
1038 *
1039 * By definition, a task waits only on the semaphore of the first semop
1040 * that cannot proceed, even if additional operations would block, too.
1041 */
1042static int count_semcnt(struct sem_array *sma, ushort semnum,
1043			bool count_zero)
1044{
1045	struct list_head *l;
1046	struct sem_queue *q;
1047	int semcnt;
1048
1049	semcnt = 0;
1050	/* First: check the simple operations. They are easy to evaluate */
1051	if (count_zero)
1052		l = &sma->sem_base[semnum].pending_const;
1053	else
1054		l = &sma->sem_base[semnum].pending_alter;
1055
1056	list_for_each_entry(q, l, list) {
1057		/* all tasks on a per-semaphore list sleep on exactly
1058		 * that semaphore
1059		 */
1060		semcnt++;
1061	}
1062
1063	/* Then: check the complex operations. */
1064	list_for_each_entry(q, &sma->pending_alter, list) {
1065		semcnt += check_qop(sma, semnum, q, count_zero);
1066	}
1067	if (count_zero) {
1068		list_for_each_entry(q, &sma->pending_const, list) {
1069			semcnt += check_qop(sma, semnum, q, count_zero);
1070		}
1071	}
1072	return semcnt;
1073}
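/*
 * Illustrative userspace queries served by count_semcnt() (not part of
 * this file; "id" is assumed to name an existing set):
 *
 *	int ncnt = semctl(id, 0, GETNCNT);	// tasks blocked on a decrement
 *	int zcnt = semctl(id, 0, GETZCNT);	// tasks waiting for semval == 0
 */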
1074
1075/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1076 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
1077 * remains locked on exit.
1078 */
1079static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1080{
1081	struct sem_undo *un, *tu;
1082	struct sem_queue *q, *tq;
1083	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1084	int i;
1085	DEFINE_WAKE_Q(wake_q);
1086
1087	/* Free the existing undo structures for this semaphore set.  */
1088	ipc_assert_locked_object(&sma->sem_perm);
1089	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1090		list_del(&un->list_id);
1091		spin_lock(&un->ulp->lock);
1092		un->semid = -1;
1093		list_del_rcu(&un->list_proc);
1094		spin_unlock(&un->ulp->lock);
1095		kfree_rcu(un, rcu);
1096	}
1097
1098	/* Wake up all pending processes and let them fail with EIDRM. */
1099	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1100		unlink_queue(sma, q);
1101		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1102	}
1103
1104	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1105		unlink_queue(sma, q);
1106		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1107	}
1108	for (i = 0; i < sma->sem_nsems; i++) {
1109		struct sem *sem = sma->sem_base + i;
1110		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
1111			unlink_queue(sma, q);
1112			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1113		}
1114		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
1115			unlink_queue(sma, q);
1116			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1117		}
1118	}
1119
1120	/* Remove the semaphore set from the IDR */
1121	sem_rmid(ns, sma);
1122	sem_unlock(sma, -1);
1123	rcu_read_unlock();
1124
1125	wake_up_q(&wake_q);
1126	ns->used_sems -= sma->sem_nsems;
1127	ipc_rcu_putref(sma, sem_rcu_free);
1128}
1129
1130static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
1131{
1132	switch (version) {
1133	case IPC_64:
1134		return copy_to_user(buf, in, sizeof(*in));
1135	case IPC_OLD:
1136	    {
1137		struct semid_ds out;
1138
1139		memset(&out, 0, sizeof(out));
1140
1141		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
1142
1143		out.sem_otime	= in->sem_otime;
1144		out.sem_ctime	= in->sem_ctime;
1145		out.sem_nsems	= in->sem_nsems;
1146
1147		return copy_to_user(buf, &out, sizeof(out));
1148	    }
1149	default:
1150		return -EINVAL;
1151	}
1152}
1153
1154static time_t get_semotime(struct sem_array *sma)
1155{
1156	int i;
1157	time_t res;
1158
1159	res = sma->sem_base[0].sem_otime;
1160	for (i = 1; i < sma->sem_nsems; i++) {
1161		time_t to = sma->sem_base[i].sem_otime;
1162
1163		if (to > res)
1164			res = to;
1165	}
1166	return res;
1167}
1168
1169static int semctl_nolock(struct ipc_namespace *ns, int semid,
1170			 int cmd, int version, void __user *p)
1171{
1172	int err;
1173	struct sem_array *sma;
1174
1175	switch (cmd) {
1176	case IPC_INFO:
1177	case SEM_INFO:
1178	{
1179		struct seminfo seminfo;
1180		int max_id;
1181
1182		err = security_sem_semctl(NULL, cmd);
1183		if (err)
1184			return err;
1185
1186		memset(&seminfo, 0, sizeof(seminfo));
1187		seminfo.semmni = ns->sc_semmni;
1188		seminfo.semmns = ns->sc_semmns;
1189		seminfo.semmsl = ns->sc_semmsl;
1190		seminfo.semopm = ns->sc_semopm;
1191		seminfo.semvmx = SEMVMX;
1192		seminfo.semmnu = SEMMNU;
1193		seminfo.semmap = SEMMAP;
1194		seminfo.semume = SEMUME;
1195		down_read(&sem_ids(ns).rwsem);
1196		if (cmd == SEM_INFO) {
1197			seminfo.semusz = sem_ids(ns).in_use;
1198			seminfo.semaem = ns->used_sems;
1199		} else {
1200			seminfo.semusz = SEMUSZ;
1201			seminfo.semaem = SEMAEM;
1202		}
1203		max_id = ipc_get_maxid(&sem_ids(ns));
1204		up_read(&sem_ids(ns).rwsem);
1205		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1206			return -EFAULT;
1207		return (max_id < 0) ? 0 : max_id;
1208	}
1209	case IPC_STAT:
1210	case SEM_STAT:
1211	{
1212		struct semid64_ds tbuf;
1213		int id = 0;
1214
1215		memset(&tbuf, 0, sizeof(tbuf));
1216
1217		rcu_read_lock();
1218		if (cmd == SEM_STAT) {
1219			sma = sem_obtain_object(ns, semid);
1220			if (IS_ERR(sma)) {
1221				err = PTR_ERR(sma);
1222				goto out_unlock;
1223			}
1224			id = sma->sem_perm.id;
1225		} else {
1226			sma = sem_obtain_object_check(ns, semid);
1227			if (IS_ERR(sma)) {
1228				err = PTR_ERR(sma);
1229				goto out_unlock;
1230			}
1231		}
1232
1233		err = -EACCES;
1234		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1235			goto out_unlock;
1236
1237		err = security_sem_semctl(sma, cmd);
1238		if (err)
1239			goto out_unlock;
1240
1241		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
1242		tbuf.sem_otime = get_semotime(sma);
1243		tbuf.sem_ctime = sma->sem_ctime;
1244		tbuf.sem_nsems = sma->sem_nsems;
1245		rcu_read_unlock();
1246		if (copy_semid_to_user(p, &tbuf, version))
1247			return -EFAULT;
1248		return id;
1249	}
1250	default:
1251		return -EINVAL;
1252	}
1253out_unlock:
1254	rcu_read_unlock();
1255	return err;
1256}
1257
1258static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1259		unsigned long arg)
1260{
1261	struct sem_undo *un;
1262	struct sem_array *sma;
1263	struct sem *curr;
1264	int err, val;
1265	DEFINE_WAKE_Q(wake_q);
1266
1267#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1268	/* big-endian 64bit */
1269	val = arg >> 32;
1270#else
1271	/* 32bit or little-endian 64bit */
1272	val = arg;
1273#endif
1274
1275	if (val > SEMVMX || val < 0)
1276		return -ERANGE;
1277
1278	rcu_read_lock();
1279	sma = sem_obtain_object_check(ns, semid);
1280	if (IS_ERR(sma)) {
1281		rcu_read_unlock();
1282		return PTR_ERR(sma);
1283	}
1284
1285	if (semnum < 0 || semnum >= sma->sem_nsems) {
1286		rcu_read_unlock();
1287		return -EINVAL;
1288	}
1289
1290
1291	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1292		rcu_read_unlock();
1293		return -EACCES;
1294	}
1295
1296	err = security_sem_semctl(sma, SETVAL);
1297	if (err) {
1298		rcu_read_unlock();
1299		return -EACCES;
1300	}
1301
1302	sem_lock(sma, NULL, -1);
1303
1304	if (!ipc_valid_object(&sma->sem_perm)) {
1305		sem_unlock(sma, -1);
1306		rcu_read_unlock();
1307		return -EIDRM;
1308	}
1309
1310	curr = &sma->sem_base[semnum];
1311
1312	ipc_assert_locked_object(&sma->sem_perm);
1313	list_for_each_entry(un, &sma->list_id, list_id)
1314		un->semadj[semnum] = 0;
1315
1316	curr->semval = val;
1317	curr->sempid = task_tgid_vnr(current);
1318	sma->sem_ctime = get_seconds();
1319	/* maybe some queued-up processes were waiting for this */
1320	do_smart_update(sma, NULL, 0, 0, &wake_q);
1321	sem_unlock(sma, -1);
1322	rcu_read_unlock();
1323	wake_up_q(&wake_q);
1324	return 0;
1325}
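/*
 * Illustrative userspace counterpart (not part of this file): semctl(2)
 * requires the caller to define union semun itself.
 *
 *	union semun {
 *		int val;
 *		struct semid_ds *buf;
 *		unsigned short *array;
 *	};
 *
 *	union semun arg = { .val = 1 };
 *	semctl(id, 0, SETVAL, arg);	// semval = 1, undo adjustments cleared
 */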
1326
1327static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1328		int cmd, void __user *p)
1329{
1330	struct sem_array *sma;
1331	struct sem *curr;
1332	int err, nsems;
1333	ushort fast_sem_io[SEMMSL_FAST];
1334	ushort *sem_io = fast_sem_io;
1335	DEFINE_WAKE_Q(wake_q);
1336
1337	rcu_read_lock();
1338	sma = sem_obtain_object_check(ns, semid);
1339	if (IS_ERR(sma)) {
1340		rcu_read_unlock();
1341		return PTR_ERR(sma);
1342	}
1343
1344	nsems = sma->sem_nsems;
1345
1346	err = -EACCES;
1347	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1348		goto out_rcu_wakeup;
1349
1350	err = security_sem_semctl(sma, cmd);
1351	if (err)
1352		goto out_rcu_wakeup;
1353
1354	err = -EACCES;
1355	switch (cmd) {
1356	case GETALL:
1357	{
1358		ushort __user *array = p;
1359		int i;
1360
1361		sem_lock(sma, NULL, -1);
1362		if (!ipc_valid_object(&sma->sem_perm)) {
1363			err = -EIDRM;
1364			goto out_unlock;
1365		}
1366		if (nsems > SEMMSL_FAST) {
1367			if (!ipc_rcu_getref(sma)) {
1368				err = -EIDRM;
1369				goto out_unlock;
1370			}
1371			sem_unlock(sma, -1);
1372			rcu_read_unlock();
1373			sem_io = ipc_alloc(sizeof(ushort)*nsems);
1374			if (sem_io == NULL) {
1375				ipc_rcu_putref(sma, sem_rcu_free);
1376				return -ENOMEM;
1377			}
1378
1379			rcu_read_lock();
1380			sem_lock_and_putref(sma);
1381			if (!ipc_valid_object(&sma->sem_perm)) {
1382				err = -EIDRM;
1383				goto out_unlock;
1384			}
1385		}
1386		for (i = 0; i < sma->sem_nsems; i++)
1387			sem_io[i] = sma->sem_base[i].semval;
1388		sem_unlock(sma, -1);
1389		rcu_read_unlock();
1390		err = 0;
1391		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1392			err = -EFAULT;
1393		goto out_free;
1394	}
1395	case SETALL:
1396	{
1397		int i;
1398		struct sem_undo *un;
1399
1400		if (!ipc_rcu_getref(sma)) {
1401			err = -EIDRM;
1402			goto out_rcu_wakeup;
1403		}
1404		rcu_read_unlock();
1405
1406		if (nsems > SEMMSL_FAST) {
1407			sem_io = ipc_alloc(sizeof(ushort)*nsems);
1408			if (sem_io == NULL) {
1409				ipc_rcu_putref(sma, sem_rcu_free);
1410				return -ENOMEM;
1411			}
1412		}
1413
1414		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
1415			ipc_rcu_putref(sma, sem_rcu_free);
1416			err = -EFAULT;
1417			goto out_free;
1418		}
1419
1420		for (i = 0; i < nsems; i++) {
1421			if (sem_io[i] > SEMVMX) {
1422				ipc_rcu_putref(sma, sem_rcu_free);
1423				err = -ERANGE;
1424				goto out_free;
1425			}
1426		}
1427		rcu_read_lock();
1428		sem_lock_and_putref(sma);
1429		if (!ipc_valid_object(&sma->sem_perm)) {
1430			err = -EIDRM;
1431			goto out_unlock;
1432		}
1433
1434		for (i = 0; i < nsems; i++) {
1435			sma->sem_base[i].semval = sem_io[i];
1436			sma->sem_base[i].sempid = task_tgid_vnr(current);
1437		}
1438
1439		ipc_assert_locked_object(&sma->sem_perm);
1440		list_for_each_entry(un, &sma->list_id, list_id) {
1441			for (i = 0; i < nsems; i++)
1442				un->semadj[i] = 0;
1443		}
1444		sma->sem_ctime = get_seconds();
1445		/* maybe some queued-up processes were waiting for this */
1446		do_smart_update(sma, NULL, 0, 0, &wake_q);
1447		err = 0;
1448		goto out_unlock;
1449	}
1450	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1451	}
1452	err = -EINVAL;
1453	if (semnum < 0 || semnum >= nsems)
1454		goto out_rcu_wakeup;
1455
1456	sem_lock(sma, NULL, -1);
1457	if (!ipc_valid_object(&sma->sem_perm)) {
1458		err = -EIDRM;
1459		goto out_unlock;
1460	}
1461	curr = &sma->sem_base[semnum];
1462
1463	switch (cmd) {
1464	case GETVAL:
1465		err = curr->semval;
1466		goto out_unlock;
1467	case GETPID:
1468		err = curr->sempid;
1469		goto out_unlock;
1470	case GETNCNT:
1471		err = count_semcnt(sma, semnum, 0);
1472		goto out_unlock;
1473	case GETZCNT:
1474		err = count_semcnt(sma, semnum, 1);
1475		goto out_unlock;
1476	}
1477
1478out_unlock:
1479	sem_unlock(sma, -1);
1480out_rcu_wakeup:
1481	rcu_read_unlock();
1482	wake_up_q(&wake_q);
1483out_free:
1484	if (sem_io != fast_sem_io)
1485		ipc_free(sem_io);
1486	return err;
1487}
1488
1489static inline unsigned long
1490copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1491{
1492	switch (version) {
1493	case IPC_64:
1494		if (copy_from_user(out, buf, sizeof(*out)))
1495			return -EFAULT;
1496		return 0;
1497	case IPC_OLD:
1498	    {
1499		struct semid_ds tbuf_old;
1500
1501		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1502			return -EFAULT;
1503
1504		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
1505		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
1506		out->sem_perm.mode	= tbuf_old.sem_perm.mode;
1507
1508		return 0;
1509	    }
1510	default:
1511		return -EINVAL;
1512	}
1513}
1514
1515/*
1516 * This function handles some semctl commands which require the rwsem
1517 * to be held in write mode.
1518 * NOTE: no locks must be held, the rwsem is taken inside this function.
1519 */
1520static int semctl_down(struct ipc_namespace *ns, int semid,
1521		       int cmd, int version, void __user *p)
1522{
1523	struct sem_array *sma;
1524	int err;
1525	struct semid64_ds semid64;
1526	struct kern_ipc_perm *ipcp;
1527
1528	if (cmd == IPC_SET) {
1529		if (copy_semid_from_user(&semid64, p, version))
1530			return -EFAULT;
1531	}
1532
1533	down_write(&sem_ids(ns).rwsem);
1534	rcu_read_lock();
1535
1536	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
1537				      &semid64.sem_perm, 0);
1538	if (IS_ERR(ipcp)) {
1539		err = PTR_ERR(ipcp);
1540		goto out_unlock1;
1541	}
1542
1543	sma = container_of(ipcp, struct sem_array, sem_perm);
1544
1545	err = security_sem_semctl(sma, cmd);
1546	if (err)
1547		goto out_unlock1;
1548
1549	switch (cmd) {
1550	case IPC_RMID:
1551		sem_lock(sma, NULL, -1);
1552		/* freeary unlocks the ipc object and rcu */
1553		freeary(ns, ipcp);
1554		goto out_up;
1555	case IPC_SET:
1556		sem_lock(sma, NULL, -1);
1557		err = ipc_update_perm(&semid64.sem_perm, ipcp);
1558		if (err)
1559			goto out_unlock0;
1560		sma->sem_ctime = get_seconds();
1561		break;
1562	default:
1563		err = -EINVAL;
1564		goto out_unlock1;
1565	}
1566
1567out_unlock0:
1568	sem_unlock(sma, -1);
1569out_unlock1:
1570	rcu_read_unlock();
1571out_up:
1572	up_write(&sem_ids(ns).rwsem);
1573	return err;
1574}
1575
1576SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1577{
1578	int version;
1579	struct ipc_namespace *ns;
1580	void __user *p = (void __user *)arg;
1581
1582	if (semid < 0)
1583		return -EINVAL;
1584
1585	version = ipc_parse_version(&cmd);
1586	ns = current->nsproxy->ipc_ns;
1587
1588	switch (cmd) {
1589	case IPC_INFO:
1590	case SEM_INFO:
1591	case IPC_STAT:
1592	case SEM_STAT:
1593		return semctl_nolock(ns, semid, cmd, version, p);
1594	case GETALL:
1595	case GETVAL:
1596	case GETPID:
1597	case GETNCNT:
1598	case GETZCNT:
1599	case SETALL:
1600		return semctl_main(ns, semid, semnum, cmd, p);
1601	case SETVAL:
1602		return semctl_setval(ns, semid, semnum, arg);
1603	case IPC_RMID:
1604	case IPC_SET:
1605		return semctl_down(ns, semid, cmd, version, p);
1606	default:
1607		return -EINVAL;
1608	}
1609}
1610
1611/* If the task doesn't already have an undo_list, then allocate one
1612 * here.  We guarantee there is only one thread using this undo list,
1613 * and current is THE ONE
1614 *
1615 * If this allocation and assignment succeeds, but later
1616 * portions of this code fail, there is no need to free the sem_undo_list.
1617 * Just let it stay associated with the task, and it'll be freed later
1618 * at exit time.
1619 *
1620 * This can block, so callers must hold no locks.
1621 */
1622static inline int get_undo_list(struct sem_undo_list **undo_listp)
1623{
1624	struct sem_undo_list *undo_list;
1625
1626	undo_list = current->sysvsem.undo_list;
1627	if (!undo_list) {
1628		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1629		if (undo_list == NULL)
1630			return -ENOMEM;
1631		spin_lock_init(&undo_list->lock);
1632		atomic_set(&undo_list->refcnt, 1);
1633		INIT_LIST_HEAD(&undo_list->list_proc);
1634
1635		current->sysvsem.undo_list = undo_list;
1636	}
1637	*undo_listp = undo_list;
1638	return 0;
1639}
1640
1641static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1642{
1643	struct sem_undo *un;
1644
1645	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1646		if (un->semid == semid)
1647			return un;
1648	}
1649	return NULL;
1650}
1651
1652static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1653{
1654	struct sem_undo *un;
1655
1656	assert_spin_locked(&ulp->lock);
1657
1658	un = __lookup_undo(ulp, semid);
1659	if (un) {
1660		list_del_rcu(&un->list_proc);
1661		list_add_rcu(&un->list_proc, &ulp->list_proc);
1662	}
1663	return un;
1664}
1665
1666/**
1667 * find_alloc_undo - lookup (and if not present create) undo array
1668 * @ns: namespace
1669 * @semid: semaphore array id
1670 *
1671 * The function looks up (and if not present creates) the undo structure.
1672 * The size of the undo structure depends on the size of the semaphore
1673 * array, thus the alloc path is not that straightforward.
1674 * Lifetime rules: sem_undo is rcu-protected; on success, the function
1675 * performs a rcu_read_lock().
1676 */
1677static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1678{
1679	struct sem_array *sma;
1680	struct sem_undo_list *ulp;
1681	struct sem_undo *un, *new;
1682	int nsems, error;
1683
1684	error = get_undo_list(&ulp);
1685	if (error)
1686		return ERR_PTR(error);
1687
1688	rcu_read_lock();
1689	spin_lock(&ulp->lock);
1690	un = lookup_undo(ulp, semid);
1691	spin_unlock(&ulp->lock);
1692	if (likely(un != NULL))
1693		goto out;
1694
1695	/* no undo structure around - allocate one. */
1696	/* step 1: figure out the size of the semaphore array */
1697	sma = sem_obtain_object_check(ns, semid);
1698	if (IS_ERR(sma)) {
1699		rcu_read_unlock();
1700		return ERR_CAST(sma);
1701	}
1702
1703	nsems = sma->sem_nsems;
1704	if (!ipc_rcu_getref(sma)) {
1705		rcu_read_unlock();
1706		un = ERR_PTR(-EIDRM);
1707		goto out;
1708	}
1709	rcu_read_unlock();
1710
1711	/* step 2: allocate new undo structure */
1712	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1713	if (!new) {
1714		ipc_rcu_putref(sma, sem_rcu_free);
1715		return ERR_PTR(-ENOMEM);
1716	}
1717
1718	/* step 3: Acquire the lock on semaphore array */
1719	rcu_read_lock();
1720	sem_lock_and_putref(sma);
1721	if (!ipc_valid_object(&sma->sem_perm)) {
1722		sem_unlock(sma, -1);
1723		rcu_read_unlock();
1724		kfree(new);
1725		un = ERR_PTR(-EIDRM);
1726		goto out;
1727	}
1728	spin_lock(&ulp->lock);
1729
1730	/*
1731	 * step 4: check for races: did someone else allocate the undo struct?
1732	 */
1733	un = lookup_undo(ulp, semid);
1734	if (un) {
1735		kfree(new);
1736		goto success;
1737	}
1738	/* step 5: initialize & link new undo structure */
1739	new->semadj = (short *) &new[1];
1740	new->ulp = ulp;
1741	new->semid = semid;
1742	assert_spin_locked(&ulp->lock);
1743	list_add_rcu(&new->list_proc, &ulp->list_proc);
1744	ipc_assert_locked_object(&sma->sem_perm);
1745	list_add(&new->list_id, &sma->list_id);
1746	un = new;
1747
1748success:
1749	spin_unlock(&ulp->lock);
1750	sem_unlock(sma, -1);
1751out:
1752	return un;
1753}
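/*
 * Illustrative trigger for this allocation path (not part of this file):
 * the first SEM_UNDO operation a task issues against a given semaphore set
 * allocates the undo array looked up above.
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };
 *	semop(id, &op, 1);	// find_alloc_undo() creates the sem_undo entry
 */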
1754
1755SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1756		unsigned, nsops, const struct timespec __user *, timeout)
1757{
1758	int error = -EINVAL;
1759	struct sem_array *sma;
1760	struct sembuf fast_sops[SEMOPM_FAST];
1761	struct sembuf *sops = fast_sops, *sop;
1762	struct sem_undo *un;
1763	int max, locknum;
1764	bool undos = false, alter = false, dupsop = false;
1765	struct sem_queue queue;
1766	unsigned long dup = 0, jiffies_left = 0;
1767	struct ipc_namespace *ns;
1768
1769	ns = current->nsproxy->ipc_ns;
1770
1771	if (nsops < 1 || semid < 0)
1772		return -EINVAL;
1773	if (nsops > ns->sc_semopm)
1774		return -E2BIG;
1775	if (nsops > SEMOPM_FAST) {
1776		sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
1777		if (sops == NULL)
1778			return -ENOMEM;
1779	}
1780
1781	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
1782		error =  -EFAULT;
1783		goto out_free;
1784	}
1785
1786	if (timeout) {
1787		struct timespec _timeout;
1788		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
1789			error = -EFAULT;
1790			goto out_free;
1791		}
1792		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
1793			_timeout.tv_nsec >= 1000000000L) {
1794			error = -EINVAL;
1795			goto out_free;
1796		}
1797		jiffies_left = timespec_to_jiffies(&_timeout);
1798	}
1799
1800	max = 0;
1801	for (sop = sops; sop < sops + nsops; sop++) {
1802		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
1803
1804		if (sop->sem_num >= max)
1805			max = sop->sem_num;
1806		if (sop->sem_flg & SEM_UNDO)
1807			undos = true;
1808		if (dup & mask) {
1809			/*
1810			 * There was a previous alter access that appears
1811			 * to have accessed the same semaphore, thus use
1812			 * the dupsop logic. "appears", because the detection
1813			 * can only check % BITS_PER_LONG.
1814			 */
1815			dupsop = true;
1816		}
1817		if (sop->sem_op != 0) {
1818			alter = true;
1819			dup |= mask;
1820		}
1821	}
1822
1823	if (undos) {
1824		/* On success, find_alloc_undo takes the rcu_read_lock */
1825		un = find_alloc_undo(ns, semid);
1826		if (IS_ERR(un)) {
1827			error = PTR_ERR(un);
1828			goto out_free;
1829		}
1830	} else {
1831		un = NULL;
1832		rcu_read_lock();
1833	}
1834
1835	sma = sem_obtain_object_check(ns, semid);
1836	if (IS_ERR(sma)) {
1837		rcu_read_unlock();
1838		error = PTR_ERR(sma);
1839		goto out_free;
1840	}
1841
1842	error = -EFBIG;
1843	if (max >= sma->sem_nsems) {
1844		rcu_read_unlock();
1845		goto out_free;
1846	}
1847
1848	error = -EACCES;
1849	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
1850		rcu_read_unlock();
1851		goto out_free;
1852	}
1853
1854	error = security_sem_semop(sma, sops, nsops, alter);
1855	if (error) {
1856		rcu_read_unlock();
1857		goto out_free;
1858	}
1859
1860	error = -EIDRM;
1861	locknum = sem_lock(sma, sops, nsops);
1862	/*
1863	 * We eventually might perform the following check in a lockless
1864	 * fashion, considering ipc_valid_object() locking constraints.
1865	 * If nsops == 1 and there is no contention for sem_perm.lock, then
1866	 * only a per-semaphore lock is held and it's OK to proceed with the
1867	 * check below. More details on the fine grained locking scheme
1868	 * entangled here and why it's RMID race safe on comments at sem_lock()
1869	 */
1870	if (!ipc_valid_object(&sma->sem_perm))
1871		goto out_unlock_free;
1872	/*
1873	 * semid identifiers are not unique - find_alloc_undo may have
1874	 * allocated an undo structure, it was invalidated by an RMID
1875	 * and now a new array has received the same id. Check and fail.
1876	 * This case can be detected by checking un->semid. The existence of
1877	 * "un" itself is guaranteed by rcu.
1878	 */
1879	if (un && un->semid == -1)
1880		goto out_unlock_free;
1881
1882	queue.sops = sops;
1883	queue.nsops = nsops;
1884	queue.undo = un;
1885	queue.pid = task_tgid_vnr(current);
1886	queue.alter = alter;
1887	queue.dupsop = dupsop;
1888
1889	error = perform_atomic_semop(sma, &queue);
1890	if (error == 0) { /* non-blocking successful path */
1891		DEFINE_WAKE_Q(wake_q);
1892
1893		/*
1894		 * If the operation was successful, then do
1895		 * the required updates.
1896		 */
1897		if (alter)
1898			do_smart_update(sma, sops, nsops, 1, &wake_q);
1899		else
1900			set_semotime(sma, sops);
1901
1902		sem_unlock(sma, locknum);
1903		rcu_read_unlock();
1904		wake_up_q(&wake_q);
1905
1906		goto out_free;
1907	}
1908	if (error < 0) /* non-blocking error path */
1909		goto out_unlock_free;
1910
1911	/*
1912	 * We need to sleep on this operation, so we put the current
1913	 * task into the pending queue and go to sleep.
1914	 */
1915	if (nsops == 1) {
1916		struct sem *curr;
1917		curr = &sma->sem_base[sops->sem_num];
1918
1919		if (alter) {
1920			if (sma->complex_count) {
1921				list_add_tail(&queue.list,
1922						&sma->pending_alter);
1923			} else {
1924
1925				list_add_tail(&queue.list,
1926						&curr->pending_alter);
1927			}
1928		} else {
1929			list_add_tail(&queue.list, &curr->pending_const);
1930		}
1931	} else {
1932		if (!sma->complex_count)
1933			merge_queues(sma);
1934
1935		if (alter)
1936			list_add_tail(&queue.list, &sma->pending_alter);
1937		else
1938			list_add_tail(&queue.list, &sma->pending_const);
1939
1940		sma->complex_count++;
1941	}
1942
1943	do {
1944		queue.status = -EINTR;
1945		queue.sleeper = current;
1946
1947		__set_current_state(TASK_INTERRUPTIBLE);
1948		sem_unlock(sma, locknum);
1949		rcu_read_unlock();
1950
1951		if (timeout)
1952			jiffies_left = schedule_timeout(jiffies_left);
1953		else
1954			schedule();
1955
1956		/*
1957		 * fastpath: the semop has completed, either successfully or
1958		 * not; which of the two is, from the syscall pov, quite
1959		 * irrelevant to us at this point - we're done.
1960		 *
1961		 * We _do_ care, nonetheless, about being awoken by a signal or
1962		 * spuriously.  The queue.status is checked again in the
1963		 * slowpath (aka after taking sem_lock), such that we can detect
1964		 * scenarios where we were awakened externally, during the
1965		 * window between wake_q_add() and wake_up_q().
1966		 */
1967		error = READ_ONCE(queue.status);
1968		if (error != -EINTR) {
1969			/*
1970			 * User space could assume that semop() is a memory
1971			 * barrier: Without the mb(), the cpu could
1972			 * speculatively read in userspace stale data that was
1973			 * overwritten by the previous owner of the semaphore.
1974			 */
1975			smp_mb();
1976			goto out_free;
1977		}
1978
1979		rcu_read_lock();
1980		locknum = sem_lock(sma, sops, nsops);
1981
1982		if (!ipc_valid_object(&sma->sem_perm))
1983			goto out_unlock_free;
1984
1985		error = READ_ONCE(queue.status);
1986
1987		/*
1988		 * If queue.status != -EINTR we are woken up by another process.
1989		 * Leave without unlink_queue(), but with sem_unlock().
1990		 */
1991		if (error != -EINTR)
1992			goto out_unlock_free;
1993
1994		/*
1995		 * If an interrupt occurred we have to clean up the queue.
1996		 */
1997		if (timeout && jiffies_left == 0)
1998			error = -EAGAIN;
1999	} while (error == -EINTR && !signal_pending(current)); /* spurious */
2000
2001	unlink_queue(sma, &queue);
2002
2003out_unlock_free:
2004	sem_unlock(sma, locknum);
2005	rcu_read_unlock();
2006out_free:
2007	if (sops != fast_sops)
2008		kfree(sops);
2009	return error;
2010}
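/*
 * A minimal user-space sketch of the blocking semantics implemented
 * above (illustrative, not part of the kernel sources; glibc exposes
 * semtimedop() under _GNU_SOURCE). A malformed timespec yields EINVAL,
 * an expired timeout EAGAIN, an interrupting signal EINTR:
 *
 *	#include <sys/sem.h>
 *	#include <time.h>
 *
 *	int wait_for_token(int semid)	// semid assumed to come from semget()
 *	{
 *		struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		return semtimedop(semid, &op, 1, &ts);	// 0, or -1 with errno set
 *	}
 */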
2011
2012SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2013		unsigned, nsops)
2014{
2015	return sys_semtimedop(semid, tsops, nsops, NULL);
2016}
2017
2018/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2019 * parent and child tasks.
2020 */
2021
2022int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2023{
2024	struct sem_undo_list *undo_list;
2025	int error;
2026
2027	if (clone_flags & CLONE_SYSVSEM) {
2028		error = get_undo_list(&undo_list);
2029		if (error)
2030			return error;
2031		atomic_inc(&undo_list->refcnt);
2032		tsk->sysvsem.undo_list = undo_list;
2033	} else
2034		tsk->sysvsem.undo_list = NULL;
2035
2036	return 0;
2037}
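/*
 * In practice CLONE_SYSVSEM means that all threads of a process (glibc's
 * pthread_create() passes the flag) share one undo list, so the semadj
 * values are applied once, when the last sharer exits. A task can detach
 * from the shared list again; a hedged sketch using unshare():
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	if (unshare(CLONE_SYSVSEM) == -1)	// drop the shared undo list
 *		perror("unshare");
 */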
2038
2039/*
2040 * add semadj values to semaphores, free undo structures.
2041 * undo structures are not freed when semaphore arrays are destroyed
2042 * so some of them may be out of date.
2043 * IMPLEMENTATION NOTE: There is some confusion over whether the
2044 * set of adjustments should be applied in an atomic manner or not.
2045 * That is, if we are attempting to decrement the semval
2046 * should we queue up and wait until we can do so legally?
2047 * The original implementation attempted to do this (queue and wait).
2048 * The current implementation does not do so. The POSIX standard
2049 * and SVID should be consulted to determine what behavior is mandated.
2050 */
2051void exit_sem(struct task_struct *tsk)
2052{
2053	struct sem_undo_list *ulp;
2054
2055	ulp = tsk->sysvsem.undo_list;
2056	if (!ulp)
2057		return;
2058	tsk->sysvsem.undo_list = NULL;
2059
2060	if (!atomic_dec_and_test(&ulp->refcnt))
2061		return;
2062
2063	for (;;) {
2064		struct sem_array *sma;
2065		struct sem_undo *un;
2066		int semid, i;
2067		DEFINE_WAKE_Q(wake_q);
2068
2069		cond_resched();
2070
2071		rcu_read_lock();
2072		un = list_entry_rcu(ulp->list_proc.next,
2073				    struct sem_undo, list_proc);
2074		if (&un->list_proc == &ulp->list_proc) {
2075			/*
2076			 * We must wait for freeary() before freeing this ulp,
2077			 * in case we raced with the last sem_undo. There is a small
2078			 * possibility that we exit while freeary() hasn't yet
2079			 * finished unlocking sem_undo_list.
2080			 */
2081			spin_unlock_wait(&ulp->lock);
2082			rcu_read_unlock();
2083			break;
2084		}
2085		spin_lock(&ulp->lock);
2086		semid = un->semid;
2087		spin_unlock(&ulp->lock);
2088
2089		/* exit_sem raced with IPC_RMID, nothing to do */
2090		if (semid == -1) {
2091			rcu_read_unlock();
2092			continue;
2093		}
2094
2095		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2096		/* exit_sem raced with IPC_RMID, nothing to do */
2097		if (IS_ERR(sma)) {
2098			rcu_read_unlock();
2099			continue;
2100		}
2101
2102		sem_lock(sma, NULL, -1);
2103		/* exit_sem raced with IPC_RMID, nothing to do */
2104		if (!ipc_valid_object(&sma->sem_perm)) {
2105			sem_unlock(sma, -1);
2106			rcu_read_unlock();
2107			continue;
2108		}
2109		un = __lookup_undo(ulp, semid);
2110		if (un == NULL) {
2111			/* exit_sem raced with IPC_RMID+semget() that created
2112			 * exactly the same semid. Nothing to do.
2113			 */
2114			sem_unlock(sma, -1);
2115			rcu_read_unlock();
2116			continue;
2117		}
2118
2119		/* remove un from the linked lists */
2120		ipc_assert_locked_object(&sma->sem_perm);
2121		list_del(&un->list_id);
2122
2123		/* we are the last process using this ulp, acquiring ulp->lock
2124		 * isn't required. Besides that, we are also protected against
2125		 * IPC_RMID as we hold sma->sem_perm lock now
2126		 */
2127		list_del_rcu(&un->list_proc);
2128
2129		/* perform adjustments registered in un */
2130		for (i = 0; i < sma->sem_nsems; i++) {
2131			struct sem *semaphore = &sma->sem_base[i];
2132			if (un->semadj[i]) {
2133				semaphore->semval += un->semadj[i];
2134				/*
2135				 * Range checks of the new semaphore value,
2136				 * not defined by SUS:
2137				 * - Some unices ignore the undo entirely
2138				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2139				 * - some cap the value (e.g. FreeBSD caps
2140				 *   at 0, but doesn't enforce SEMVMX)
2141				 *
2142				 * Linux caps the semaphore value, both at 0
2143				 * and at SEMVMX.
2144				 *
2145				 *	Manfred <manfred@colorfullife.com>
2146				 */
2147				if (semaphore->semval < 0)
2148					semaphore->semval = 0;
2149				if (semaphore->semval > SEMVMX)
2150					semaphore->semval = SEMVMX;
2151				semaphore->sempid = task_tgid_vnr(current);
2152			}
2153		}
2154		/* maybe some queued-up processes were waiting for this */
2155		do_smart_update(sma, NULL, 0, 1, &wake_q);
2156		sem_unlock(sma, -1);
2157		rcu_read_unlock();
2158		wake_up_q(&wake_q);
2159
2160		kfree_rcu(un, rcu);
2161	}
2162	kfree(ulp);
2163}
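/*
 * The user-visible effect of exit_sem(): a SEM_UNDO operation is rolled
 * back (clamped to 0..SEMVMX, as implemented above) when the task exits,
 * so a crashed "lock holder" cannot leave the semaphore taken forever.
 * Illustrative sketch, assuming semid names a set whose semaphore 0 was
 * initialized to 1:
 *
 *	struct sembuf lock = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *
 *	if (fork() == 0) {
 *		semop(semid, &lock, 1);	// child takes the lock: semval 1 -> 0
 *		_exit(0);		// exit_sem() re-adds semadj: semval is 1 again
 *	}
 */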
2164
2165#ifdef CONFIG_PROC_FS
2166static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2167{
2168	struct user_namespace *user_ns = seq_user_ns(s);
2169	struct sem_array *sma = it;
2170	time_t sem_otime;
2171
2172	/*
2173	 * The proc interface isn't aware of sem_lock(), it calls
2174	 * ipc_lock_object() directly (in sysvipc_find_ipc).
2175	 * In order to stay compatible with sem_lock(), we must
2176	 * enter / leave complex_mode.
2177	 */
2178	complexmode_enter(sma);
2179
2180	sem_otime = get_semotime(sma);
2181
2182	seq_printf(s,
2183		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
2184		   sma->sem_perm.key,
2185		   sma->sem_perm.id,
2186		   sma->sem_perm.mode,
2187		   sma->sem_nsems,
2188		   from_kuid_munged(user_ns, sma->sem_perm.uid),
2189		   from_kgid_munged(user_ns, sma->sem_perm.gid),
2190		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
2191		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
2192		   sem_otime,
2193		   sma->sem_ctime);
2194
2195	complexmode_tryleave(sma);
2196
2197	return 0;
2198}
2199#endif
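/*
 * Each row that sysvipc_sem_proc_show() emits can be parsed back with the
 * matching scanf() conversions; a hedged user-space sketch, where "line"
 * is assumed to hold one data row read from /proc/sysvipc/sem:
 *
 *	int key, semid;
 *	unsigned int perms, nsems, uid, gid, cuid, cgid;
 *	unsigned long otime, ctime;
 *
 *	sscanf(line, "%d %d %o %u %u %u %u %u %lu %lu",
 *	       &key, &semid, &perms, &nsems,
 *	       &uid, &gid, &cuid, &cgid, &otime, &ctime);
 */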
v5.9
   1// SPDX-License-Identifier: GPL-2.0
  72
  73#include <linux/compat.h>
  74#include <linux/slab.h>
  75#include <linux/spinlock.h>
  76#include <linux/init.h>
  77#include <linux/proc_fs.h>
  78#include <linux/time.h>
  79#include <linux/security.h>
  80#include <linux/syscalls.h>
  81#include <linux/audit.h>
  82#include <linux/capability.h>
  83#include <linux/seq_file.h>
  84#include <linux/rwsem.h>
  85#include <linux/nsproxy.h>
  86#include <linux/ipc_namespace.h>
  87#include <linux/sched/wake_q.h>
  88#include <linux/nospec.h>
  89#include <linux/rhashtable.h>
  90
  91#include <linux/uaccess.h>
  92#include "util.h"
  93
  94/* One semaphore structure for each semaphore in the system. */
  95struct sem {
  96	int	semval;		/* current value */
  97	/*
  98	 * PID of the process that last modified the semaphore. For
  99	 * Linux, specifically these are:
 100	 *  - semop
 101	 *  - semctl, via SETVAL and SETALL.
 102	 *  - at task exit when performing undo adjustments (see exit_sem).
 103	 */
 104	struct pid *sempid;
 105	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
 106	struct list_head pending_alter; /* pending single-sop operations */
 107					/* that alter the semaphore */
 108	struct list_head pending_const; /* pending single-sop operations */
 109					/* that do not alter the semaphore*/
 110	time64_t	 sem_otime;	/* candidate for sem_otime */
 111} ____cacheline_aligned_in_smp;
 112
 113/* One sem_array data structure for each set of semaphores in the system. */
 114struct sem_array {
 115	struct kern_ipc_perm	sem_perm;	/* permissions .. see ipc.h */
 116	time64_t		sem_ctime;	/* create/last semctl() time */
 117	struct list_head	pending_alter;	/* pending operations */
 118						/* that alter the array */
 119	struct list_head	pending_const;	/* pending complex operations */
 120						/* that do not alter semvals */
 121	struct list_head	list_id;	/* undo requests on this array */
 122	int			sem_nsems;	/* no. of semaphores in array */
 123	int			complex_count;	/* pending complex operations */
 124	unsigned int		use_global_lock;/* >0: global lock required */
 125
 126	struct sem		sems[];
 127} __randomize_layout;
 128
 129/* One queue for each sleeping process in the system. */
 130struct sem_queue {
 131	struct list_head	list;	 /* queue of pending operations */
 132	struct task_struct	*sleeper; /* this process */
 133	struct sem_undo		*undo;	 /* undo structure */
 134	struct pid		*pid;	 /* process id of requesting process */
 135	int			status;	 /* completion status of operation */
 136	struct sembuf		*sops;	 /* array of pending operations */
 137	struct sembuf		*blocking; /* the operation that blocked */
 138	int			nsops;	 /* number of operations */
 139	bool			alter;	 /* does *sops alter the array? */
 140	bool                    dupsop;	 /* sops on more than one sem_num */
 141};
 142
 143/* Each task has a list of undo requests. They are executed automatically
 144 * when the process exits.
 145 */
 146struct sem_undo {
 147	struct list_head	list_proc;	/* per-process list: *
 148						 * all undos from one process
 149						 * rcu protected */
 150	struct rcu_head		rcu;		/* rcu struct for sem_undo */
 151	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
 152	struct list_head	list_id;	/* per semaphore array list:
 153						 * all undos for one array */
 154	int			semid;		/* semaphore set identifier */
 155	short			*semadj;	/* array of adjustments */
 156						/* one per semaphore */
 157};
 158
 159/* sem_undo_list controls shared access to the list of sem_undo structures
 160 * that may be shared among all tasks of a CLONE_SYSVSEM task group.
 161 */
 162struct sem_undo_list {
 163	refcount_t		refcnt;
 164	spinlock_t		lock;
 165	struct list_head	list_proc;
 166};
 167
 168
 169#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])
 170
 171static int newary(struct ipc_namespace *, struct ipc_params *);
 172static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
 173#ifdef CONFIG_PROC_FS
 174static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 175#endif
 176
 177#define SEMMSL_FAST	256 /* 512 bytes on stack */
 178#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
 179
 180/*
 181 * Switching from the mode suitable for simple ops
 182 * to the mode for complex ops is costly. Therefore:
 183 * use some hysteresis
 184 */
 185#define USE_GLOBAL_LOCK_HYSTERESIS	10
 186
 187/*
 188 * Locking:
 189 * a) global sem_lock() for read/write
 190 *	sem_undo.id_next,
 191 *	sem_array.complex_count,
 192 *	sem_array.pending{_alter,_const},
 193 *	sem_array.sem_undo
 194 *
 195 * b) global or semaphore sem_lock() for read/write:
 196 *	sem_array.sems[i].pending_{const,alter}:
 197 *
 198 * c) special:
 199 *	sem_undo_list.list_proc:
 200 *	* undo_list->lock for write
 201 *	* rcu for read
 202 *	use_global_lock:
 203 *	* global sem_lock() for write
 204 *	* either local or global sem_lock() for read.
 205 *
 206 * Memory ordering:
 207 * Most ordering is enforced by using spin_lock() and spin_unlock().
 208 *
 209 * Exceptions:
 210 * 1) use_global_lock: (SEM_BARRIER_1)
 211 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
 212 * using smp_store_release(): Immediately after setting it to 0,
 213 * a simple op can start.
 214 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
 215 * smp_load_acquire().
 216 * Setting it from 0 to non-zero must be ordered with regards to
 217 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 218 * is inside a spin_lock() and after a write from 0 to non-zero a
 219 * spin_lock()+spin_unlock() is done.
 220 *
 221 * 2) queue.status: (SEM_BARRIER_2)
 222 * Initialization is done while holding sem_lock(), so no further barrier is
 223 * required.
 224 * Setting it to a result code is a RELEASE, this is ensured by both a
 225 * smp_store_release() (for case a) and while holding sem_lock()
 226 * (for case b).
 227 * The ACQUIRE when reading the result code without holding sem_lock() is
 228 * achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep().
 229 * (case a above).
 230 * Reading the result code while holding sem_lock() needs no further barriers,
 231 * the locks inside sem_lock() enforce ordering (case b above)
 232 *
 233 * 3) current->state:
 234 * current->state is set to TASK_INTERRUPTIBLE while holding sem_lock().
 235 * The wakeup is handled using the wake_q infrastructure. wake_q wakeups may
 236 * happen immediately after calling wake_q_add. As wake_q_add_safe() is called
 237 * when holding sem_lock(), no further barriers are required.
 238 *
 239 * See also ipc/mqueue.c for more details on the covered races.
 240 */
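/*
 * SEM_BARRIER_1 reduced to its skeleton - a sketch of the pairing that
 * complexmode_tryleave() and the sem_lock() fast path below already
 * implement, not additional code:
 *
 *	// writer, complexmode_tryleave(): publish "simple mode"
 *	smp_store_release(&sma->use_global_lock, 0);
 *
 *	// reader, sem_lock() fast path: take the per-sem lock, then re-check
 *	spin_lock(&sem->lock);
 *	if (!smp_load_acquire(&sma->use_global_lock))
 *		return sops->sem_num;	// safe: complexmode_enter() waits on sem->lock
 */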
 241
 242#define sc_semmsl	sem_ctls[0]
 243#define sc_semmns	sem_ctls[1]
 244#define sc_semopm	sem_ctls[2]
 245#define sc_semmni	sem_ctls[3]
 246
 247void sem_init_ns(struct ipc_namespace *ns)
 248{
 249	ns->sc_semmsl = SEMMSL;
 250	ns->sc_semmns = SEMMNS;
 251	ns->sc_semopm = SEMOPM;
 252	ns->sc_semmni = SEMMNI;
 253	ns->used_sems = 0;
 254	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
 255}
 256
 257#ifdef CONFIG_IPC_NS
 258void sem_exit_ns(struct ipc_namespace *ns)
 259{
 260	free_ipcs(ns, &sem_ids(ns), freeary);
 261	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 262	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
 263}
 264#endif
 265
 266void __init sem_init(void)
 267{
 268	sem_init_ns(&init_ipc_ns);
 269	ipc_init_proc_interface("sysvipc/sem",
 270				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
 271				IPC_SEM_IDS, sysvipc_sem_proc_show);
 272}
 273
 274/**
 275 * unmerge_queues - unmerge queues, if possible.
 276 * @sma: semaphore array
 277 *
 278 * The function unmerges the wait queues if complex_count is 0.
 279 * It must be called prior to dropping the global semaphore array lock.
 280 */
 281static void unmerge_queues(struct sem_array *sma)
 282{
 283	struct sem_queue *q, *tq;
 284
 285	/* complex operations still around? */
 286	if (sma->complex_count)
 287		return;
 288	/*
 289	 * We will switch back to simple mode.
 290	 * Move all pending operation back into the per-semaphore
 291	 * queues.
 292	 */
 293	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
 294		struct sem *curr;
 295		curr = &sma->sems[q->sops[0].sem_num];
 296
 297		list_add_tail(&q->list, &curr->pending_alter);
 298	}
 299	INIT_LIST_HEAD(&sma->pending_alter);
 300}
 301
 302/**
 303 * merge_queues - merge single semop queues into global queue
 304 * @sma: semaphore array
 305 *
 306 * This function merges all per-semaphore queues into the global queue.
 307 * It is necessary to achieve FIFO ordering for the pending single-sop
 308 * operations when a multi-semop operation must sleep.
 309 * Only the alter operations must be moved, the const operations can stay.
 310 */
 311static void merge_queues(struct sem_array *sma)
 312{
 313	int i;
 314	for (i = 0; i < sma->sem_nsems; i++) {
 315		struct sem *sem = &sma->sems[i];
 316
 317		list_splice_init(&sem->pending_alter, &sma->pending_alter);
 318	}
 319}
 320
 321static void sem_rcu_free(struct rcu_head *head)
 322{
 323	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
 324	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
 325
 326	security_sem_free(&sma->sem_perm);
 327	kvfree(sma);
 328}
 329
 330/*
 331 * Enter the mode suitable for non-simple operations:
 332 * Caller must own sem_perm.lock.
 333 */
 334static void complexmode_enter(struct sem_array *sma)
 335{
 336	int i;
 337	struct sem *sem;
 338
 339	if (sma->use_global_lock > 0)  {
 340		/*
 341		 * We are already in global lock mode.
 342		 * Nothing to do, just reset the
 343		 * counter until we return to simple mode.
 344		 */
 345		sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 346		return;
 347	}
 348	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 349
 350	for (i = 0; i < sma->sem_nsems; i++) {
 351		sem = &sma->sems[i];
 352		spin_lock(&sem->lock);
 353		spin_unlock(&sem->lock);
 354	}
 355}
 356
 357/*
 358 * Try to leave the mode that disallows simple operations:
 359 * Caller must own sem_perm.lock.
 360 */
 361static void complexmode_tryleave(struct sem_array *sma)
 362{
 363	if (sma->complex_count)  {
 364		/* Complex ops are sleeping.
 365		 * We must stay in complex mode
 366		 */
 367		return;
 368	}
 369	if (sma->use_global_lock == 1) {
 370
 371		/* See SEM_BARRIER_1 for purpose/pairing */
 372		smp_store_release(&sma->use_global_lock, 0);
 373	} else {
 374		sma->use_global_lock--;
 375	}
 376}
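/*
 * Worked example of the hysteresis above: a complex operation leaves
 * use_global_lock at USE_GLOBAL_LOCK_HYSTERESIS (10). Each later
 * globally locked round without sleeping complex ops decrements it
 * (10 -> 9 -> ... -> 1) and only then does the smp_store_release() to 0
 * re-enable the per-semaphore fast path, so a burst of complex ops pays
 * the costly switch in complexmode_enter() once, not per operation.
 */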
 377
 378#define SEM_GLOBAL_LOCK	(-1)
 379/*
 380 * If the request contains only one semaphore operation, and there are
 381 * no complex transactions pending, lock only the semaphore involved.
 382 * Otherwise, lock the entire semaphore array, since we either have
 383 * multiple semaphores in our own semops, or we need to look at
 384 * semaphores from other pending complex operations.
 385 */
 386static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 387			      int nsops)
 388{
 389	struct sem *sem;
 390	int idx;
 391
 392	if (nsops != 1) {
 393		/* Complex operation - acquire a full lock */
 394		ipc_lock_object(&sma->sem_perm);
 395
 396		/* Prevent parallel simple ops */
 397		complexmode_enter(sma);
 398		return SEM_GLOBAL_LOCK;
 399	}
 400
 401	/*
 402	 * Only one semaphore affected - try to optimize locking.
 403	 * Optimized locking is possible if no complex operation
 404	 * is either enqueued or processed right now.
 405	 *
 406	 * Both facts are tracked by use_global_lock.
 407	 */
 408	idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
 409	sem = &sma->sems[idx];
 410
 411	/*
 412	 * Initial check for use_global_lock. Just an optimization,
 413	 * no locking, no memory barrier.
 414	 */
 415	if (!sma->use_global_lock) {
 416		/*
 417		 * It appears that no complex operation is around.
 418		 * Acquire the per-semaphore lock.
 419		 */
 420		spin_lock(&sem->lock);
 421
 422		/* see SEM_BARRIER_1 for purpose/pairing */
 423		if (!smp_load_acquire(&sma->use_global_lock)) {
 424			/* fast path successful! */
 425			return sops->sem_num;
 426		}
 427		spin_unlock(&sem->lock);
 428	}
 429
 430	/* slow path: acquire the full lock */
 431	ipc_lock_object(&sma->sem_perm);
 432
 433	if (sma->use_global_lock == 0) {
 434		/*
 435		 * The use_global_lock mode ended while we waited for
 436		 * sma->sem_perm.lock. Thus we must switch to locking
 437		 * with sem->lock.
 438		 * Unlike in the fast path, there is no need to recheck
 439		 * sma->use_global_lock after we have acquired sem->lock:
 440		 * We own sma->sem_perm.lock, thus use_global_lock cannot
 441		 * change.
 442		 */
 443		spin_lock(&sem->lock);
 444
 445		ipc_unlock_object(&sma->sem_perm);
 446		return sops->sem_num;
 447	} else {
 448		/*
 449		 * Not a false alarm, thus continue to use the global lock
 450		 * mode. No need for complexmode_enter(), this was done by
 451		 * the caller that has set use_global_lock to non-zero.
 452		 */
 453		return SEM_GLOBAL_LOCK;
 454	}
 455}
 456
 457static inline void sem_unlock(struct sem_array *sma, int locknum)
 458{
 459	if (locknum == SEM_GLOBAL_LOCK) {
 460		unmerge_queues(sma);
 461		complexmode_tryleave(sma);
 462		ipc_unlock_object(&sma->sem_perm);
 463	} else {
 464		struct sem *sem = &sma->sems[locknum];
 465		spin_unlock(&sem->lock);
 466	}
 467}
 468
 469/*
 470 * sem_lock_(check_) routines are called in the paths where the rwsem
 471 * is not held.
 472 *
 473 * The caller holds the RCU read lock.
 474 */
 475static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
 476{
 477	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
 478
 479	if (IS_ERR(ipcp))
 480		return ERR_CAST(ipcp);
 481
 482	return container_of(ipcp, struct sem_array, sem_perm);
 483}
 484
 485static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
 486							int id)
 487{
 488	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
 489
 490	if (IS_ERR(ipcp))
 491		return ERR_CAST(ipcp);
 492
 493	return container_of(ipcp, struct sem_array, sem_perm);
 494}
 495
 496static inline void sem_lock_and_putref(struct sem_array *sma)
 497{
 498	sem_lock(sma, NULL, -1);
 499	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 500}
 501
 502static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 503{
 504	ipc_rmid(&sem_ids(ns), &s->sem_perm);
 505}
 506
 507static struct sem_array *sem_alloc(size_t nsems)
 508{
 509	struct sem_array *sma;
 510
 511	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
 512		return NULL;
 513
 514	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
 515	if (unlikely(!sma))
 516		return NULL;
 517
 518	return sma;
 519}
 520
 521/**
 522 * newary - Create a new semaphore set
 523 * @ns: namespace
 524 * @params: ptr to the structure that contains key, semflg and nsems
 525 *
 526 * Called with sem_ids.rwsem held (as a writer)
 527 */
 528static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 529{
 530	int retval;
 531	struct sem_array *sma;
 532	key_t key = params->key;
 533	int nsems = params->u.nsems;
 534	int semflg = params->flg;
 535	int i;
 536
 537	if (!nsems)
 538		return -EINVAL;
 539	if (ns->used_sems + nsems > ns->sc_semmns)
 540		return -ENOSPC;
 541
 542	sma = sem_alloc(nsems);
 543	if (!sma)
 544		return -ENOMEM;
 545
 546	sma->sem_perm.mode = (semflg & S_IRWXUGO);
 547	sma->sem_perm.key = key;
 548
 549	sma->sem_perm.security = NULL;
 550	retval = security_sem_alloc(&sma->sem_perm);
 551	if (retval) {
 552		kvfree(sma);
 553		return retval;
 554	}
 555
 556	for (i = 0; i < nsems; i++) {
 557		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
 558		INIT_LIST_HEAD(&sma->sems[i].pending_const);
 559		spin_lock_init(&sma->sems[i].lock);
 560	}
 561
 562	sma->complex_count = 0;
 563	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 564	INIT_LIST_HEAD(&sma->pending_alter);
 565	INIT_LIST_HEAD(&sma->pending_const);
 566	INIT_LIST_HEAD(&sma->list_id);
 567	sma->sem_nsems = nsems;
 568	sma->sem_ctime = ktime_get_real_seconds();
 569
 570	/* ipc_addid() locks sma upon success. */
 571	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
 572	if (retval < 0) {
 573		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 574		return retval;
 575	}
 576	ns->used_sems += nsems;
 577
 578	sem_unlock(sma, -1);
 579	rcu_read_unlock();
 580
 581	return sma->sem_perm.id;
 582}
 583
 584
 585/*
 586 * Called with sem_ids.rwsem and ipcp locked.
 587 */
 588static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
 589{
 590	struct sem_array *sma;
 591
 592	sma = container_of(ipcp, struct sem_array, sem_perm);
 593	if (params->u.nsems > sma->sem_nsems)
 594		return -EINVAL;
 595
 596	return 0;
 597}
 598
 599long ksys_semget(key_t key, int nsems, int semflg)
 600{
 601	struct ipc_namespace *ns;
 602	static const struct ipc_ops sem_ops = {
 603		.getnew = newary,
 604		.associate = security_sem_associate,
 605		.more_checks = sem_more_checks,
 606	};
 607	struct ipc_params sem_params;
 608
 609	ns = current->nsproxy->ipc_ns;
 610
 611	if (nsems < 0 || nsems > ns->sc_semmsl)
 612		return -EINVAL;
 613
 614	sem_params.key = key;
 615	sem_params.flg = semflg;
 616	sem_params.u.nsems = nsems;
 617
 618	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 619}
 620
 621SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 622{
 623	return ksys_semget(key, nsems, semflg);
 624}
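/*
 * Typical user-space sequence for the syscall above. semget() does not
 * initialize semaphore values, so a SETVAL (or SETALL) must follow
 * before the set is usable. Illustrative sketch relying on glibc's
 * variadic semctl() (strictly portable code passes a union semun):
 *
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	int make_binary_sem(key_t key)
 *	{
 *		int semid = semget(key, 1, IPC_CREAT | 0600);
 *
 *		if (semid >= 0 && semctl(semid, 0, SETVAL, 1) == -1)
 *			return -1;
 *		return semid;
 *	}
 */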
 625
 626/**
 627 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 628 *                               operations on a given array.
 629 * @sma: semaphore array
 630 * @q: struct sem_queue that describes the operation
 631 *
 632 * Caller blocking behavior is as follows, based on the value
 633 * indicated by the semaphore operation (sem_op):
 634 *
 635 *  (1) >0 never blocks.
 636 *  (2)  0 (wait-for-zero operation): semval is non-zero.
 637 *  (3) <0 attempting to decrement semval to a value smaller than zero.
 638 *
 639 * Returns 0 if the operation was possible.
 640 * Returns 1 if the operation is impossible, the caller must sleep.
 641 * Returns <0 for error codes.
 642 */
 643static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 644{
 645	int result, sem_op, nsops;
 646	struct pid *pid;
 647	struct sembuf *sop;
 648	struct sem *curr;
 649	struct sembuf *sops;
 650	struct sem_undo *un;
 651
 652	sops = q->sops;
 653	nsops = q->nsops;
 654	un = q->undo;
 655
 656	for (sop = sops; sop < sops + nsops; sop++) {
 657		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
 658		curr = &sma->sems[idx];
 659		sem_op = sop->sem_op;
 660		result = curr->semval;
 661
 662		if (!sem_op && result)
 663			goto would_block;
 664
 665		result += sem_op;
 666		if (result < 0)
 667			goto would_block;
 668		if (result > SEMVMX)
 669			goto out_of_range;
 670
 671		if (sop->sem_flg & SEM_UNDO) {
 672			int undo = un->semadj[sop->sem_num] - sem_op;
 673			/* Exceeding the undo range is an error. */
 674			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 675				goto out_of_range;
 676			un->semadj[sop->sem_num] = undo;
 677		}
 678
 679		curr->semval = result;
 680	}
 681
 682	sop--;
 683	pid = q->pid;
 684	while (sop >= sops) {
 685		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
 686		sop--;
 687	}
 688
 689	return 0;
 690
 691out_of_range:
 692	result = -ERANGE;
 693	goto undo;
 694
 695would_block:
 696	q->blocking = sop;
 697
 698	if (sop->sem_flg & IPC_NOWAIT)
 699		result = -EAGAIN;
 700	else
 701		result = 1;
 702
 703undo:
 704	sop--;
 705	while (sop >= sops) {
 706		sem_op = sop->sem_op;
 707		sma->sems[sop->sem_num].semval -= sem_op;
 708		if (sop->sem_flg & SEM_UNDO)
 709			un->semadj[sop->sem_num] += sem_op;
 710		sop--;
 711	}
 712
 713	return result;
 714}
 715
 716static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 717{
 718	int result, sem_op, nsops;
 719	struct sembuf *sop;
 720	struct sem *curr;
 721	struct sembuf *sops;
 722	struct sem_undo *un;
 723
 724	sops = q->sops;
 725	nsops = q->nsops;
 726	un = q->undo;
 727
 728	if (unlikely(q->dupsop))
 729		return perform_atomic_semop_slow(sma, q);
 730
 731	/*
 732	 * We scan the semaphore set twice, first to ensure that the entire
 733	 * operation can succeed, therefore avoiding any pointless writes
 734	 * to shared memory and having to undo such changes in order to block
 735	 * until the operations can go through.
 736	 */
 737	for (sop = sops; sop < sops + nsops; sop++) {
 738		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
 739
 740		curr = &sma->sems[idx];
 741		sem_op = sop->sem_op;
 742		result = curr->semval;
 743
 744		if (!sem_op && result)
 745			goto would_block; /* wait-for-zero */
 746
 747		result += sem_op;
 748		if (result < 0)
 749			goto would_block;
 750
 751		if (result > SEMVMX)
 752			return -ERANGE;
 753
 754		if (sop->sem_flg & SEM_UNDO) {
 755			int undo = un->semadj[sop->sem_num] - sem_op;
 756
 757			/* Exceeding the undo range is an error. */
 758			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 759				return -ERANGE;
 760		}
 761	}
 762
 763	for (sop = sops; sop < sops + nsops; sop++) {
 764		curr = &sma->sems[sop->sem_num];
 765		sem_op = sop->sem_op;
 766		result = curr->semval;
 767
 768		if (sop->sem_flg & SEM_UNDO) {
 769			int undo = un->semadj[sop->sem_num] - sem_op;
 770
 771			un->semadj[sop->sem_num] = undo;
 772		}
 773		curr->semval += sem_op;
 774		ipc_update_pid(&curr->sempid, q->pid);
 775	}
 776
 777	return 0;
 778
 779would_block:
 780	q->blocking = sop;
 781	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
 782}
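/*
 * Example of a request that sets q->dupsop and is therefore routed to
 * the _slow variant above: two altering sops in one semop() call that
 * touch the same semaphore; the whole transaction still applies
 * atomically, or not at all:
 *
 *	struct sembuf ops[2] = {
 *		{ .sem_num = 0, .sem_op = -1 },	// first alter access to sem 0
 *		{ .sem_num = 0, .sem_op = -1 },	// dup: same sem altered again
 *	};
 *
 *	semop(semid, ops, 2);	// net effect -2, performed atomically
 */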
 783
 784static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
 785					     struct wake_q_head *wake_q)
 786{
 787	get_task_struct(q->sleeper);
 788
 789	/* see SEM_BARRIER_2 for purpose/pairing */
 790	smp_store_release(&q->status, error);
 791
 792	wake_q_add_safe(wake_q, q->sleeper);
 793}
 794
 795static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
 796{
 797	list_del(&q->list);
 798	if (q->nsops > 1)
 799		sma->complex_count--;
 800}
 801
 802/** check_restart(sma, q)
 803 * @sma: semaphore array
 804 * @q: the operation that just completed
 805 *
 806 * update_queue is O(N^2) when it restarts scanning the whole queue of
 807 * waiting operations. Therefore this function checks if the restart is
 808 * really necessary. It is called after a previously waiting operation
 809 * modified the array.
 810 * Note that wait-for-zero operations are handled without restart.
 811 */
 812static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
 813{
 814	/* pending complex alter operations are too difficult to analyse */
 815	if (!list_empty(&sma->pending_alter))
 816		return 1;
 817
 818	/* we were a sleeping complex operation. Too difficult */
 819	if (q->nsops > 1)
 820		return 1;
 821
 822	/* It is impossible that someone waits for the new value:
 823	 * - complex operations always restart.
 824	 * - wait-for-zero operations are handled separately.
 825	 * - q is a previously sleeping simple operation that
 826	 *   altered the array. It must be a decrement, because
 827	 *   simple increments never sleep.
 828	 * - If there are older (higher priority) decrements
 829	 *   in the queue, then they have observed the original
 830	 *   semval value and couldn't proceed. The operation
 831	 *   decremented the value - thus they won't proceed either.
 832	 */
 833	return 0;
 834}
 835
 836/**
 837 * wake_const_ops - wake up non-alter tasks
 838 * @sma: semaphore array.
 839 * @semnum: semaphore that was modified.
 840 * @wake_q: lockless wake-queue head.
 841 *
 842 * wake_const_ops must be called after a semaphore in a semaphore array
 843 * was set to 0. If complex const operations are pending, wake_const_ops must
 844 * be called with semnum = -1, as well as with the number of each modified
 845 * semaphore.
 846 * The tasks that must be woken up are added to @wake_q. The return code
 847 * is stored in q->status.
 848 * The function returns 1 if at least one operation was completed successfully.
 849 */
 850static int wake_const_ops(struct sem_array *sma, int semnum,
 851			  struct wake_q_head *wake_q)
 852{
 853	struct sem_queue *q, *tmp;
 854	struct list_head *pending_list;
 855	int semop_completed = 0;
 856
 857	if (semnum == -1)
 858		pending_list = &sma->pending_const;
 859	else
 860		pending_list = &sma->sems[semnum].pending_const;
 861
 862	list_for_each_entry_safe(q, tmp, pending_list, list) {
 863		int error = perform_atomic_semop(sma, q);
 864
 865		if (error > 0)
 866			continue;
 867		/* operation completed, remove from queue & wakeup */
 868		unlink_queue(sma, q);
 869
 870		wake_up_sem_queue_prepare(q, error, wake_q);
 871		if (error == 0)
 872			semop_completed = 1;
 873	}
 874
 875	return semop_completed;
 876}
 877
 878/**
 879 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 880 * @sma: semaphore array
 881 * @sops: operations that were performed
 882 * @nsops: number of operations
 883 * @wake_q: lockless wake-queue head
 884 *
 885 * Checks all required queues for wait-for-zero operations, based
 886 * on the actual changes that were performed on the semaphore array.
 887 * The function returns 1 if at least one operation was completed successfully.
 888 */
 889static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 890				int nsops, struct wake_q_head *wake_q)
 891{
 892	int i;
 893	int semop_completed = 0;
 894	int got_zero = 0;
 895
 896	/* first: the per-semaphore queues, if known */
 897	if (sops) {
 898		for (i = 0; i < nsops; i++) {
 899			int num = sops[i].sem_num;
 900
 901			if (sma->sems[num].semval == 0) {
 902				got_zero = 1;
 903				semop_completed |= wake_const_ops(sma, num, wake_q);
 904			}
 905		}
 906	} else {
 907		/*
 908		 * No sops means modified semaphores not known.
 909		 * Assume all were changed.
 910		 */
 911		for (i = 0; i < sma->sem_nsems; i++) {
 912			if (sma->sems[i].semval == 0) {
 913				got_zero = 1;
 914				semop_completed |= wake_const_ops(sma, i, wake_q);
 915			}
 916		}
 917	}
 918	/*
 919	 * If one of the modified semaphores got 0,
 920	 * then check the global queue, too.
 921	 */
 922	if (got_zero)
 923		semop_completed |= wake_const_ops(sma, -1, wake_q);
 924
 925	return semop_completed;
 926}
 927
 928
 929/**
 930 * update_queue - look for tasks that can be completed.
 931 * @sma: semaphore array.
 932 * @semnum: semaphore that was modified.
 933 * @wake_q: lockless wake-queue head.
 934 *
 935 * update_queue must be called after a semaphore in a semaphore array
 936 * was modified. If multiple semaphores were modified, update_queue must
 937 * be called with semnum = -1, as well as with the number of each modified
 938 * semaphore.
 939 * The tasks that must be woken up are added to @wake_q. The return code
 940 * is stored in q->status.
 941 * The function internally checks if const operations can now succeed.
 942 *
 943 * The function returns 1 if at least one semop was completed successfully.
 944 */
 945static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
 946{
 947	struct sem_queue *q, *tmp;
 948	struct list_head *pending_list;
 949	int semop_completed = 0;
 950
 951	if (semnum == -1)
 952		pending_list = &sma->pending_alter;
 953	else
 954		pending_list = &sma->sems[semnum].pending_alter;
 955
 956again:
 957	list_for_each_entry_safe(q, tmp, pending_list, list) {
 958		int error, restart;
 959
 960		/* If we are scanning the single sop, per-semaphore list of
 961		 * one semaphore and that semaphore is 0, then it is not
 962		 * necessary to scan further: simple increments
 963		 * that affect only one entry succeed immediately and cannot
 964		 * be in the per-semaphore pending queue, and decrements
 965		 * cannot be successful if the value is already 0.
 966		 */
 967		if (semnum != -1 && sma->sems[semnum].semval == 0)
 968			break;
 969
 970		error = perform_atomic_semop(sma, q);
 971
 972		/* Does q->sleeper still need to sleep? */
 973		if (error > 0)
 974			continue;
 975
 976		unlink_queue(sma, q);
 977
 978		if (error) {
 979			restart = 0;
 980		} else {
 981			semop_completed = 1;
 982			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
 983			restart = check_restart(sma, q);
 984		}
 985
 986		wake_up_sem_queue_prepare(q, error, wake_q);
 987		if (restart)
 988			goto again;
 989	}
 990	return semop_completed;
 991}
 992
 993/**
 994 * set_semotime - set sem_otime
 995 * @sma: semaphore array
 996 * @sops: operations that modified the array, may be NULL
 997 *
 998 * sem_otime is replicated to avoid cache line thrashing.
 999 * This function sets one instance to the current time.
1000 */
1001static void set_semotime(struct sem_array *sma, struct sembuf *sops)
1002{
1003	if (sops == NULL) {
1004		sma->sems[0].sem_otime = ktime_get_real_seconds();
1005	} else {
1006		sma->sems[sops[0].sem_num].sem_otime =
1007						ktime_get_real_seconds();
1008	}
1009}
1010
1011/**
1012 * do_smart_update - optimized update_queue
1013 * @sma: semaphore array
1014 * @sops: operations that were performed
1015 * @nsops: number of operations
1016 * @otime: force setting otime
1017 * @wake_q: lockless wake-queue head
1018 *
1019 * do_smart_update() does the required calls to update_queue and wakeup_zero,
1020 * based on the actual changes that were performed on the semaphore array.
1021 * Note that the function does not do the actual wake-up: the caller is
1022 * responsible for calling wake_up_q().
1023 * It is safe to perform this call after dropping all locks.
1024 */
1025static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
1026			    int otime, struct wake_q_head *wake_q)
1027{
1028	int i;
1029
1030	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);
1031
1032	if (!list_empty(&sma->pending_alter)) {
1033		/* semaphore array uses the global queue - just process it. */
1034		otime |= update_queue(sma, -1, wake_q);
1035	} else {
1036		if (!sops) {
1037			/*
1038			 * No sops, thus the modified semaphores are not
1039			 * known. Check all.
1040			 */
1041			for (i = 0; i < sma->sem_nsems; i++)
1042				otime |= update_queue(sma, i, wake_q);
1043		} else {
1044			/*
1045			 * Check the semaphores that were increased:
1046			 * - No complex ops, thus all sleeping ops are
1047			 *   decrements.
1048			 * - if we decreased the value, then any sleeping
1049			 *   semaphore ops won't be able to run: If the
1050			 *   previous value was too small, then the new
1051			 *   value will be too small, too.
1052			 */
1053			for (i = 0; i < nsops; i++) {
1054				if (sops[i].sem_op > 0) {
1055					otime |= update_queue(sma,
1056							      sops[i].sem_num, wake_q);
1057				}
1058			}
1059		}
1060	}
1061	if (otime)
1062		set_semotime(sma, sops);
1063}
1064
1065/*
1066 * check_qop: Test if a queued operation sleeps on the semaphore semnum
1067 */
1068static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
1069			bool count_zero)
1070{
1071	struct sembuf *sop = q->blocking;
1072
1073	/*
1074	 * Linux always (since 0.99.10) reported a task as sleeping on all
1075	 * semaphores. This violates SUS, therefore it was changed to the
1076	 * standard compliant behavior.
1077	 * Give the administrators a chance to notice that an application
1078	 * might misbehave because it relies on the Linux behavior.
1079	 */
1080	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
1081			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
1082			current->comm, task_pid_nr(current));
1083
1084	if (sop->sem_num != semnum)
1085		return 0;
1086
1087	if (count_zero && sop->sem_op == 0)
1088		return 1;
1089	if (!count_zero && sop->sem_op < 0)
1090		return 1;
1091
1092	return 0;
1093}
1094
1095/* The following counts are associated to each semaphore:
1096 *   semncnt        number of tasks waiting on semval being nonzero
1097 *   semzcnt        number of tasks waiting on semval being zero
1098 *
1099 * By definition, a task waits only on the semaphore of the first semop
1100 * that cannot proceed, even if additional operations would block, too.
1101 */
1102static int count_semcnt(struct sem_array *sma, ushort semnum,
1103			bool count_zero)
1104{
1105	struct list_head *l;
1106	struct sem_queue *q;
1107	int semcnt;
1108
1109	semcnt = 0;
1110	/* First: check the simple operations. They are easy to evaluate */
1111	if (count_zero)
1112		l = &sma->sems[semnum].pending_const;
1113	else
1114		l = &sma->sems[semnum].pending_alter;
1115
1116	list_for_each_entry(q, l, list) {
1117		/* all tasks on a per-semaphore list sleep on exactly
1118		 * that semaphore
1119		 */
1120		semcnt++;
1121	}
1122
1123	/* Then: check the complex operations. */
1124	list_for_each_entry(q, &sma->pending_alter, list) {
1125		semcnt += check_qop(sma, semnum, q, count_zero);
1126	}
1127	if (count_zero) {
1128		list_for_each_entry(q, &sma->pending_const, list) {
1129			semcnt += check_qop(sma, semnum, q, count_zero);
1130		}
1131	}
1132	return semcnt;
1133}
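/*
 * User-space view of these counts; a hedged sketch (return value checks
 * omitted, semid assumed valid):
 *
 *	int ncnt = semctl(semid, 0, GETNCNT);	// tasks blocked until semval grows
 *	int zcnt = semctl(semid, 0, GETZCNT);	// tasks blocked until semval == 0
 */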
1134
1135/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1136 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
1137 * remains locked on exit.
1138 */
1139static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1140{
1141	struct sem_undo *un, *tu;
1142	struct sem_queue *q, *tq;
1143	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1144	int i;
1145	DEFINE_WAKE_Q(wake_q);
1146
1147	/* Free the existing undo structures for this semaphore set.  */
1148	ipc_assert_locked_object(&sma->sem_perm);
1149	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1150		list_del(&un->list_id);
1151		spin_lock(&un->ulp->lock);
1152		un->semid = -1;
1153		list_del_rcu(&un->list_proc);
1154		spin_unlock(&un->ulp->lock);
1155		kfree_rcu(un, rcu);
1156	}
1157
1158	/* Wake up all pending processes and let them fail with EIDRM. */
1159	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1160		unlink_queue(sma, q);
1161		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1162	}
1163
1164	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1165		unlink_queue(sma, q);
1166		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1167	}
1168	for (i = 0; i < sma->sem_nsems; i++) {
1169		struct sem *sem = &sma->sems[i];
1170		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
1171			unlink_queue(sma, q);
1172			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1173		}
1174		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
1175			unlink_queue(sma, q);
1176			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1177		}
1178		ipc_update_pid(&sem->sempid, NULL);
1179	}
1180
1181	/* Remove the semaphore set from the IDR */
1182	sem_rmid(ns, sma);
1183	sem_unlock(sma, -1);
1184	rcu_read_unlock();
1185
1186	wake_up_q(&wake_q);
1187	ns->used_sems -= sma->sem_nsems;
1188	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1189}
1190
1191static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
1192{
1193	switch (version) {
1194	case IPC_64:
1195		return copy_to_user(buf, in, sizeof(*in));
1196	case IPC_OLD:
1197	    {
1198		struct semid_ds out;
1199
1200		memset(&out, 0, sizeof(out));
1201
1202		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
1203
1204		out.sem_otime	= in->sem_otime;
1205		out.sem_ctime	= in->sem_ctime;
1206		out.sem_nsems	= in->sem_nsems;
1207
1208		return copy_to_user(buf, &out, sizeof(out));
1209	    }
1210	default:
1211		return -EINVAL;
1212	}
1213}
1214
1215static time64_t get_semotime(struct sem_array *sma)
1216{
1217	int i;
1218	time64_t res;
1219
1220	res = sma->sems[0].sem_otime;
1221	for (i = 1; i < sma->sem_nsems; i++) {
1222		time64_t to = sma->sems[i].sem_otime;
1223
1224		if (to > res)
1225			res = to;
1226	}
1227	return res;
1228}
1229
1230static int semctl_stat(struct ipc_namespace *ns, int semid,
1231			 int cmd, struct semid64_ds *semid64)
1232{
1233	struct sem_array *sma;
1234	time64_t semotime;
1235	int err;
1236
1237	memset(semid64, 0, sizeof(*semid64));
1238
1239	rcu_read_lock();
1240	if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
1241		sma = sem_obtain_object(ns, semid);
1242		if (IS_ERR(sma)) {
1243			err = PTR_ERR(sma);
1244			goto out_unlock;
1245		}
1246	} else { /* IPC_STAT */
1247		sma = sem_obtain_object_check(ns, semid);
1248		if (IS_ERR(sma)) {
1249			err = PTR_ERR(sma);
1250			goto out_unlock;
1251		}
1252	}
1253
1254	/* see comment for SHM_STAT_ANY */
1255	if (cmd == SEM_STAT_ANY)
1256		audit_ipc_obj(&sma->sem_perm);
1257	else {
1258		err = -EACCES;
1259		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1260			goto out_unlock;
1261	}
1262
1263	err = security_sem_semctl(&sma->sem_perm, cmd);
1264	if (err)
1265		goto out_unlock;
1266
1267	ipc_lock_object(&sma->sem_perm);
1268
1269	if (!ipc_valid_object(&sma->sem_perm)) {
1270		ipc_unlock_object(&sma->sem_perm);
1271		err = -EIDRM;
1272		goto out_unlock;
1273	}
1274
1275	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
1276	semotime = get_semotime(sma);
1277	semid64->sem_otime = semotime;
1278	semid64->sem_ctime = sma->sem_ctime;
1279#ifndef CONFIG_64BIT
1280	semid64->sem_otime_high = semotime >> 32;
1281	semid64->sem_ctime_high = sma->sem_ctime >> 32;
1282#endif
1283	semid64->sem_nsems = sma->sem_nsems;
1284
1285	if (cmd == IPC_STAT) {
1286		/*
1287		 * As defined in SUS:
1288		 * Return 0 on success
1289		 */
1290		err = 0;
1291	} else {
1292		/*
1293		 * SEM_STAT and SEM_STAT_ANY (both Linux specific)
1294		 * Return the full id, including the sequence number
1295		 */
1296		err = sma->sem_perm.id;
1297	}
1298	ipc_unlock_object(&sma->sem_perm);
1299out_unlock:
1300	rcu_read_unlock();
1301	return err;
1302}
1303
1304static int semctl_info(struct ipc_namespace *ns, int semid,
1305			 int cmd, void __user *p)
1306{
1307	struct seminfo seminfo;
1308	int max_idx;
1309	int err;
1310
1311	err = security_sem_semctl(NULL, cmd);
1312	if (err)
1313		return err;
1314
1315	memset(&seminfo, 0, sizeof(seminfo));
1316	seminfo.semmni = ns->sc_semmni;
1317	seminfo.semmns = ns->sc_semmns;
1318	seminfo.semmsl = ns->sc_semmsl;
1319	seminfo.semopm = ns->sc_semopm;
1320	seminfo.semvmx = SEMVMX;
1321	seminfo.semmnu = SEMMNU;
1322	seminfo.semmap = SEMMAP;
1323	seminfo.semume = SEMUME;
1324	down_read(&sem_ids(ns).rwsem);
1325	if (cmd == SEM_INFO) {
1326		seminfo.semusz = sem_ids(ns).in_use;
1327		seminfo.semaem = ns->used_sems;
1328	} else {
1329		seminfo.semusz = SEMUSZ;
1330		seminfo.semaem = SEMAEM;
1331	}
1332	max_idx = ipc_get_maxidx(&sem_ids(ns));
1333	up_read(&sem_ids(ns).rwsem);
1334	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1335		return -EFAULT;
1336	return (max_idx < 0) ? 0 : max_idx;
1337}
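/*
 * The limits reported above can be queried from user space; a hedged
 * sketch (glibc exposes struct seminfo under _GNU_SOURCE; strictly
 * portable code passes it through union semun's __buf member):
 *
 *	struct seminfo si;
 *
 *	if (semctl(0, 0, IPC_INFO, &si) >= 0)
 *		printf("semmsl=%d semmns=%d semopm=%d semmni=%d\n",
 *		       si.semmsl, si.semmns, si.semopm, si.semmni);
 */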
1338
1339static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1340		int val)
1341{
1342	struct sem_undo *un;
1343	struct sem_array *sma;
1344	struct sem *curr;
1345	int err;
1346	DEFINE_WAKE_Q(wake_q);
1347
1348	if (val > SEMVMX || val < 0)
1349		return -ERANGE;
1350
1351	rcu_read_lock();
1352	sma = sem_obtain_object_check(ns, semid);
1353	if (IS_ERR(sma)) {
1354		rcu_read_unlock();
1355		return PTR_ERR(sma);
1356	}
1357
1358	if (semnum < 0 || semnum >= sma->sem_nsems) {
1359		rcu_read_unlock();
1360		return -EINVAL;
1361	}
1362
1363
1364	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1365		rcu_read_unlock();
1366		return -EACCES;
1367	}
1368
1369	err = security_sem_semctl(&sma->sem_perm, SETVAL);
1370	if (err) {
1371		rcu_read_unlock();
1372		return -EACCES;
1373	}
1374
1375	sem_lock(sma, NULL, -1);
1376
1377	if (!ipc_valid_object(&sma->sem_perm)) {
1378		sem_unlock(sma, -1);
1379		rcu_read_unlock();
1380		return -EIDRM;
1381	}
1382
1383	semnum = array_index_nospec(semnum, sma->sem_nsems);
1384	curr = &sma->sems[semnum];
1385
1386	ipc_assert_locked_object(&sma->sem_perm);
1387	list_for_each_entry(un, &sma->list_id, list_id)
1388		un->semadj[semnum] = 0;
1389
1390	curr->semval = val;
1391	ipc_update_pid(&curr->sempid, task_tgid(current));
1392	sma->sem_ctime = ktime_get_real_seconds();
1393	/* maybe some queued-up processes were waiting for this */
1394	do_smart_update(sma, NULL, 0, 0, &wake_q);
1395	sem_unlock(sma, -1);
1396	rcu_read_unlock();
1397	wake_up_q(&wake_q);
1398	return 0;
1399}
1400
1401static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1402		int cmd, void __user *p)
1403{
1404	struct sem_array *sma;
1405	struct sem *curr;
1406	int err, nsems;
1407	ushort fast_sem_io[SEMMSL_FAST];
1408	ushort *sem_io = fast_sem_io;
1409	DEFINE_WAKE_Q(wake_q);
1410
1411	rcu_read_lock();
1412	sma = sem_obtain_object_check(ns, semid);
1413	if (IS_ERR(sma)) {
1414		rcu_read_unlock();
1415		return PTR_ERR(sma);
1416	}
1417
1418	nsems = sma->sem_nsems;
1419
1420	err = -EACCES;
1421	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1422		goto out_rcu_wakeup;
1423
1424	err = security_sem_semctl(&sma->sem_perm, cmd);
1425	if (err)
1426		goto out_rcu_wakeup;
1427
1428	err = -EACCES;
1429	switch (cmd) {
1430	case GETALL:
1431	{
1432		ushort __user *array = p;
1433		int i;
1434
1435		sem_lock(sma, NULL, -1);
1436		if (!ipc_valid_object(&sma->sem_perm)) {
1437			err = -EIDRM;
1438			goto out_unlock;
1439		}
1440		if (nsems > SEMMSL_FAST) {
1441			if (!ipc_rcu_getref(&sma->sem_perm)) {
1442				err = -EIDRM;
1443				goto out_unlock;
1444			}
1445			sem_unlock(sma, -1);
1446			rcu_read_unlock();
1447			sem_io = kvmalloc_array(nsems, sizeof(ushort),
1448						GFP_KERNEL);
1449			if (sem_io == NULL) {
1450				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1451				return -ENOMEM;
1452			}
1453
1454			rcu_read_lock();
1455			sem_lock_and_putref(sma);
1456			if (!ipc_valid_object(&sma->sem_perm)) {
1457				err = -EIDRM;
1458				goto out_unlock;
1459			}
1460		}
1461		for (i = 0; i < sma->sem_nsems; i++)
1462			sem_io[i] = sma->sems[i].semval;
1463		sem_unlock(sma, -1);
1464		rcu_read_unlock();
1465		err = 0;
1466		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1467			err = -EFAULT;
1468		goto out_free;
1469	}
1470	case SETALL:
1471	{
1472		int i;
1473		struct sem_undo *un;
1474
1475		if (!ipc_rcu_getref(&sma->sem_perm)) {
1476			err = -EIDRM;
1477			goto out_rcu_wakeup;
1478		}
1479		rcu_read_unlock();
1480
1481		if (nsems > SEMMSL_FAST) {
1482			sem_io = kvmalloc_array(nsems, sizeof(ushort),
1483						GFP_KERNEL);
1484			if (sem_io == NULL) {
1485				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1486				return -ENOMEM;
1487			}
1488		}
1489
1490		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
1491			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1492			err = -EFAULT;
1493			goto out_free;
1494		}
1495
1496		for (i = 0; i < nsems; i++) {
1497			if (sem_io[i] > SEMVMX) {
1498				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1499				err = -ERANGE;
1500				goto out_free;
1501			}
1502		}
1503		rcu_read_lock();
1504		sem_lock_and_putref(sma);
1505		if (!ipc_valid_object(&sma->sem_perm)) {
1506			err = -EIDRM;
1507			goto out_unlock;
1508		}
1509
1510		for (i = 0; i < nsems; i++) {
1511			sma->sems[i].semval = sem_io[i];
1512			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
1513		}
1514
1515		ipc_assert_locked_object(&sma->sem_perm);
1516		list_for_each_entry(un, &sma->list_id, list_id) {
1517			for (i = 0; i < nsems; i++)
1518				un->semadj[i] = 0;
1519		}
1520		sma->sem_ctime = ktime_get_real_seconds();
1521		/* maybe some queued-up processes were waiting for this */
1522		do_smart_update(sma, NULL, 0, 0, &wake_q);
1523		err = 0;
1524		goto out_unlock;
1525	}
1526	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1527	}
1528	err = -EINVAL;
1529	if (semnum < 0 || semnum >= nsems)
1530		goto out_rcu_wakeup;
1531
1532	sem_lock(sma, NULL, -1);
1533	if (!ipc_valid_object(&sma->sem_perm)) {
1534		err = -EIDRM;
1535		goto out_unlock;
1536	}
1537
1538	semnum = array_index_nospec(semnum, nsems);
1539	curr = &sma->sems[semnum];
1540
1541	switch (cmd) {
1542	case GETVAL:
1543		err = curr->semval;
1544		goto out_unlock;
1545	case GETPID:
1546		err = pid_vnr(curr->sempid);
1547		goto out_unlock;
1548	case GETNCNT:
1549		err = count_semcnt(sma, semnum, 0);
1550		goto out_unlock;
1551	case GETZCNT:
1552		err = count_semcnt(sma, semnum, 1);
1553		goto out_unlock;
1554	}
1555
1556out_unlock:
1557	sem_unlock(sma, -1);
1558out_rcu_wakeup:
1559	rcu_read_unlock();
1560	wake_up_q(&wake_q);
1561out_free:
1562	if (sem_io != fast_sem_io)
1563		kvfree(sem_io);
1564	return err;
1565}
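/*
 * Illustrative userspace sketch (not part of this file): how the
 * GETALL/SETALL paths above are reached.  Per semctl(2), userspace
 * must define union semun itself, and the array must hold exactly
 * nsems entries.
 *
 *	#include <sys/sem.h>
 *
 *	union semun {
 *		int val;
 *		struct semid_ds *buf;
 *		unsigned short *array;
 *	};
 *
 *	int set_all_to_one(int semid, int nsems)
 *	{
 *		unsigned short vals[nsems];
 *		union semun arg;
 *		int i;
 *
 *		for (i = 0; i < nsems; i++)
 *			vals[i] = 1;	// each value must be <= SEMVMX
 *		arg.array = vals;
 *		// fails with errno == ERANGE if any value exceeds SEMVMX
 *		return semctl(semid, 0, SETALL, arg);
 *	}
 */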
1566
1567static inline unsigned long
1568copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1569{
1570	switch (version) {
1571	case IPC_64:
1572		if (copy_from_user(out, buf, sizeof(*out)))
1573			return -EFAULT;
1574		return 0;
1575	case IPC_OLD:
1576	    {
1577		struct semid_ds tbuf_old;
1578
1579		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1580			return -EFAULT;
1581
1582		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
1583		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
1584		out->sem_perm.mode	= tbuf_old.sem_perm.mode;
1585
1586		return 0;
1587	    }
1588	default:
1589		return -EINVAL;
1590	}
1591}
1592
1593/*
1594 * This function handles some semctl commands which require the rwsem
1595 * to be held in write mode.
1596 * NOTE: no locks must be held, the rwsem is taken inside this function.
1597 */
1598static int semctl_down(struct ipc_namespace *ns, int semid,
1599		       int cmd, struct semid64_ds *semid64)
1600{
1601	struct sem_array *sma;
1602	int err;
1603	struct kern_ipc_perm *ipcp;
1604
1605	down_write(&sem_ids(ns).rwsem);
1606	rcu_read_lock();
1607
1608	ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
1609				      &semid64->sem_perm, 0);
1610	if (IS_ERR(ipcp)) {
1611		err = PTR_ERR(ipcp);
1612		goto out_unlock1;
1613	}
1614
1615	sma = container_of(ipcp, struct sem_array, sem_perm);
1616
1617	err = security_sem_semctl(&sma->sem_perm, cmd);
1618	if (err)
1619		goto out_unlock1;
1620
1621	switch (cmd) {
1622	case IPC_RMID:
1623		sem_lock(sma, NULL, -1);
1624		/* freeary unlocks the ipc object and rcu */
1625		freeary(ns, ipcp);
1626		goto out_up;
1627	case IPC_SET:
1628		sem_lock(sma, NULL, -1);
1629		err = ipc_update_perm(&semid64->sem_perm, ipcp);
1630		if (err)
1631			goto out_unlock0;
1632		sma->sem_ctime = ktime_get_real_seconds();
1633		break;
1634	default:
1635		err = -EINVAL;
1636		goto out_unlock1;
1637	}
1638
1639out_unlock0:
1640	sem_unlock(sma, -1);
1641out_unlock1:
1642	rcu_read_unlock();
1643out_up:
1644	up_write(&sem_ids(ns).rwsem);
1645	return err;
1646}
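/*
 * Illustrative userspace sketch (not part of this file) for the two
 * commands routed through semctl_down(); union semun is as defined in
 * the sketch after semctl_main() above:
 *
 *	struct semid_ds ds;
 *	union semun arg = { .buf = &ds };
 *
 *	semctl(semid, 0, IPC_STAT, arg);	// fetch current perms
 *	ds.sem_perm.mode = 0600;
 *	semctl(semid, 0, IPC_SET, arg);		// also updates sem_ctime
 *
 *	semctl(semid, 0, IPC_RMID);		// destroy; sleepers get EIDRM
 */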
1647
1648static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
1649{
1650	struct ipc_namespace *ns;
1651	void __user *p = (void __user *)arg;
1652	struct semid64_ds semid64;
1653	int err;
1654
1655	if (semid < 0)
1656		return -EINVAL;
1657
1658	ns = current->nsproxy->ipc_ns;
1659
1660	switch (cmd) {
1661	case IPC_INFO:
1662	case SEM_INFO:
1663		return semctl_info(ns, semid, cmd, p);
1664	case IPC_STAT:
1665	case SEM_STAT:
1666	case SEM_STAT_ANY:
1667		err = semctl_stat(ns, semid, cmd, &semid64);
1668		if (err < 0)
1669			return err;
1670		if (copy_semid_to_user(p, &semid64, version))
1671			err = -EFAULT;
1672		return err;
1673	case GETALL:
1674	case GETVAL:
1675	case GETPID:
1676	case GETNCNT:
1677	case GETZCNT:
1678	case SETALL:
1679		return semctl_main(ns, semid, semnum, cmd, p);
1680	case SETVAL: {
1681		int val;
1682#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1683		/* big-endian 64bit */
1684		val = arg >> 32;
1685#else
1686		/* 32bit or little-endian 64bit */
1687		val = arg;
1688#endif
1689		return semctl_setval(ns, semid, semnum, val);
1690	}
1691	case IPC_SET:
1692		if (copy_semid_from_user(&semid64, p, version))
1693			return -EFAULT;
1694		fallthrough;
1695	case IPC_RMID:
1696		return semctl_down(ns, semid, cmd, &semid64);
1697	default:
1698		return -EINVAL;
1699	}
1700}
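/*
 * Background for the SETVAL shift above (an explanation, not source
 * text): userspace passes the final semctl() argument as union semun,
 * whose 32-bit 'int val' member sits at the lowest address of the
 * union.  When the C library forwards the union in a 64-bit register,
 * that lowest address corresponds to the most significant half on a
 * big-endian machine, hence the CONFIG_64BIT/__BIG_ENDIAN shift.
 */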
1701
1702SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1703{
1704	return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
1705}
1706
1707#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
1708long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
1709{
1710	int version = ipc_parse_version(&cmd);
1711
1712	return ksys_semctl(semid, semnum, cmd, arg, version);
1713}
1714
1715SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1716{
1717	return ksys_old_semctl(semid, semnum, cmd, arg);
1718}
1719#endif
1720
1721#ifdef CONFIG_COMPAT
1722
1723struct compat_semid_ds {
1724	struct compat_ipc_perm sem_perm;
1725	old_time32_t sem_otime;
1726	old_time32_t sem_ctime;
1727	compat_uptr_t sem_base;
1728	compat_uptr_t sem_pending;
1729	compat_uptr_t sem_pending_last;
1730	compat_uptr_t undo;
1731	unsigned short sem_nsems;
1732};
1733
1734static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
1735					int version)
1736{
1737	memset(out, 0, sizeof(*out));
1738	if (version == IPC_64) {
1739		struct compat_semid64_ds __user *p = buf;
1740		return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
1741	} else {
1742		struct compat_semid_ds __user *p = buf;
1743		return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
1744	}
1745}
1746
1747static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
1748					int version)
1749{
1750	if (version == IPC_64) {
1751		struct compat_semid64_ds v;
1752		memset(&v, 0, sizeof(v));
1753		to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
1754		v.sem_otime	 = lower_32_bits(in->sem_otime);
1755		v.sem_otime_high = upper_32_bits(in->sem_otime);
1756		v.sem_ctime	 = lower_32_bits(in->sem_ctime);
1757		v.sem_ctime_high = upper_32_bits(in->sem_ctime);
1758		v.sem_nsems = in->sem_nsems;
1759		return copy_to_user(buf, &v, sizeof(v));
1760	} else {
1761		struct compat_semid_ds v;
1762		memset(&v, 0, sizeof(v));
1763		to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
1764		v.sem_otime = in->sem_otime;
1765		v.sem_ctime = in->sem_ctime;
1766		v.sem_nsems = in->sem_nsems;
1767		return copy_to_user(buf, &v, sizeof(v));
1768	}
1769}
1770
1771static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
1772{
1773	void __user *p = compat_ptr(arg);
1774	struct ipc_namespace *ns;
1775	struct semid64_ds semid64;
1776	int err;
1777
1778	ns = current->nsproxy->ipc_ns;
1779
1780	if (semid < 0)
1781		return -EINVAL;
1782
1783	switch (cmd & (~IPC_64)) {
1784	case IPC_INFO:
1785	case SEM_INFO:
1786		return semctl_info(ns, semid, cmd, p);
1787	case IPC_STAT:
1788	case SEM_STAT:
1789	case SEM_STAT_ANY:
1790		err = semctl_stat(ns, semid, cmd, &semid64);
1791		if (err < 0)
1792			return err;
1793		if (copy_compat_semid_to_user(p, &semid64, version))
1794			err = -EFAULT;
1795		return err;
1796	case GETVAL:
1797	case GETPID:
1798	case GETNCNT:
1799	case GETZCNT:
1800	case GETALL:
1801	case SETALL:
1802		return semctl_main(ns, semid, semnum, cmd, p);
1803	case SETVAL:
1804		return semctl_setval(ns, semid, semnum, arg);
1805	case IPC_SET:
1806		if (copy_compat_semid_from_user(&semid64, p, version))
1807			return -EFAULT;
1808		fallthrough;
1809	case IPC_RMID:
1810		return semctl_down(ns, semid, cmd, &semid64);
1811	default:
1812		return -EINVAL;
1813	}
1814}
1815
1816COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
1817{
1818	return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
1819}
1820
1821#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
1822long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
1823{
1824	int version = compat_ipc_parse_version(&cmd);
1825
1826	return compat_ksys_semctl(semid, semnum, cmd, arg, version);
1827}
1828
1829COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
1830{
1831	return compat_ksys_old_semctl(semid, semnum, cmd, arg);
1832}
1833#endif
1834#endif
1835
1836	/* If the task doesn't already have an undo_list, then allocate one
1837	 * here.  We guarantee there is only one thread using this undo list,
1838	 * and current is THE ONE.
1839 *
1840 * If this allocation and assignment succeeds, but later
1841 * portions of this code fail, there is no need to free the sem_undo_list.
1842 * Just let it stay associated with the task, and it'll be freed later
1843 * at exit time.
1844 *
1845 * This can block, so callers must hold no locks.
1846 */
1847static inline int get_undo_list(struct sem_undo_list **undo_listp)
1848{
1849	struct sem_undo_list *undo_list;
1850
1851	undo_list = current->sysvsem.undo_list;
1852	if (!undo_list) {
1853		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1854		if (undo_list == NULL)
1855			return -ENOMEM;
1856		spin_lock_init(&undo_list->lock);
1857		refcount_set(&undo_list->refcnt, 1);
1858		INIT_LIST_HEAD(&undo_list->list_proc);
1859
1860		current->sysvsem.undo_list = undo_list;
1861	}
1862	*undo_listp = undo_list;
1863	return 0;
1864}
1865
1866static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1867{
1868	struct sem_undo *un;
1869
1870	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
1871				spin_is_locked(&ulp->lock)) {
1872		if (un->semid == semid)
1873			return un;
1874	}
1875	return NULL;
1876}
1877
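/*
 * In addition to the lookup, lookup_undo() applies a move-to-front
 * heuristic: the entry found by __lookup_undo() is re-linked at the
 * head of list_proc, so a task that keeps operating on the same
 * semaphore set finds its undo structure near-instantly on the next
 * call.
 */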
1878static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1879{
1880	struct sem_undo *un;
1881
1882	assert_spin_locked(&ulp->lock);
1883
1884	un = __lookup_undo(ulp, semid);
1885	if (un) {
1886		list_del_rcu(&un->list_proc);
1887		list_add_rcu(&un->list_proc, &ulp->list_proc);
1888	}
1889	return un;
1890}
1891
1892/**
1893 * find_alloc_undo - lookup (and if not present create) undo array
1894 * @ns: namespace
1895 * @semid: semaphore array id
1896 *
1897 * The function looks up (and if not present creates) the undo structure.
1898 * The size of the undo structure depends on the size of the semaphore
1899 * array, thus the alloc path is not that straightforward.
1900	 * Lifetime-rules: sem_undo is rcu-protected; on success, the function
1901 * performs a rcu_read_lock().
1902 */
1903static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1904{
1905	struct sem_array *sma;
1906	struct sem_undo_list *ulp;
1907	struct sem_undo *un, *new;
1908	int nsems, error;
1909
1910	error = get_undo_list(&ulp);
1911	if (error)
1912		return ERR_PTR(error);
1913
1914	rcu_read_lock();
1915	spin_lock(&ulp->lock);
1916	un = lookup_undo(ulp, semid);
1917	spin_unlock(&ulp->lock);
1918	if (likely(un != NULL))
1919		goto out;
1920
1921	/* no undo structure around - allocate one. */
1922	/* step 1: figure out the size of the semaphore array */
1923	sma = sem_obtain_object_check(ns, semid);
1924	if (IS_ERR(sma)) {
1925		rcu_read_unlock();
1926		return ERR_CAST(sma);
1927	}
1928
1929	nsems = sma->sem_nsems;
1930	if (!ipc_rcu_getref(&sma->sem_perm)) {
1931		rcu_read_unlock();
1932		un = ERR_PTR(-EIDRM);
1933		goto out;
1934	}
1935	rcu_read_unlock();
1936
1937	/* step 2: allocate new undo structure */
1938	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1939	if (!new) {
1940		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1941		return ERR_PTR(-ENOMEM);
1942	}
1943
1944	/* step 3: Acquire the lock on semaphore array */
1945	rcu_read_lock();
1946	sem_lock_and_putref(sma);
1947	if (!ipc_valid_object(&sma->sem_perm)) {
1948		sem_unlock(sma, -1);
1949		rcu_read_unlock();
1950		kfree(new);
1951		un = ERR_PTR(-EIDRM);
1952		goto out;
1953	}
1954	spin_lock(&ulp->lock);
1955
1956	/*
1957	 * step 4: check for races: did someone else allocate the undo struct?
1958	 */
1959	un = lookup_undo(ulp, semid);
1960	if (un) {
1961		kfree(new);
1962		goto success;
1963	}
1964	/* step 5: initialize & link new undo structure */
1965	new->semadj = (short *) &new[1];
1966	new->ulp = ulp;
1967	new->semid = semid;
1968	assert_spin_locked(&ulp->lock);
1969	list_add_rcu(&new->list_proc, &ulp->list_proc);
1970	ipc_assert_locked_object(&sma->sem_perm);
1971	list_add(&new->list_id, &sma->list_id);
1972	un = new;
1973
1974success:
1975	spin_unlock(&ulp->lock);
1976	sem_unlock(sma, -1);
1977out:
1978	return un;
1979}
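/*
 * Note on the getref/putref dance in steps 1-3 above: the GFP_KERNEL
 * allocation may sleep, so neither the rcu read lock nor the array
 * spinlock may be held across it.  ipc_rcu_getref() keeps the array
 * alive over the unlocked window, and the ipc_valid_object() re-check
 * after sem_lock_and_putref() catches a concurrent IPC_RMID.  The
 * GETALL/SETALL paths in semctl_main() use the same pattern for the
 * sem_io buffer.
 */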
1980
1981static long do_semtimedop(int semid, struct sembuf __user *tsops,
1982		unsigned nsops, const struct timespec64 *timeout)
1983{
1984	int error = -EINVAL;
1985	struct sem_array *sma;
1986	struct sembuf fast_sops[SEMOPM_FAST];
1987	struct sembuf *sops = fast_sops, *sop;
1988	struct sem_undo *un;
1989	int max, locknum;
1990	bool undos = false, alter = false, dupsop = false;
1991	struct sem_queue queue;
1992	unsigned long dup = 0, jiffies_left = 0;
1993	struct ipc_namespace *ns;
1994
1995	ns = current->nsproxy->ipc_ns;
1996
1997	if (nsops < 1 || semid < 0)
1998		return -EINVAL;
1999	if (nsops > ns->sc_semopm)
2000		return -E2BIG;
2001	if (nsops > SEMOPM_FAST) {
2002		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
2003		if (sops == NULL)
2004			return -ENOMEM;
2005	}
2006
2007	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
2008		error = -EFAULT;
2009		goto out_free;
2010	}
2011
2012	if (timeout) {
2013		if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
2014			timeout->tv_nsec >= 1000000000L) {
2015			error = -EINVAL;
2016			goto out_free;
2017		}
2018		jiffies_left = timespec64_to_jiffies(timeout);
2019	}
2020
2021	max = 0;
2022	for (sop = sops; sop < sops + nsops; sop++) {
2023		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
2024
2025		if (sop->sem_num >= max)
2026			max = sop->sem_num;
2027		if (sop->sem_flg & SEM_UNDO)
2028			undos = true;
2029		if (dup & mask) {
2030			/*
2031			 * There was a previous alter access that appears
2032			 * to have accessed the same semaphore, thus use
2033			 * the dupsop logic. "appears", because the detection
2034		 * can only check sem_num % BITS_PER_LONG.
2035			 */
2036			dupsop = true;
2037		}
2038		if (sop->sem_op != 0) {
2039			alter = true;
2040			dup |= mask;
2041		}
2042	}
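	/*
	 * Example of how conservative the detection above is: on a 64-bit
	 * kernel, sem_num 1 and sem_num 65 both map to bit 1 of 'dup'
	 * (65 % BITS_PER_LONG == 1), so a semop() that alters sem_num 1
	 * and then touches sem_num 65 is flagged as dupsop even though
	 * the semaphores differ.  A false positive merely selects the
	 * slower dupsop path; false negatives cannot occur.
	 */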
2043
2044	if (undos) {
2045		/* On success, find_alloc_undo takes the rcu_read_lock */
2046		un = find_alloc_undo(ns, semid);
2047		if (IS_ERR(un)) {
2048			error = PTR_ERR(un);
2049			goto out_free;
2050		}
2051	} else {
2052		un = NULL;
2053		rcu_read_lock();
2054	}
2055
2056	sma = sem_obtain_object_check(ns, semid);
2057	if (IS_ERR(sma)) {
2058		rcu_read_unlock();
2059		error = PTR_ERR(sma);
2060		goto out_free;
2061	}
2062
2063	error = -EFBIG;
2064	if (max >= sma->sem_nsems) {
2065		rcu_read_unlock();
2066		goto out_free;
2067	}
2068
2069	error = -EACCES;
2070	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
2071		rcu_read_unlock();
2072		goto out_free;
2073	}
2074
2075	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
2076	if (error) {
2077		rcu_read_unlock();
2078		goto out_free;
2079	}
2080
2081	error = -EIDRM;
2082	locknum = sem_lock(sma, sops, nsops);
2083	/*
2084	 * We eventually might perform the following check in a lockless
2085	 * fashion, considering ipc_valid_object() locking constraints.
2086	 * If nsops == 1 and there is no contention for sem_perm.lock, then
2087	 * only a per-semaphore lock is held and it's OK to proceed with the
2088	 * check below. More details on the fine-grained locking scheme
2089	 * entangled here, and why it is RMID race safe, are in the comments at sem_lock().
2090	 */
2091	if (!ipc_valid_object(&sma->sem_perm))
2092		goto out_unlock_free;
2093	/*
2094	 * semid identifiers are not unique - find_alloc_undo may have
2095	 * allocated an undo structure that was then invalidated by an RMID,
2096	 * and now a new array has received the same id. Check and fail.
2097	 * This case can be detected by checking un->semid. The existence of
2098	 * "un" itself is guaranteed by rcu.
2099	 */
2100	if (un && un->semid == -1)
2101		goto out_unlock_free;
2102
2103	queue.sops = sops;
2104	queue.nsops = nsops;
2105	queue.undo = un;
2106	queue.pid = task_tgid(current);
2107	queue.alter = alter;
2108	queue.dupsop = dupsop;
2109
2110	error = perform_atomic_semop(sma, &queue);
2111	if (error == 0) { /* non-blocking successful path */
2112		DEFINE_WAKE_Q(wake_q);
2113
2114		/*
2115		 * If the operation was successful, then do
2116		 * the required updates.
2117		 */
2118		if (alter)
2119			do_smart_update(sma, sops, nsops, 1, &wake_q);
2120		else
2121			set_semotime(sma, sops);
2122
2123		sem_unlock(sma, locknum);
2124		rcu_read_unlock();
2125		wake_up_q(&wake_q);
2126
2127		goto out_free;
2128	}
2129	if (error < 0) /* non-blocking error path */
2130		goto out_unlock_free;
2131
2132	/*
2133	 * We need to sleep on this operation, so we put the current
2134	 * task into the pending queue and go to sleep.
2135	 */
2136	if (nsops == 1) {
2137		struct sem *curr;
2138		int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
2139		curr = &sma->sems[idx];
2140
2141		if (alter) {
2142			if (sma->complex_count) {
2143				list_add_tail(&queue.list,
2144						&sma->pending_alter);
2145			} else {
2147				list_add_tail(&queue.list,
2148						&curr->pending_alter);
2149			}
2150		} else {
2151			list_add_tail(&queue.list, &curr->pending_const);
2152		}
2153	} else {
2154		if (!sma->complex_count)
2155			merge_queues(sma);
2156
2157		if (alter)
2158			list_add_tail(&queue.list, &sma->pending_alter);
2159		else
2160			list_add_tail(&queue.list, &sma->pending_const);
2161
2162		sma->complex_count++;
2163	}
2164
2165	do {
2166		/* memory ordering ensured by the lock in sem_lock() */
2167		WRITE_ONCE(queue.status, -EINTR);
2168		queue.sleeper = current;
2169
2170		/* memory ordering is ensured by the lock in sem_lock() */
2171		__set_current_state(TASK_INTERRUPTIBLE);
2172		sem_unlock(sma, locknum);
2173		rcu_read_unlock();
2174
2175		if (timeout)
2176			jiffies_left = schedule_timeout(jiffies_left);
2177		else
2178			schedule();
2179
2180		/*
2181	 * fastpath: the semop has completed; whether it succeeded
2182	 * or not is, from the syscall pov, quite irrelevant to us
2183	 * at this point; we're done.
2184		 *
2185		 * We _do_ care, nonetheless, about being awoken by a signal or
2186		 * spuriously.  The queue.status is checked again in the
2187		 * slowpath (aka after taking sem_lock), such that we can detect
2188		 * scenarios where we were awakened externally, during the
2189		 * window between wake_q_add() and wake_up_q().
2190		 */
2191		error = READ_ONCE(queue.status);
2192		if (error != -EINTR) {
2193			/* see SEM_BARRIER_2 for purpose/pairing */
2194			smp_acquire__after_ctrl_dep();
2195			goto out_free;
2196		}
2197
2198		rcu_read_lock();
2199		locknum = sem_lock(sma, sops, nsops);
2200
2201		if (!ipc_valid_object(&sma->sem_perm))
2202			goto out_unlock_free;
2203
2204		/*
2205	 * No necessity for any barrier: we are protected by sem_lock().
2206		 */
2207		error = READ_ONCE(queue.status);
2208
2209		/*
2210		 * If queue.status != -EINTR we are woken up by another process.
2211		 * Leave without unlink_queue(), but with sem_unlock().
2212		 */
2213		if (error != -EINTR)
2214			goto out_unlock_free;
2215
2216		/*
2217		 * If an interrupt occurred we have to clean up the queue.
2218		 */
2219		if (timeout && jiffies_left == 0)
2220			error = -EAGAIN;
2221	} while (error == -EINTR && !signal_pending(current)); /* spurious */
2222
2223	unlink_queue(sma, &queue);
2224
2225out_unlock_free:
2226	sem_unlock(sma, locknum);
2227	rcu_read_unlock();
2228out_free:
2229	if (sops != fast_sops)
2230		kvfree(sops);
2231	return error;
2232}
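/*
 * Illustrative userspace sketch (not part of this file): a P()/V()
 * pair over semaphore 0 with SEM_UNDO, so exit_sem() below reverts
 * the decrement if the holder dies, plus a 5 second timeout on the
 * down.  Declaring semtimedop() may require _GNU_SOURCE.
 *
 *	#include <sys/sem.h>
 *	#include <time.h>
 *
 *	int sem_down(int semid)
 *	{
 *		struct sembuf op = {
 *			.sem_num = 0,
 *			.sem_op  = -1,		// wait until semval >= 1
 *			.sem_flg = SEM_UNDO,
 *		};
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		return semtimedop(semid, &op, 1, &ts); // -1/EAGAIN on timeout
 *	}
 *
 *	int sem_up(int semid)
 *	{
 *		struct sembuf op = { .sem_num = 0, .sem_op = 1,
 *				     .sem_flg = SEM_UNDO };
 *		return semop(semid, &op, 1);
 *	}
 */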
2233
2234long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2235		     unsigned int nsops, const struct __kernel_timespec __user *timeout)
2236{
2237	if (timeout) {
2238		struct timespec64 ts;
2239		if (get_timespec64(&ts, timeout))
2240			return -EFAULT;
2241		return do_semtimedop(semid, tsops, nsops, &ts);
2242	}
2243	return do_semtimedop(semid, tsops, nsops, NULL);
2244}
2245
2246SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
2247		unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
2248{
2249	return ksys_semtimedop(semid, tsops, nsops, timeout);
2250}
2251
2252#ifdef CONFIG_COMPAT_32BIT_TIME
2253long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2254			    unsigned int nsops,
2255			    const struct old_timespec32 __user *timeout)
2256{
2257	if (timeout) {
2258		struct timespec64 ts;
2259		if (get_old_timespec32(&ts, timeout))
2260			return -EFAULT;
2261		return do_semtimedop(semid, tsems, nsops, &ts);
2262	}
2263	return do_semtimedop(semid, tsems, nsops, NULL);
2264}
2265
2266SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
2267		       unsigned int, nsops,
2268		       const struct old_timespec32 __user *, timeout)
2269{
2270	return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
2271}
2272#endif
2273
2274SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2275		unsigned, nsops)
2276{
2277	return do_semtimedop(semid, tsops, nsops, NULL);
2278}
2279
2280/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2281 * parent and child tasks.
2282 */
2283
2284int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2285{
2286	struct sem_undo_list *undo_list;
2287	int error;
2288
2289	if (clone_flags & CLONE_SYSVSEM) {
2290		error = get_undo_list(&undo_list);
2291		if (error)
2292			return error;
2293		refcount_inc(&undo_list->refcnt);
2294		tsk->sysvsem.undo_list = undo_list;
2295	} else
2296		tsk->sysvsem.undo_list = NULL;
2297
2298	return 0;
2299}
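/*
 * In practice this gives per-process semadj semantics: glibc's NPTL
 * passes CLONE_SYSVSEM from pthread_create(), so all threads of a
 * process share one undo list, while fork() clears the flag and the
 * child starts with a NULL undo_list that is allocated lazily on its
 * first SEM_UNDO operation.
 */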
2300
2301/*
2302 * add semadj values to semaphores, free undo structures.
2303 * undo structures are not freed when semaphore arrays are destroyed
2304 * so some of them may be out of date.
2305	 * IMPLEMENTATION NOTE: There is some confusion over whether the set
2306	 * of adjustments should be applied atomically or not. That is, if we
2307	 * are attempting to decrement the semval, should we queue up and wait
2308	 * until we can do so legally?
2309 * The original implementation attempted to do this (queue and wait).
2310 * The current implementation does not do so. The POSIX standard
2311 * and SVID should be consulted to determine what behavior is mandated.
2312 */
2313void exit_sem(struct task_struct *tsk)
2314{
2315	struct sem_undo_list *ulp;
2316
2317	ulp = tsk->sysvsem.undo_list;
2318	if (!ulp)
2319		return;
2320	tsk->sysvsem.undo_list = NULL;
2321
2322	if (!refcount_dec_and_test(&ulp->refcnt))
2323		return;
2324
2325	for (;;) {
2326		struct sem_array *sma;
2327		struct sem_undo *un;
2328		int semid, i;
2329		DEFINE_WAKE_Q(wake_q);
2330
2331		cond_resched();
2332
2333		rcu_read_lock();
2334		un = list_entry_rcu(ulp->list_proc.next,
2335				    struct sem_undo, list_proc);
2336		if (&un->list_proc == &ulp->list_proc) {
2337			/*
2338			 * We must wait for freeary() before freeing this ulp,
2339			 * in case we raced with the last sem_undo. There is a small
2340			 * possibility where we exit while freeary() didn't
2341			 * finish unlocking sem_undo_list.
2342			 */
2343			spin_lock(&ulp->lock);
2344			spin_unlock(&ulp->lock);
2345			rcu_read_unlock();
2346			break;
2347		}
2348		spin_lock(&ulp->lock);
2349		semid = un->semid;
2350		spin_unlock(&ulp->lock);
2351
2352		/* exit_sem raced with IPC_RMID, nothing to do */
2353		if (semid == -1) {
2354			rcu_read_unlock();
2355			continue;
2356		}
2357
2358		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2359		/* exit_sem raced with IPC_RMID, nothing to do */
2360		if (IS_ERR(sma)) {
2361			rcu_read_unlock();
2362			continue;
2363		}
2364
2365		sem_lock(sma, NULL, -1);
2366		/* exit_sem raced with IPC_RMID, nothing to do */
2367		if (!ipc_valid_object(&sma->sem_perm)) {
2368			sem_unlock(sma, -1);
2369			rcu_read_unlock();
2370			continue;
2371		}
2372		un = __lookup_undo(ulp, semid);
2373		if (un == NULL) {
2374			/* exit_sem raced with IPC_RMID+semget() that created
2375			 * exactly the same semid. Nothing to do.
2376			 */
2377			sem_unlock(sma, -1);
2378			rcu_read_unlock();
2379			continue;
2380		}
2381
2382		/* remove un from the linked lists */
2383		ipc_assert_locked_object(&sma->sem_perm);
2384		list_del(&un->list_id);
2385
2386		spin_lock(&ulp->lock);
2387		list_del_rcu(&un->list_proc);
2388		spin_unlock(&ulp->lock);
2389
2390		/* perform adjustments registered in un */
2391		for (i = 0; i < sma->sem_nsems; i++) {
2392			struct sem *semaphore = &sma->sems[i];
2393			if (un->semadj[i]) {
2394				semaphore->semval += un->semadj[i];
2395				/*
2396				 * Range checks of the new semaphore value,
2397				 * not defined by SUS (the Single UNIX Specification):
2398				 * - Some unices ignore the undo entirely
2399				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2400				 * - some cap the value (e.g. FreeBSD caps
2401				 *   at 0, but doesn't enforce SEMVMX)
2402				 *
2403				 * Linux caps the semaphore value, both at 0
2404				 * and at SEMVMX.
2405				 *
2406				 *	Manfred <manfred@colorfullife.com>
2407				 */
2408				if (semaphore->semval < 0)
2409					semaphore->semval = 0;
2410				if (semaphore->semval > SEMVMX)
2411					semaphore->semval = SEMVMX;
2412				ipc_update_pid(&semaphore->sempid, task_tgid(current));
2413			}
2414		}
2415		/* maybe some queued-up processes were waiting for this */
2416		do_smart_update(sma, NULL, 0, 1, &wake_q);
2417		sem_unlock(sma, -1);
2418		rcu_read_unlock();
2419		wake_up_q(&wake_q);
2420
2421		kfree_rcu(un, rcu);
2422	}
2423	kfree(ulp);
2424}
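/*
 * Worked example of the clamping above: a task performs a +5 semop
 * with SEM_UNDO, making its semadj entry -5.  Other tasks then
 * consume units until semval == 2.  At exit, 2 + (-5) = -3, which
 * Linux clamps to 0 rather than sleeping or failing (see the
 * IMPLEMENTATION NOTE before this function).
 */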
2425
2426#ifdef CONFIG_PROC_FS
2427static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2428{
2429	struct user_namespace *user_ns = seq_user_ns(s);
2430	struct kern_ipc_perm *ipcp = it;
2431	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
2432	time64_t sem_otime;
2433
2434	/*
2435	 * The proc interface isn't aware of sem_lock(), it calls
2436	 * ipc_lock_object() directly (in sysvipc_find_ipc).
2437	 * In order to stay compatible with sem_lock(), we must
2438	 * enter / leave complex_mode.
2439	 */
2440	complexmode_enter(sma);
2441
2442	sem_otime = get_semotime(sma);
2443
2444	seq_printf(s,
2445		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
2446		   sma->sem_perm.key,
2447		   sma->sem_perm.id,
2448		   sma->sem_perm.mode,
2449		   sma->sem_nsems,
2450		   from_kuid_munged(user_ns, sma->sem_perm.uid),
2451		   from_kgid_munged(user_ns, sma->sem_perm.gid),
2452		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
2453		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
2454		   sem_otime,
2455		   sma->sem_ctime);
2456
2457	complexmode_tryleave(sma);
2458
2459	return 0;
2460}
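/*
 * One line per semaphore array, matching the seq_printf() above:
 *
 *	key semid perms nsems uid gid cuid cgid otime ctime
 */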
2461#endif