// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *   Thus: Perfect SMP scaling between independent semaphore arrays.
 *         If multiple semaphores in one array are used, then cache line
 *         thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and per-semaphore list (stored in the array). This allows achieving FIFO
 *   ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
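
/*
 * Example (hedged, user-space sketch) of the multi-sop behavior described
 * above: two operations on the same semaphore within one semop() call are
 * applied atomically, in order. The key and values are illustrative
 * assumptions, not values used by this file.
 *
 *	#include <sys/sem.h>
 *
 *	int id = semget(0x1234, 1, IPC_CREAT | 0600);	// semval starts at 0
 *	struct sembuf sops[2] = {
 *		{ .sem_num = 0, .sem_op = +2, .sem_flg = 0 },
 *		{ .sem_num = 0, .sem_op = -1, .sem_flg = 0 },
 *	};
 *	// net effect +1; the -1 consumes a unit granted by the +2 in the
 *	// same call, so the pair succeeds (or blocks) as one unit.
 *	semop(id, sops, 2);
 */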

#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <linux/sched/wake_q.h>
#include <linux/nospec.h>
#include <linux/rhashtable.h>

#include <linux/uaccess.h>
#include "util.h"

/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	/*
	 * PID of the process that last modified the semaphore. For
	 * Linux, specifically these are:
	 *  - semop
	 *  - semctl, via SETVAL and SETALL.
	 *  - at task exit when performing undo adjustments (see exit_sem).
	 */
	struct pid *sempid;
	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
	struct list_head pending_alter; /* pending single-sop operations */
					/* that alter the semaphore */
	struct list_head pending_const; /* pending single-sop operations */
					/* that do not alter the semaphore*/
	time64_t	 sem_otime;	/* candidate for sem_otime */
} ____cacheline_aligned_in_smp;

/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
	struct kern_ipc_perm	sem_perm;	/* permissions .. see ipc.h */
	time64_t		sem_ctime;	/* create/last semctl() time */
	struct list_head	pending_alter;	/* pending operations */
						/* that alter the array */
	struct list_head	pending_const;	/* pending complex operations */
						/* that do not alter semvals */
	struct list_head	list_id;	/* undo requests on this array */
	int			sem_nsems;	/* no. of semaphores in array */
	int			complex_count;	/* pending complex operations */
	unsigned int		use_global_lock;/* >0: global lock required */

	struct sem		sems[];
} __randomize_layout;

/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	struct pid		*pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	struct sembuf		*blocking; /* the operation that blocked */
	int			nsops;	 /* number of operations */
	bool			alter;	 /* does *sops alter the array? */
	bool			dupsop;	 /* sops on more than one sem_num */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			semadj[];	/* array of adjustments */
						/* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks of a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	refcount_t		refcnt;
	spinlock_t		lock;
	struct list_head	list_proc;
};


#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * Switching from the mode suitable for simple ops
 * to the mode for complex ops is costly. Therefore:
 * use some hysteresis
 */
#define USE_GLOBAL_LOCK_HYSTERESIS	10
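
/*
 * Illustrative sketch of the hysteresis (the real logic lives in
 * complexmode_enter()/complexmode_tryleave() below): the counter decays by
 * one on every global-lock release, so after a complex op the array stays
 * in global-lock mode for roughly USE_GLOBAL_LOCK_HYSTERESIS simple ops
 * before per-semaphore locking resumes:
 *
 *	use_global_lock = 10;	// a complex op was seen
 *	// ten subsequent simple ops each take the global lock:
 *	// 10 -> 9 -> ... -> 1, then smp_store_release(..., 0)
 *	// and the per-semaphore fast path is allowed again.
 */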

/*
 * Locking:
 * a) global sem_lock() for read/write
 *	sem_undo.id_next,
 *	sem_array.complex_count,
 *	sem_array.pending{_alter,_const},
 *	sem_array.sem_undo
 *
 * b) global or semaphore sem_lock() for read/write:
 *	sem_array.sems[i].pending_{const,alter}:
 *
 * c) special:
 *	sem_undo_list.list_proc:
 *	* undo_list->lock for write
 *	* rcu for read
 *	use_global_lock:
 *	* global sem_lock() for write
 *	* either local or global sem_lock() for read.
 *
 * Memory ordering:
 * Most ordering is enforced by using spin_lock() and spin_unlock().
 *
 * Exceptions:
 * 1) use_global_lock: (SEM_BARRIER_1)
 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
 * using smp_store_release(): Immediately after setting it to 0,
 * a simple op can start.
 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
 * smp_load_acquire().
 * Setting it from 0 to non-zero must be ordered with regards to
 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 * is inside a spin_lock() and after a write from 0 to non-zero a
 * spin_lock()+spin_unlock() is done.
 * To prevent the compiler/cpu temporarily writing 0 to use_global_lock,
 * READ_ONCE()/WRITE_ONCE() is used.
 *
 * 2) queue.status: (SEM_BARRIER_2)
 * Initialization is done while holding sem_lock(), so no further barrier is
 * required.
 * Setting it to a result code is a RELEASE, this is ensured by both a
 * smp_store_release() (for case a) and while holding sem_lock()
 * (for case b).
 * The ACQUIRE when reading the result code without holding sem_lock() is
 * achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep().
 * (case a above).
 * Reading the result code while holding sem_lock() needs no further barriers,
 * the locks inside sem_lock() enforce ordering (case b above)
 *
 * 3) current->state:
 * current->state is set to TASK_INTERRUPTIBLE while holding sem_lock().
 * The wakeup is handled using the wake_q infrastructure. wake_q wakeups may
 * happen immediately after calling wake_q_add. As wake_q_add_safe() is called
 * when holding sem_lock(), no further barriers are required.
 *
 * See also ipc/mqueue.c for more details on the covered races.
 */
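
/*
 * Minimal sketch of the SEM_BARRIER_1 pairing described above (illustrative;
 * the real code lives in complexmode_tryleave() and sem_lock() below):
 *
 *	// writer, leaving complex mode (complexmode_tryleave):
 *	smp_store_release(&sma->use_global_lock, 0);	// RELEASE
 *
 *	// reader, simple-op fast path (sem_lock):
 *	spin_lock(&sem->lock);
 *	if (!smp_load_acquire(&sma->use_global_lock))	// ACQUIRE
 *		return sops->sem_num;	// fast path: only sem->lock is held
 */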

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]

void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
}
#endif

void __init sem_init(void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}

/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operations back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		curr = &sma->sems[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}

/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
	int i;
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];

		list_splice_init(&sem->pending_alter, &sma->pending_alter);
	}
}

static void sem_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);

	security_sem_free(&sma->sem_perm);
	kvfree(sma);
}

/*
 * Enter the mode suitable for non-simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_enter(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->use_global_lock > 0)  {
		/*
		 * We are already in global lock mode.
		 * Nothing to do, just reset the
		 * counter until we return to simple mode.
		 */
		WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
		return;
	}
	WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);

	for (i = 0; i < sma->sem_nsems; i++) {
		sem = &sma->sems[i];
		spin_lock(&sem->lock);
		spin_unlock(&sem->lock);
	}
}

/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
	if (sma->complex_count)  {
		/* Complex ops are sleeping.
		 * We must stay in complex mode
		 */
		return;
	}
	if (sma->use_global_lock == 1) {

		/* See SEM_BARRIER_1 for purpose/pairing */
		smp_store_release(&sma->use_global_lock, 0);
	} else {
		WRITE_ONCE(sma->use_global_lock,
				sma->use_global_lock-1);
	}
}

#define SEM_GLOBAL_LOCK	(-1)
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;
	int idx;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* Prevent parallel simple ops */
		complexmode_enter(sma);
		return SEM_GLOBAL_LOCK;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * Optimized locking is possible if no complex operation
	 * is either enqueued or processed right now.
	 *
	 * Both facts are tracked by use_global_lock.
	 */
	idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
	sem = &sma->sems[idx];

	/*
	 * Initial check for use_global_lock. Just an optimization,
	 * no locking, no memory barrier.
	 */
	if (!READ_ONCE(sma->use_global_lock)) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* see SEM_BARRIER_1 for purpose/pairing */
		if (!smp_load_acquire(&sma->use_global_lock)) {
			/* fast path successful! */
			return sops->sem_num;
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->use_global_lock == 0) {
		/*
		 * The use_global_lock mode ended while we waited for
		 * sma->sem_perm.lock. Thus we must switch to locking
		 * with sem->lock.
		 * Unlike in the fast path, there is no need to recheck
		 * sma->use_global_lock after we have acquired sem->lock:
		 * We own sma->sem_perm.lock, thus use_global_lock cannot
		 * change.
		 */
		spin_lock(&sem->lock);

		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/*
		 * Not a false alarm, thus continue to use the global lock
		 * mode. No need for complexmode_enter(), this was done by
		 * the caller that has set use_global_lock to non-zero.
		 */
		return SEM_GLOBAL_LOCK;
	}
}
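
/*
 * Usage sketch (illustrative): sem_lock() returns either SEM_GLOBAL_LOCK or
 * the index of the per-semaphore lock it took; the same value must be handed
 * back to sem_unlock() below so that the matching lock is dropped:
 *
 *	int locknum = sem_lock(sma, sops, nsops);
 *	// ... operate on the array ...
 *	sem_unlock(sma, locknum);
 */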

static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == SEM_GLOBAL_LOCK) {
		unmerge_queues(sma);
		complexmode_tryleave(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = &sma->sems[locknum];
		spin_unlock(&sem->lock);
	}
}

/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

static struct sem_array *sem_alloc(size_t nsems)
{
	struct sem_array *sma;

	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
		return NULL;

	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
	if (unlikely(!sma))
		return NULL;

	return sma;
}

/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int retval;
	struct sem_array *sma;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	sma = sem_alloc(nsems);
	if (!sma)
		return -ENOMEM;

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(&sma->sem_perm);
	if (retval) {
		kvfree(sma);
		return retval;
	}

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
		INIT_LIST_HEAD(&sma->sems[i].pending_const);
		spin_lock_init(&sma->sems[i].lock);
	}

	sma->complex_count = 0;
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = ktime_get_real_seconds();

	/* ipc_addid() locks sma upon success. */
	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (retval < 0) {
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return retval;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}


/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}

long ksys_semget(key_t key, int nsems, int semflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops sem_ops = {
		.getnew = newary,
		.associate = security_sem_associate,
		.more_checks = sem_more_checks,
	};
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	return ksys_semget(key, nsems, semflg);
}
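
/*
 * Example (hedged, user-space sketch) of the semget() entry point above;
 * the key derivation and mode are illustrative assumptions:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	// create (or look up) a set of 4 semaphores
 *	int id = semget(ftok("/tmp/lockfile", 'a'), 4, IPC_CREAT | 0600);
 *	if (id < 0)
 *		perror("semget");	// errno == EINVAL if nsems > SEMMSL
 */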

/**
 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 *                               operations on a given array.
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Whether the caller blocks is as follows, based on the value
 * indicated by the semaphore operation (sem_op):
 *
 *  (1) >0 never blocks.
 *  (2)  0 (wait-for-zero operation): semval is non-zero.
 *  (3) <0 attempting to decrement semval to a value smaller than zero.
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Returns <0 for error codes.
 */
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct pid *pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	for (sop = sops; sop < sops + nsops; sop++) {
		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
		curr = &sma->sems[idx];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	sop--;
	pid = q->pid;
	while (sop >= sops) {
		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sems[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}

static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	if (unlikely(q->dupsop))
		return perform_atomic_semop_slow(sma, q);

	/*
	 * We scan the semaphore set twice, first to ensure that the entire
	 * operation can succeed, therefore avoiding any pointless writes
	 * to shared memory and having to undo such changes in order to block
	 * until the operations can go through.
	 */
	for (sop = sops; sop < sops + nsops; sop++) {
		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);

		curr = &sma->sems[idx];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block; /* wait-for-zero */

		result += sem_op;
		if (result < 0)
			goto would_block;

		if (result > SEMVMX)
			return -ERANGE;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				return -ERANGE;
		}
	}

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			un->semadj[sop->sem_num] = undo;
		}
		curr->semval += sem_op;
		ipc_update_pid(&curr->sempid, q->pid);
	}

	return 0;

would_block:
	q->blocking = sop;
	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}
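
/*
 * Worked example (illustrative) of the blocking rules documented above,
 * for a semaphore with semval == 0:
 *
 *	sem_op = +1  ->  semval becomes 1, never blocks            (rule 1)
 *	sem_op =  0  ->  succeeds immediately, semval is zero      (rule 2)
 *	sem_op = -1  ->  would drive semval below zero: returns 1
 *	                 (caller sleeps), or -EAGAIN with IPC_NOWAIT (rule 3)
 */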

static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
					     struct wake_q_head *wake_q)
{
	struct task_struct *sleeper;

	sleeper = get_task_struct(q->sleeper);

	/* see SEM_BARRIER_2 for purpose/pairing */
	smp_store_release(&q->status, error);

	wake_q_add_safe(wake_q, sleeper);
}

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}

/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decremented the value - thus they won't proceed either.
	 */
	return 0;
}

/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
			  struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sems[semnum].pending_const;

	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error = perform_atomic_semop(sma, q);

		if (error > 0)
			continue;
		/* operation completed, remove from queue & wakeup */
		unlink_queue(sma, q);

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (error == 0)
			semop_completed = 1;
	}

	return semop_completed;
}

/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
				int nsops, struct wake_q_head *wake_q)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sems[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, wake_q);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sems[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, wake_q);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, wake_q);

	return semop_completed;
}


/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sems[semnum].pending_alter;

again:
	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error, restart;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per-semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sems[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (restart)
			goto again;
	}
	return semop_completed;
}

/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
	if (sops == NULL) {
		sma->sems[0].sem_otime = ktime_get_real_seconds();
	} else {
		sma->sems[sops[0].sem_num].sem_otime =
						ktime_get_real_seconds();
	}
}

/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			    int otime, struct wake_q_head *wake_q)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, wake_q);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, wake_q);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decreases.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops won't be able to run: If the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							      sops[i].sem_num, wake_q);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}
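
/*
 * Caller pattern sketch (illustrative) for the wake-up split described
 * above: wakeups are queued under the lock and delivered after dropping it,
 * as done by semctl_setval() and friends below:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	sem_lock(sma, NULL, -1);
 *	// ... modify semaphore values ...
 *	do_smart_update(sma, NULL, 0, 0, &wake_q);
 *	sem_unlock(sma, -1);
 *	rcu_read_unlock();
 *	wake_up_q(&wake_q);	// actual wakeups, no locks held
 */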

/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
			bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard compliant behavior.
	 * Give the administrators a chance to notice that an application
	 * might misbehave because it relies on the Linux behavior.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
			current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}

/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * By definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operations would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
			bool count_zero)
{
	struct list_head *l;
	struct sem_queue *q;
	int semcnt;

	semcnt = 0;
	/* First: check the simple operations. They are easy to evaluate */
	if (count_zero)
		l = &sma->sems[semnum].pending_const;
	else
		l = &sma->sems[semnum].pending_alter;

	list_for_each_entry(q, l, list) {
		/* all tasks on a per-semaphore list sleep on exactly
		 * that semaphore
		 */
		semcnt++;
	}

	/* Then: check the complex operations. */
	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	if (count_zero) {
		list_for_each_entry(q, &sma->pending_const, list) {
			semcnt += check_qop(sma, semnum, q, count_zero);
		}
	}
	return semcnt;
}

/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	int i;
	DEFINE_WAKE_Q(wake_q);

	/* Free the existing undo structures for this semaphore set.  */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kvfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		ipc_update_pid(&sem->sempid, NULL);
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_q(&wake_q);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static time64_t get_semotime(struct sem_array *sma)
{
	int i;
	time64_t res;

	res = sma->sems[0].sem_otime;
	for (i = 1; i < sma->sem_nsems; i++) {
		time64_t to = sma->sems[i].sem_otime;

		if (to > res)
			res = to;
	}
	return res;
}

static int semctl_stat(struct ipc_namespace *ns, int semid,
			 int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	time64_t semotime;
	int err;

	memset(semid64, 0, sizeof(*semid64));

	rcu_read_lock();
	if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
		sma = sem_obtain_object(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
	} else { /* IPC_STAT */
		sma = sem_obtain_object_check(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
	}

	/* see comment for SHM_STAT_ANY */
	if (cmd == SEM_STAT_ANY)
		audit_ipc_obj(&sma->sem_perm);
	else {
		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;
	}

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock;

	ipc_lock_object(&sma->sem_perm);

	if (!ipc_valid_object(&sma->sem_perm)) {
		ipc_unlock_object(&sma->sem_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
	semotime = get_semotime(sma);
	semid64->sem_otime = semotime;
	semid64->sem_ctime = sma->sem_ctime;
#ifndef CONFIG_64BIT
	semid64->sem_otime_high = semotime >> 32;
	semid64->sem_ctime_high = sma->sem_ctime >> 32;
#endif
	semid64->sem_nsems = sma->sem_nsems;

	if (cmd == IPC_STAT) {
		/*
		 * As defined in SUS:
		 * Return 0 on success
		 */
		err = 0;
	} else {
		/*
		 * SEM_STAT and SEM_STAT_ANY (both Linux specific)
		 * Return the full id, including the sequence number
		 */
		err = sma->sem_perm.id;
	}
	ipc_unlock_object(&sma->sem_perm);
out_unlock:
	rcu_read_unlock();
	return err;
}

static int semctl_info(struct ipc_namespace *ns, int semid,
			 int cmd, void __user *p)
{
	struct seminfo seminfo;
	int max_idx;
	int err;

	err = security_sem_semctl(NULL, cmd);
	if (err)
		return err;

	memset(&seminfo, 0, sizeof(seminfo));
	seminfo.semmni = ns->sc_semmni;
	seminfo.semmns = ns->sc_semmns;
	seminfo.semmsl = ns->sc_semmsl;
	seminfo.semopm = ns->sc_semopm;
	seminfo.semvmx = SEMVMX;
	seminfo.semmnu = SEMMNU;
	seminfo.semmap = SEMMAP;
	seminfo.semume = SEMUME;
	down_read(&sem_ids(ns).rwsem);
	if (cmd == SEM_INFO) {
		seminfo.semusz = sem_ids(ns).in_use;
		seminfo.semaem = ns->used_sems;
	} else {
		seminfo.semusz = SEMUSZ;
		seminfo.semaem = SEMAEM;
	}
	max_idx = ipc_get_maxidx(&sem_ids(ns));
	up_read(&sem_ids(ns).rwsem);
	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
		return -EFAULT;
	return (max_idx < 0) ? 0 : max_idx;
}

static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		int val)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err;
	DEFINE_WAKE_Q(wake_q);

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(&sma->sem_perm, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	semnum = array_index_nospec(semnum, sma->sem_nsems);
	curr = &sma->sems[semnum];

	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	ipc_update_pid(&curr->sempid, task_tgid(current));
	sma->sem_ctime = ktime_get_real_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &wake_q);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_q(&wake_q);
	return 0;
}
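
/*
 * Example (hedged, user-space sketch) for the SETVAL path above. Note that
 * per semctl(2) the caller must define union semun itself:
 *
 *	union semun { int val; struct semid_ds *buf; unsigned short *array; };
 *
 *	union semun arg = { .val = 1 };
 *	if (semctl(id, 0, SETVAL, arg) < 0)
 *		perror("semctl");	// ERANGE if val > SEMVMX or val < 0
 */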

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	DEFINE_WAKE_Q(wake_q);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_rcu_wakeup;

	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			if (!ipc_rcu_getref(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sems[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++) {
			sma->sems[i].semval = sem_io[i];
			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
		}

		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = ktime_get_real_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &wake_q);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}

	semnum = array_index_nospec(semnum, nsems);
	curr = &sma->sems[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = pid_vnr(curr->sempid);
		goto out_unlock;
	case GETNCNT:
		err = count_semcnt(sma, semnum, 0);
		goto out_unlock;
	case GETZCNT:
		err = count_semcnt(sma, semnum, 1);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_q(&wake_q);
out_free:
	if (sem_io != fast_sem_io)
		kvfree(sem_io);
	return err;
}
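
/*
 * Example (hedged, user-space sketch) for the GETALL/SETALL paths above,
 * on a set of 4 semaphores; union semun is again defined by the caller:
 *
 *	unsigned short vals[4] = { 1, 0, 0, 2 };
 *	union semun arg = { .array = vals };
 *
 *	semctl(id, 0, SETALL, arg);	// atomically set all 4 values
 *	semctl(id, 0, GETALL, arg);	// read them back into vals[]
 */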

static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	int err;
	struct kern_ipc_perm *ipcp;

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
				      &semid64->sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64->sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = ktime_get_real_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}

static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
{
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;
	struct semid64_ds semid64;
	int err;

	if (semid < 0)
		return -EINVAL;

	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		if (copy_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL: {
		int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
		/* big-endian 64bit */
		val = arg >> 32;
#else
		/* 32bit or little-endian 64bit */
		val = arg;
#endif
		return semctl_setval(ns, semid, semnum, val);
	}
	case IPC_SET:
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
		fallthrough;
	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
}

#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
{
	int version = ipc_parse_version(&cmd);

	return ksys_semctl(semid, semnum, cmd, arg, version);
}

SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	return ksys_old_semctl(semid, semnum, cmd, arg);
}
#endif

#ifdef CONFIG_COMPAT

struct compat_semid_ds {
	struct compat_ipc_perm sem_perm;
	old_time32_t sem_otime;
	old_time32_t sem_ctime;
	compat_uptr_t sem_base;
	compat_uptr_t sem_pending;
	compat_uptr_t sem_pending_last;
	compat_uptr_t undo;
	unsigned short sem_nsems;
};

static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
					int version)
{
	memset(out, 0, sizeof(*out));
	if (version == IPC_64) {
		struct compat_semid64_ds __user *p = buf;
		return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
	} else {
		struct compat_semid_ds __user *p = buf;
		return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
	}
}

static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
					int version)
{
	if (version == IPC_64) {
		struct compat_semid64_ds v;
		memset(&v, 0, sizeof(v));
		to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
		v.sem_otime	 = lower_32_bits(in->sem_otime);
		v.sem_otime_high = upper_32_bits(in->sem_otime);
		v.sem_ctime	 = lower_32_bits(in->sem_ctime);
		v.sem_ctime_high = upper_32_bits(in->sem_ctime);
		v.sem_nsems = in->sem_nsems;
		return copy_to_user(buf, &v, sizeof(v));
	} else {
		struct compat_semid_ds v;
		memset(&v, 0, sizeof(v));
		to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
		v.sem_otime = in->sem_otime;
		v.sem_ctime = in->sem_ctime;
		v.sem_nsems = in->sem_nsems;
		return copy_to_user(buf, &v, sizeof(v));
	}
}

static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
{
	void __user *p = compat_ptr(arg);
	struct ipc_namespace *ns;
	struct semid64_ds semid64;
	int err;

	ns = current->nsproxy->ipc_ns;

	if (semid < 0)
		return -EINVAL;

	switch (cmd & (~IPC_64)) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		if (copy_compat_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case GETALL:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_SET:
		if (copy_compat_semid_from_user(&semid64, p, version))
			return -EFAULT;
		fallthrough;
	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}

COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
{
	return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
}

#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
{
	int version = compat_ipc_parse_version(&cmd);

	return compat_ksys_semctl(semid, semnum, cmd, arg, version);
}

COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
{
	return compat_ksys_old_semctl(semid, semnum, cmd, arg);
}
#endif
#endif
1838
1839/* If the task doesn't already have a undo_list, then allocate one
1840 * here.  We guarantee there is only one thread using this undo list,
1841 * and current is THE ONE
1842 *
1843 * If this allocation and assignment succeeds, but later
1844 * portions of this code fail, there is no need to free the sem_undo_list.
1845 * Just let it stay associated with the task, and it'll be freed later
1846 * at exit time.
1847 *
1848 * This can block, so callers must hold no locks.
1849 */
1850static inline int get_undo_list(struct sem_undo_list **undo_listp)
1851{
1852	struct sem_undo_list *undo_list;
1853
1854	undo_list = current->sysvsem.undo_list;
1855	if (!undo_list) {
1856		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
1857		if (undo_list == NULL)
1858			return -ENOMEM;
1859		spin_lock_init(&undo_list->lock);
1860		refcount_set(&undo_list->refcnt, 1);
1861		INIT_LIST_HEAD(&undo_list->list_proc);
1862
1863		current->sysvsem.undo_list = undo_list;
1864	}
1865	*undo_listp = undo_list;
1866	return 0;
1867}
1868
1869static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1870{
1871	struct sem_undo *un;
1872
1873	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
1874				spin_is_locked(&ulp->lock)) {
1875		if (un->semid == semid)
1876			return un;
1877	}
1878	return NULL;
1879}
1880
1881static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1882{
1883	struct sem_undo *un;
1884
1885	assert_spin_locked(&ulp->lock);
1886
1887	un = __lookup_undo(ulp, semid);
1888	if (un) {
1889		list_del_rcu(&un->list_proc);
1890		list_add_rcu(&un->list_proc, &ulp->list_proc);
1891	}
1892	return un;
1893}
1894
1895/**
1896 * find_alloc_undo - lookup (and if not present create) undo array
1897 * @ns: namespace
1898 * @semid: semaphore array id
1899 *
1900 * The function looks up (and if not present creates) the undo structure.
1901 * The size of the undo structure depends on the size of the semaphore
1902 * array, thus the alloc path is not that straightforward.
1903 * Lifetime rules: sem_undo is rcu-protected; on success, the function
1904 * performs an rcu_read_lock().
1905 */
1906static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1907{
1908	struct sem_array *sma;
1909	struct sem_undo_list *ulp;
1910	struct sem_undo *un, *new;
1911	int nsems, error;
1912
1913	error = get_undo_list(&ulp);
1914	if (error)
1915		return ERR_PTR(error);
1916
1917	rcu_read_lock();
1918	spin_lock(&ulp->lock);
1919	un = lookup_undo(ulp, semid);
1920	spin_unlock(&ulp->lock);
1921	if (likely(un != NULL))
1922		goto out;
1923
1924	/* no undo structure around - allocate one. */
1925	/* step 1: figure out the size of the semaphore array */
1926	sma = sem_obtain_object_check(ns, semid);
1927	if (IS_ERR(sma)) {
1928		rcu_read_unlock();
1929		return ERR_CAST(sma);
1930	}
1931
1932	nsems = sma->sem_nsems;
1933	if (!ipc_rcu_getref(&sma->sem_perm)) {
1934		rcu_read_unlock();
1935		un = ERR_PTR(-EIDRM);
1936		goto out;
1937	}
1938	rcu_read_unlock();
1939
1940	/* step 2: allocate new undo structure */
1941	new = kvzalloc(struct_size(new, semadj, nsems), GFP_KERNEL_ACCOUNT);
1942	if (!new) {
1943		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1944		return ERR_PTR(-ENOMEM);
1945	}
1946
1947	/* step 3: Acquire the lock on semaphore array */
1948	rcu_read_lock();
1949	sem_lock_and_putref(sma);
1950	if (!ipc_valid_object(&sma->sem_perm)) {
1951		sem_unlock(sma, -1);
1952		rcu_read_unlock();
1953		kvfree(new);
1954		un = ERR_PTR(-EIDRM);
1955		goto out;
1956	}
1957	spin_lock(&ulp->lock);
1958
1959	/*
1960	 * step 4: check for races: did someone else allocate the undo struct?
1961	 */
1962	un = lookup_undo(ulp, semid);
1963	if (un) {
1964		spin_unlock(&ulp->lock);
1965		kvfree(new);
1966		goto success;
1967	}
1968	/* step 5: initialize & link new undo structure */
1969	new->ulp = ulp;
1970	new->semid = semid;
1971	assert_spin_locked(&ulp->lock);
1972	list_add_rcu(&new->list_proc, &ulp->list_proc);
1973	ipc_assert_locked_object(&sma->sem_perm);
1974	list_add(&new->list_id, &sma->list_id);
1975	un = new;
1976	spin_unlock(&ulp->lock);
1977success:
1978	sem_unlock(sma, -1);
1979out:
1980	return un;
1981}
1982
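
/*
 * The shape of find_alloc_undo() - lookup, drop the lock, allocate (which
 * may sleep), retake the lock, recheck for a racing allocator - is a general
 * pattern. A hypothetical stand-alone sketch of the same pattern with
 * pthreads (names like find_alloc() are illustrative only):
 */
#include <pthread.h>
#include <stdlib.h>

struct entry { int key; struct entry *next; };

static struct entry *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *lookup(int key)		/* caller holds list_lock */
{
	struct entry *e;

	for (e = head; e; e = e->next)
		if (e->key == key)
			return e;
	return NULL;
}

struct entry *find_alloc(int key)
{
	struct entry *e, *new;

	pthread_mutex_lock(&list_lock);
	e = lookup(key);
	pthread_mutex_unlock(&list_lock);
	if (e)
		return e;

	new = calloc(1, sizeof(*new));		/* may block: done unlocked */
	if (!new)
		return NULL;
	new->key = key;

	pthread_mutex_lock(&list_lock);
	e = lookup(key);			/* step 4 analogue: recheck */
	if (e) {
		free(new);			/* lost the race, drop ours */
	} else {
		new->next = head;		/* step 5 analogue: link it */
		head = new;
		e = new;
	}
	pthread_mutex_unlock(&list_lock);
	return e;
}
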
1983long __do_semtimedop(int semid, struct sembuf *sops,
1984		unsigned nsops, const struct timespec64 *timeout,
1985		struct ipc_namespace *ns)
1986{
1987	int error = -EINVAL;
1988	struct sem_array *sma;
1989	struct sembuf *sop;
1990	struct sem_undo *un;
1991	int max, locknum;
1992	bool undos = false, alter = false, dupsop = false;
1993	struct sem_queue queue;
1994	unsigned long dup = 0;
1995	ktime_t expires, *exp = NULL;
1996	bool timed_out = false;
1997
1998	if (nsops < 1 || semid < 0)
1999		return -EINVAL;
2000	if (nsops > ns->sc_semopm)
2001		return -E2BIG;
2002
2003	if (timeout) {
2004		if (!timespec64_valid(timeout))
2005			return -EINVAL;
2006		expires = ktime_add_safe(ktime_get(),
2007				timespec64_to_ktime(*timeout));
2008		exp = &expires;
2009	}
2010	}
2011
2012	max = 0;
2013	for (sop = sops; sop < sops + nsops; sop++) {
2014		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
2015
2016		if (sop->sem_num >= max)
2017			max = sop->sem_num;
2018		if (sop->sem_flg & SEM_UNDO)
2019			undos = true;
2020		if (dup & mask) {
2021			/*
2022			 * There was a previous alter access that appears
2023			 * to have accessed the same semaphore, thus use
2024			 * the dupsop logic. "appears", because the detection
2025			 * can only check sem_num % BITS_PER_LONG.
2026			 */
2027			dupsop = true;
2028		}
2029		if (sop->sem_op != 0) {
2030			alter = true;
2031			dup |= mask;
2032		}
2033	}
2034
2035	if (undos) {
2036		/* On success, find_alloc_undo takes the rcu_read_lock */
2037		un = find_alloc_undo(ns, semid);
2038		if (IS_ERR(un)) {
2039			error = PTR_ERR(un);
2040			goto out;
2041		}
2042	} else {
2043		un = NULL;
2044		rcu_read_lock();
2045	}
2046
2047	sma = sem_obtain_object_check(ns, semid);
2048	if (IS_ERR(sma)) {
2049		rcu_read_unlock();
2050		error = PTR_ERR(sma);
2051		goto out;
2052	}
2053
2054	error = -EFBIG;
2055	if (max >= sma->sem_nsems) {
2056		rcu_read_unlock();
2057		goto out;
2058	}
2059
2060	error = -EACCES;
2061	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
2062		rcu_read_unlock();
2063		goto out;
2064	}
2065
2066	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
2067	if (error) {
2068		rcu_read_unlock();
2069		goto out;
2070	}
2071
2072	error = -EIDRM;
2073	locknum = sem_lock(sma, sops, nsops);
2074	/*
2075	 * We eventually might perform the following check in a lockless
2076	 * fashion, considering ipc_valid_object() locking constraints.
2077	 * If nsops == 1 and there is no contention for sem_perm.lock, then
2078	 * only a per-semaphore lock is held and it's OK to proceed with the
2079	 * check below. More details on the fine-grained locking scheme
2080	 * entangled here, and why it is RMID-race safe, are at sem_lock().
2081	 */
2082	if (!ipc_valid_object(&sma->sem_perm))
2083		goto out_unlock;
2084	/*
2085	 * semid identifiers are not unique - find_alloc_undo may have
2086	 * allocated an undo structure, it was invalidated by an RMID,
2087	 * and now a new array has received the same id. Check and fail.
2088	 * This case can be detected by checking un->semid. The existence of
2089	 * "un" itself is guaranteed by rcu.
2090	 */
2091	if (un && un->semid == -1)
2092		goto out_unlock;
2093
2094	queue.sops = sops;
2095	queue.nsops = nsops;
2096	queue.undo = un;
2097	queue.pid = task_tgid(current);
2098	queue.alter = alter;
2099	queue.dupsop = dupsop;
2100
2101	error = perform_atomic_semop(sma, &queue);
2102	if (error == 0) { /* non-blocking successful path */
2103		DEFINE_WAKE_Q(wake_q);
2104
2105		/*
2106		 * If the operation was successful, then do
2107		 * the required updates.
2108		 */
2109		if (alter)
2110			do_smart_update(sma, sops, nsops, 1, &wake_q);
2111		else
2112			set_semotime(sma, sops);
2113
2114		sem_unlock(sma, locknum);
2115		rcu_read_unlock();
2116		wake_up_q(&wake_q);
2117
2118		goto out;
2119	}
2120	if (error < 0) /* non-blocking error path */
2121		goto out_unlock;
2122
2123	/*
2124	 * We need to sleep on this operation, so we put the current
2125	 * task into the pending queue and go to sleep.
2126	 */
2127	if (nsops == 1) {
2128		struct sem *curr;
2129		int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
2130		curr = &sma->sems[idx];
2131
2132		if (alter) {
2133			if (sma->complex_count) {
2134				list_add_tail(&queue.list,
2135						&sma->pending_alter);
2136			} else {
2137
2138				list_add_tail(&queue.list,
2139						&curr->pending_alter);
2140			}
2141		} else {
2142			list_add_tail(&queue.list, &curr->pending_const);
2143		}
2144	} else {
2145		if (!sma->complex_count)
2146			merge_queues(sma);
2147
2148		if (alter)
2149			list_add_tail(&queue.list, &sma->pending_alter);
2150		else
2151			list_add_tail(&queue.list, &sma->pending_const);
2152
2153		sma->complex_count++;
2154	}
2155
2156	do {
2157		/* memory ordering ensured by the lock in sem_lock() */
2158		WRITE_ONCE(queue.status, -EINTR);
2159		queue.sleeper = current;
2160
2161		/* memory ordering is ensured by the lock in sem_lock() */
2162		__set_current_state(TASK_INTERRUPTIBLE);
2163		sem_unlock(sma, locknum);
2164		rcu_read_unlock();
2165
2166		timed_out = !schedule_hrtimeout_range(exp,
2167				current->timer_slack_ns, HRTIMER_MODE_ABS);
2168
2169		/*
2170		 * fastpath: the semop has completed, either successfully or
2171		 * not; which of the two, from the syscall pov, is quite
2172		 * irrelevant to us at this point; we're done.
2173		 *
2174		 * We _do_ care, nonetheless, about being awoken by a signal or
2175		 * spuriously.  The queue.status is checked again in the
2176		 * slowpath (aka after taking sem_lock), such that we can detect
2177		 * scenarios where we were awakened externally, during the
2178		 * window between wake_q_add() and wake_up_q().
2179		 */
2180		rcu_read_lock();
2181		error = READ_ONCE(queue.status);
2182		if (error != -EINTR) {
2183			/* see SEM_BARRIER_2 for purpose/pairing */
2184			smp_acquire__after_ctrl_dep();
2185			rcu_read_unlock();
2186			goto out;
2187		}
2188
2189		locknum = sem_lock(sma, sops, nsops);
2190
2191		if (!ipc_valid_object(&sma->sem_perm))
2192			goto out_unlock;
2193
2194		/*
2195		 * No need for any barrier: we are protected by sem_lock().
2196		 */
2197		error = READ_ONCE(queue.status);
2198
2199		/*
2200		 * If queue.status != -EINTR we were woken up by another process.
2201		 * Leave without unlink_queue(), but with sem_unlock().
2202		 */
2203		if (error != -EINTR)
2204			goto out_unlock;
2205
2206		/*
2207		 * If an interrupt occurred we have to clean up the queue.
2208		 */
2209		if (timed_out)
2210			error = -EAGAIN;
2211	} while (error == -EINTR && !signal_pending(current)); /* spurious */
2212
2213	unlink_queue(sma, &queue);
2214
2215out_unlock:
2216	sem_unlock(sma, locknum);
2217	rcu_read_unlock();
2218out:
2219	return error;
2220}
2221
2222static long do_semtimedop(int semid, struct sembuf __user *tsops,
2223		unsigned nsops, const struct timespec64 *timeout)
2224{
2225	struct sembuf fast_sops[SEMOPM_FAST];
2226	struct sembuf *sops = fast_sops;
2227	struct ipc_namespace *ns;
2228	int ret;
2229
2230	ns = current->nsproxy->ipc_ns;
2231	if (nsops > ns->sc_semopm)
2232		return -E2BIG;
2233	if (nsops < 1)
2234		return -EINVAL;
2235
2236	if (nsops > SEMOPM_FAST) {
2237		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
2238		if (sops == NULL)
2239			return -ENOMEM;
2240	}
2241
2242	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
2243		ret =  -EFAULT;
2244		goto out_free;
2245	}
2246
2247	ret = __do_semtimedop(semid, sops, nsops, timeout, ns);
2248
2249out_free:
2250	if (sops != fast_sops)
2251		kvfree(sops);
2252
2253	return ret;
2254}
2255
2256long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2257		     unsigned int nsops, const struct __kernel_timespec __user *timeout)
2258{
2259	if (timeout) {
2260		struct timespec64 ts;
2261		if (get_timespec64(&ts, timeout))
2262			return -EFAULT;
2263		return do_semtimedop(semid, tsops, nsops, &ts);
2264	}
2265	return do_semtimedop(semid, tsops, nsops, NULL);
2266}
2267
2268SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
2269		unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
2270{
2271	return ksys_semtimedop(semid, tsops, nsops, timeout);
2272}
2273
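
/*
 * A hypothetical user-space sketch of the semtimedop() entry point above
 * (assumes the glibc wrapper, which needs _GNU_SOURCE): block at most one
 * second trying to decrement semaphore 0; the timeout surfaces as EAGAIN,
 * matching the timed_out handling in __do_semtimedop().
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <time.h>

int main(void)
{
	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id < 0) { perror("semget"); return 1; }

	/* semval is 0, so the call sleeps and then times out */
	if (semtimedop(id, &op, 1, &ts) < 0 && errno == EAGAIN)
		printf("timed out as expected\n");

	semctl(id, 0, IPC_RMID);
	return 0;
}
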
2274#ifdef CONFIG_COMPAT_32BIT_TIME
2275long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2276			    unsigned int nsops,
2277			    const struct old_timespec32 __user *timeout)
2278{
2279	if (timeout) {
2280		struct timespec64 ts;
2281		if (get_old_timespec32(&ts, timeout))
2282			return -EFAULT;
2283		return do_semtimedop(semid, tsems, nsops, &ts);
2284	}
2285	return do_semtimedop(semid, tsems, nsops, NULL);
2286}
2287
2288SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
2289		       unsigned int, nsops,
2290		       const struct old_timespec32 __user *, timeout)
2291{
2292	return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
2293}
2294#endif
2295
2296SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2297		unsigned, nsops)
2298{
2299	return do_semtimedop(semid, tsops, nsops, NULL);
2300}
2301
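
/*
 * A hypothetical user-space sketch of semop() carrying two operations in
 * one call; perform_atomic_semop() applies them as a unit, so either both
 * take effect or neither does (here IPC_NOWAIT turns the would-block case
 * into EAGAIN instead of sleeping).
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	/* decrement sem 0 and increment sem 1 in one atomic step */
	struct sembuf ops[2] = {
		{ .sem_num = 0, .sem_op = -1, .sem_flg = IPC_NOWAIT },
		{ .sem_num = 1, .sem_op = +1, .sem_flg = 0 },
	};
	int id = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);

	if (id < 0) { perror("semget"); return 1; }

	if (semop(id, ops, 2) < 0)
		perror("semop");	/* EAGAIN: sem 0 was 0, IPC_NOWAIT set */

	semctl(id, 0, IPC_RMID);
	return 0;
}
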
2302/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2303 * parent and child tasks.
2304 */
2305
2306int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2307{
2308	struct sem_undo_list *undo_list;
2309	int error;
2310
2311	if (clone_flags & CLONE_SYSVSEM) {
2312		error = get_undo_list(&undo_list);
2313		if (error)
2314			return error;
2315		refcount_inc(&undo_list->refcnt);
2316		tsk->sysvsem.undo_list = undo_list;
2317	} else
2318		tsk->sysvsem.undo_list = NULL;
2319
2320	return 0;
2321}
2322
2323/*
2324 * add semadj values to semaphores, free undo structures.
2325 * undo structures are not freed when semaphore arrays are destroyed
2326 * so some of them may be out of date.
2327 * IMPLEMENTATION NOTE: There is some confusion over whether the
2328 * set of adjustments that needs to be done should be done in an atomic
2329 * manner or not. That is, if we are attempting to decrement the semval,
2330 * should we queue up and wait until we can do so legally?
2331 * The original implementation attempted to do this (queue and wait).
2332 * The current implementation does not do so. The POSIX standard
2333 * and SVID should be consulted to determine what behavior is mandated.
2334 */
2335void exit_sem(struct task_struct *tsk)
2336{
2337	struct sem_undo_list *ulp;
2338
2339	ulp = tsk->sysvsem.undo_list;
2340	if (!ulp)
2341		return;
2342	tsk->sysvsem.undo_list = NULL;
2343
2344	if (!refcount_dec_and_test(&ulp->refcnt))
2345		return;
2346
2347	for (;;) {
2348		struct sem_array *sma;
2349		struct sem_undo *un;
2350		int semid, i;
2351		DEFINE_WAKE_Q(wake_q);
2352
2353		cond_resched();
2354
2355		rcu_read_lock();
2356		un = list_entry_rcu(ulp->list_proc.next,
2357				    struct sem_undo, list_proc);
2358		if (&un->list_proc == &ulp->list_proc) {
2359			/*
2360			 * We must wait for freeary() before freeing this ulp,
2361			 * in case we raced with the last sem_undo. There is a
2362			 * small possibility that we exit while freeary() hasn't
2363			 * finished unlocking the sem_undo_list.
2364			 */
2365			spin_lock(&ulp->lock);
2366			spin_unlock(&ulp->lock);
2367			rcu_read_unlock();
2368			break;
2369		}
2370		spin_lock(&ulp->lock);
2371		semid = un->semid;
2372		spin_unlock(&ulp->lock);
2373
2374		/* exit_sem raced with IPC_RMID, nothing to do */
2375		if (semid == -1) {
2376			rcu_read_unlock();
2377			continue;
2378		}
2379
2380		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2381		/* exit_sem raced with IPC_RMID, nothing to do */
2382		if (IS_ERR(sma)) {
2383			rcu_read_unlock();
2384			continue;
2385		}
2386
2387		sem_lock(sma, NULL, -1);
2388		/* exit_sem raced with IPC_RMID, nothing to do */
2389		if (!ipc_valid_object(&sma->sem_perm)) {
2390			sem_unlock(sma, -1);
2391			rcu_read_unlock();
2392			continue;
2393		}
2394		un = __lookup_undo(ulp, semid);
2395		if (un == NULL) {
2396			/* exit_sem raced with IPC_RMID+semget() that created
2397			 * exactly the same semid. Nothing to do.
2398			 */
2399			sem_unlock(sma, -1);
2400			rcu_read_unlock();
2401			continue;
2402		}
2403
2404		/* remove un from the linked lists */
2405		ipc_assert_locked_object(&sma->sem_perm);
2406		list_del(&un->list_id);
2407
2408		spin_lock(&ulp->lock);
2409		list_del_rcu(&un->list_proc);
2410		spin_unlock(&ulp->lock);
2411
2412		/* perform adjustments registered in un */
2413		for (i = 0; i < sma->sem_nsems; i++) {
2414			struct sem *semaphore = &sma->sems[i];
2415			if (un->semadj[i]) {
2416				semaphore->semval += un->semadj[i];
2417				/*
2418				 * Range checks of the new semaphore value,
2419				 * not defined by SUS:
2420				 * - Some unices ignore the undo entirely
2421				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2422				 * - some cap the value (e.g. FreeBSD caps
2423				 *   at 0, but doesn't enforce SEMVMX)
2424				 *
2425				 * Linux caps the semaphore value, both at 0
2426				 * and at SEMVMX.
2427				 *
2428				 *	Manfred <manfred@colorfullife.com>
2429				 */
2430				if (semaphore->semval < 0)
2431					semaphore->semval = 0;
2432				if (semaphore->semval > SEMVMX)
2433					semaphore->semval = SEMVMX;
2434				ipc_update_pid(&semaphore->sempid, task_tgid(current));
2435			}
2436		}
2437		/* maybe some queued-up processes were waiting for this */
2438		do_smart_update(sma, NULL, 0, 1, &wake_q);
2439		sem_unlock(sma, -1);
2440		rcu_read_unlock();
2441		wake_up_q(&wake_q);
2442
2443		kvfree_rcu(un, rcu);
2444	}
2445	kfree(ulp);
2446}
2447
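
/*
 * exit_sem() is what makes SEM_UNDO observable from user space. A
 * hypothetical demo: the child decrements with SEM_UNDO and exits, and the
 * parent then sees the kernel's undo adjustment restore the value.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>
#include <unistd.h>

union semun { int val; };

int main(void)
{
	union semun arg = { .val = 1 };
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id < 0 || semctl(id, 0, SETVAL, arg) < 0)
		return 1;

	if (fork() == 0) {
		struct sembuf op = { 0, -1, SEM_UNDO };

		semop(id, &op, 1);	/* semval 1 -> 0, semadj becomes +1 */
		_exit(0);		/* exit_sem() applies semadj */
	}
	wait(NULL);
	/* prints 1: the undo adjustment ran at child exit */
	printf("semval after child exit: %d\n", semctl(id, 0, GETVAL));

	semctl(id, 0, IPC_RMID);
	return 0;
}
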
2448#ifdef CONFIG_PROC_FS
2449static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2450{
2451	struct user_namespace *user_ns = seq_user_ns(s);
2452	struct kern_ipc_perm *ipcp = it;
2453	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
2454	time64_t sem_otime;
2455
2456	/*
2457	 * The proc interface isn't aware of sem_lock(), it calls
2458	 * ipc_lock_object(), i.e. spin_lock(&sma->sem_perm.lock).
2459	 * (in sysvipc_find_ipc)
2460	 * In order to stay compatible with sem_lock(), we must
2461	 * enter / leave complex_mode.
2462	 */
2463	complexmode_enter(sma);
2464
2465	sem_otime = get_semotime(sma);
2466
2467	seq_printf(s,
2468		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
2469		   sma->sem_perm.key,
2470		   sma->sem_perm.id,
2471		   sma->sem_perm.mode,
2472		   sma->sem_nsems,
2473		   from_kuid_munged(user_ns, sma->sem_perm.uid),
2474		   from_kgid_munged(user_ns, sma->sem_perm.gid),
2475		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
2476		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
2477		   sem_otime,
2478		   sma->sem_ctime);
2479
2480	complexmode_tryleave(sma);
2481
2482	return 0;
2483}
2484#endif
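
/*
 * Each array shows up as one row of the /proc/sysvipc/sem table printed by
 * sysvipc_sem_proc_show() above; a trivial user-space sketch that streams
 * the file:
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/sysvipc/sem", "r");

	if (!f) { perror("fopen"); return 1; }
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* key semid perms nsems uid gid ... */
	fclose(f);
	return 0;
}
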
v4.17
  73#include <linux/slab.h>
  74#include <linux/spinlock.h>
  75#include <linux/init.h>
  76#include <linux/proc_fs.h>
  77#include <linux/time.h>
  78#include <linux/security.h>
  79#include <linux/syscalls.h>
  80#include <linux/audit.h>
  81#include <linux/capability.h>
  82#include <linux/seq_file.h>
  83#include <linux/rwsem.h>
  84#include <linux/nsproxy.h>
  85#include <linux/ipc_namespace.h>
  86#include <linux/sched/wake_q.h>
  87
  88#include <linux/uaccess.h>
  89#include "util.h"
  90
  91/* One semaphore structure for each semaphore in the system. */
  92struct sem {
  93	int	semval;		/* current value */
  94	/*
  95	 * PID of the process that last modified the semaphore. For
  96	 * Linux, specifically these are:
  97	 *  - semop
  98	 *  - semctl, via SETVAL and SETALL.
  99	 *  - at task exit when performing undo adjustments (see exit_sem).
 100	 */
 101	struct pid *sempid;
 102	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
 103	struct list_head pending_alter; /* pending single-sop operations */
 104					/* that alter the semaphore */
 105	struct list_head pending_const; /* pending single-sop operations */
 106					/* that do not alter the semaphore */
 107	time_t	sem_otime;	/* candidate for sem_otime */
 108} ____cacheline_aligned_in_smp;
 109
 110/* One sem_array data structure for each set of semaphores in the system. */
 111struct sem_array {
 112	struct kern_ipc_perm	sem_perm;	/* permissions .. see ipc.h */
 113	time64_t		sem_ctime;	/* create/last semctl() time */
 114	struct list_head	pending_alter;	/* pending operations */
 115						/* that alter the array */
 116	struct list_head	pending_const;	/* pending complex operations */
 117						/* that do not alter semvals */
 118	struct list_head	list_id;	/* undo requests on this array */
 119	int			sem_nsems;	/* no. of semaphores in array */
 120	int			complex_count;	/* pending complex operations */
 121	unsigned int		use_global_lock;/* >0: global lock required */
 122
 123	struct sem		sems[];
 124} __randomize_layout;
 125
 126/* One queue for each sleeping process in the system. */
 127struct sem_queue {
 128	struct list_head	list;	 /* queue of pending operations */
 129	struct task_struct	*sleeper; /* this process */
 130	struct sem_undo		*undo;	 /* undo structure */
 131	struct pid		*pid;	 /* process id of requesting process */
 132	int			status;	 /* completion status of operation */
 133	struct sembuf		*sops;	 /* array of pending operations */
 134	struct sembuf		*blocking; /* the operation that blocked */
 135	int			nsops;	 /* number of operations */
 136	bool			alter;	 /* does *sops alter the array? */
 137	bool                    dupsop;	 /* sops on more than one sem_num */
 138};
 139
 140/* Each task has a list of undo requests. They are executed automatically
 141 * when the process exits.
 142 */
 143struct sem_undo {
 144	struct list_head	list_proc;	/* per-process list: *
 145						 * all undos from one process
 146						 * rcu protected */
 147	struct rcu_head		rcu;		/* rcu struct for sem_undo */
 148	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
 149	struct list_head	list_id;	/* per semaphore array list:
 150						 * all undos for one array */
 151	int			semid;		/* semaphore set identifier */
 152	short			*semadj;	/* array of adjustments */
 153						/* one per semaphore */
 154};
 155
 156/* sem_undo_list controls shared access to the list of sem_undo structures
 157 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
 158 */
 159struct sem_undo_list {
 160	refcount_t		refcnt;
 161	spinlock_t		lock;
 162	struct list_head	list_proc;
 163};
 164
 165
 166#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])
 167
 168static int newary(struct ipc_namespace *, struct ipc_params *);
 169static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
 170#ifdef CONFIG_PROC_FS
 171static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 172#endif
 173
 174#define SEMMSL_FAST	256 /* 512 bytes on stack */
 175#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
 176
 177/*
 178 * Switching from the mode suitable for simple ops
 179 * to the mode for complex ops is costly. Therefore:
 180 * use some hysteresis
 181 */
 182#define USE_GLOBAL_LOCK_HYSTERESIS	10
 183
 184/*
 185 * Locking:
 186 * a) global sem_lock() for read/write
 187 *	sem_undo.id_next,
 188 *	sem_array.complex_count,
 189 *	sem_array.pending{_alter,_const},
 190 *	sem_array.sem_undo
 191 *
 192 * b) global or semaphore sem_lock() for read/write:
 193 *	sem_array.sems[i].pending_{const,alter}:
 194 *
 195 * c) special:
 196 *	sem_undo_list.list_proc:
 197 *	* undo_list->lock for write
 198 *	* rcu for read
 199 *	use_global_lock:
 200 *	* global sem_lock() for write
 201 *	* either local or global sem_lock() for read.
 202 *
 203 * Memory ordering:
 204 * Most ordering is enforced by using spin_lock() and spin_unlock().
 205 * The special case is use_global_lock:
 206 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
 207 * using smp_store_release().
 208 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
 209 * smp_load_acquire().
 210 * Setting it from 0 to non-zero must be ordered with regard to
 211 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 212 * is inside a spin_lock() and after a write from 0 to non-zero a
 213 * spin_lock()+spin_unlock() is done.
 214 */
 215
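
/*
 * A hypothetical stand-alone C11 analogue of the release/acquire pairing
 * described above: the writer clears the flag with a release store (cf.
 * complexmode_tryleave()) and the reader's acquire load (cf. the sem_lock()
 * fast path) then also observes every write made before the store.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int use_global_lock = 1;
static int protected_data;

static void *writer(void *arg)
{
	(void)arg;
	protected_data = 42;	/* must be visible before the flag drops */
	atomic_store_explicit(&use_global_lock, 0, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	while (atomic_load_explicit(&use_global_lock, memory_order_acquire))
		;		/* spin until the release store is seen */
	printf("%d\n", protected_data);	/* guaranteed to print 42 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}
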
 216#define sc_semmsl	sem_ctls[0]
 217#define sc_semmns	sem_ctls[1]
 218#define sc_semopm	sem_ctls[2]
 219#define sc_semmni	sem_ctls[3]
 220
 221int sem_init_ns(struct ipc_namespace *ns)
 222{
 223	ns->sc_semmsl = SEMMSL;
 224	ns->sc_semmns = SEMMNS;
 225	ns->sc_semopm = SEMOPM;
 226	ns->sc_semmni = SEMMNI;
 227	ns->used_sems = 0;
 228	return ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
 229}
 230
 231#ifdef CONFIG_IPC_NS
 232void sem_exit_ns(struct ipc_namespace *ns)
 233{
 234	free_ipcs(ns, &sem_ids(ns), freeary);
 235	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 236	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
 237}
 238#endif
 239
 240int __init sem_init(void)
 241{
 242	const int err = sem_init_ns(&init_ipc_ns);
 243
 244	ipc_init_proc_interface("sysvipc/sem",
 245				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
 246				IPC_SEM_IDS, sysvipc_sem_proc_show);
 247	return err;
 248}
 249
 250/**
 251 * unmerge_queues - unmerge queues, if possible.
 252 * @sma: semaphore array
 253 *
 254 * The function unmerges the wait queues if complex_count is 0.
 255 * It must be called prior to dropping the global semaphore array lock.
 256 */
 257static void unmerge_queues(struct sem_array *sma)
 258{
 259	struct sem_queue *q, *tq;
 260
 261	/* complex operations still around? */
 262	if (sma->complex_count)
 263		return;
 264	/*
 265	 * We will switch back to simple mode.
 266	 * Move all pending operation back into the per-semaphore
 267	 * queues.
 268	 */
 269	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
 270		struct sem *curr;
 271		curr = &sma->sems[q->sops[0].sem_num];
 272
 273		list_add_tail(&q->list, &curr->pending_alter);
 274	}
 275	INIT_LIST_HEAD(&sma->pending_alter);
 276}
 277
 278/**
 279 * merge_queues - merge single semop queues into global queue
 280 * @sma: semaphore array
 281 *
 282 * This function merges all per-semaphore queues into the global queue.
 283 * It is necessary to achieve FIFO ordering for the pending single-sop
 284 * operations when a multi-semop operation must sleep.
 285 * Only the alter operations must be moved, the const operations can stay.
 286 */
 287static void merge_queues(struct sem_array *sma)
 288{
 289	int i;
 290	for (i = 0; i < sma->sem_nsems; i++) {
 291		struct sem *sem = &sma->sems[i];
 292
 293		list_splice_init(&sem->pending_alter, &sma->pending_alter);
 294	}
 295}
 296
 297static void sem_rcu_free(struct rcu_head *head)
 298{
 299	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
 300	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
 301
 302	security_sem_free(&sma->sem_perm);
 303	kvfree(sma);
 304}
 305
 306/*
 307 * Enter the mode suitable for non-simple operations:
 308 * Caller must own sem_perm.lock.
 309 */
 310static void complexmode_enter(struct sem_array *sma)
 311{
 312	int i;
 313	struct sem *sem;
 314
 315	if (sma->use_global_lock > 0)  {
 316		/*
 317		 * We are already in global lock mode.
 318		 * Nothing to do, just reset the
 319		 * counter until we return to simple mode.
 320		 */
 321		sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 322		return;
 323	}
 324	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 325
 326	for (i = 0; i < sma->sem_nsems; i++) {
 327		sem = &sma->sems[i];
 328		spin_lock(&sem->lock);
 329		spin_unlock(&sem->lock);
 330	}
 331}
 332
 333/*
 334 * Try to leave the mode that disallows simple operations:
 335 * Caller must own sem_perm.lock.
 336 */
 337static void complexmode_tryleave(struct sem_array *sma)
 338{
 339	if (sma->complex_count)  {
 340		/* Complex ops are sleeping.
 341		 * We must stay in complex mode
 342		 */
 343		return;
 344	}
 345	if (sma->use_global_lock == 1) {
 346		/*
 347		 * Immediately after setting use_global_lock to 0,
 348		 * a simple op can start. Thus: all memory writes
 349		 * performed by the current operation must be visible
 350		 * before we set use_global_lock to 0.
 351		 */
 352		smp_store_release(&sma->use_global_lock, 0);
 353	} else {
 354		sma->use_global_lock--;
 355	}
 356}
 357
 358#define SEM_GLOBAL_LOCK	(-1)
 359/*
 360 * If the request contains only one semaphore operation, and there are
 361 * no complex transactions pending, lock only the semaphore involved.
 362 * Otherwise, lock the entire semaphore array, since we either have
 363 * multiple semaphores in our own semops, or we need to look at
 364 * semaphores from other pending complex operations.
 365 */
 366static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 367			      int nsops)
 368{
 369	struct sem *sem;
 370
 371	if (nsops != 1) {
 372		/* Complex operation - acquire a full lock */
 373		ipc_lock_object(&sma->sem_perm);
 374
 375		/* Prevent parallel simple ops */
 376		complexmode_enter(sma);
 377		return SEM_GLOBAL_LOCK;
 378	}
 379
 380	/*
 381	 * Only one semaphore affected - try to optimize locking.
 382	 * Optimized locking is possible if no complex operation
 383	 * is either enqueued or processed right now.
 384	 *
 385	 * Both facts are tracked by use_global_lock.
 386	 */
 387	sem = &sma->sems[sops->sem_num];
 388
 389	/*
 390	 * Initial check for use_global_lock. Just an optimization,
 391	 * no locking, no memory barrier.
 392	 */
 393	if (!sma->use_global_lock) {
 394		/*
 395		 * It appears that no complex operation is around.
 396		 * Acquire the per-semaphore lock.
 397		 */
 398		spin_lock(&sem->lock);
 399
 400		/* pairs with smp_store_release() */
 401		if (!smp_load_acquire(&sma->use_global_lock)) {
 402			/* fast path successful! */
 403			return sops->sem_num;
 404		}
 405		spin_unlock(&sem->lock);
 406	}
 407
 408	/* slow path: acquire the full lock */
 409	ipc_lock_object(&sma->sem_perm);
 410
 411	if (sma->use_global_lock == 0) {
 412		/*
 413		 * The use_global_lock mode ended while we waited for
 414		 * sma->sem_perm.lock. Thus we must switch to locking
 415		 * with sem->lock.
 416		 * Unlike in the fast path, there is no need to recheck
 417		 * sma->use_global_lock after we have acquired sem->lock:
 418		 * We own sma->sem_perm.lock, thus use_global_lock cannot
 419		 * change.
 420		 */
 421		spin_lock(&sem->lock);
 422
 423		ipc_unlock_object(&sma->sem_perm);
 424		return sops->sem_num;
 425	} else {
 426		/*
 427		 * Not a false alarm, thus continue to use the global lock
 428		 * mode. No need for complexmode_enter(), this was done by
 429		 * the caller that has set use_global_lock to non-zero.
 430		 */
 431		return SEM_GLOBAL_LOCK;
 432	}
 433}
 434
 435static inline void sem_unlock(struct sem_array *sma, int locknum)
 436{
 437	if (locknum == SEM_GLOBAL_LOCK) {
 438		unmerge_queues(sma);
 439		complexmode_tryleave(sma);
 440		ipc_unlock_object(&sma->sem_perm);
 441	} else {
 442		struct sem *sem = &sma->sems[locknum];
 443		spin_unlock(&sem->lock);
 444	}
 445}
 446
 447/*
 448 * sem_lock_(check_) routines are called in the paths where the rwsem
 449 * is not held.
 450 *
 451 * The caller holds the RCU read lock.
 452 */
 453static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
 454{
 455	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
 456
 457	if (IS_ERR(ipcp))
 458		return ERR_CAST(ipcp);
 459
 460	return container_of(ipcp, struct sem_array, sem_perm);
 461}
 462
 463static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
 464							int id)
 465{
 466	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
 467
 468	if (IS_ERR(ipcp))
 469		return ERR_CAST(ipcp);
 470
 471	return container_of(ipcp, struct sem_array, sem_perm);
 472}
 473
 474static inline void sem_lock_and_putref(struct sem_array *sma)
 475{
 476	sem_lock(sma, NULL, -1);
 477	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 478}
 479
 480static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 481{
 482	ipc_rmid(&sem_ids(ns), &s->sem_perm);
 483}
 484
 485static struct sem_array *sem_alloc(size_t nsems)
 486{
 487	struct sem_array *sma;
 488	size_t size;
 489
 490	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
 491		return NULL;
 492
 493	size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
 494	sma = kvmalloc(size, GFP_KERNEL);
 495	if (unlikely(!sma))
 496		return NULL;
 497
 498	memset(sma, 0, size);
 499
 500	return sma;
 501}
 502
 503/**
 504 * newary - Create a new semaphore set
 505 * @ns: namespace
 506 * @params: ptr to the structure that contains key, semflg and nsems
 507 *
 508 * Called with sem_ids.rwsem held (as a writer)
 509 */
 510static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 511{
 512	int retval;
 513	struct sem_array *sma;
 514	key_t key = params->key;
 515	int nsems = params->u.nsems;
 516	int semflg = params->flg;
 517	int i;
 518
 519	if (!nsems)
 520		return -EINVAL;
 521	if (ns->used_sems + nsems > ns->sc_semmns)
 522		return -ENOSPC;
 523
 524	sma = sem_alloc(nsems);
 525	if (!sma)
 526		return -ENOMEM;
 527
 528	sma->sem_perm.mode = (semflg & S_IRWXUGO);
 529	sma->sem_perm.key = key;
 530
 531	sma->sem_perm.security = NULL;
 532	retval = security_sem_alloc(&sma->sem_perm);
 533	if (retval) {
 534		kvfree(sma);
 535		return retval;
 536	}
 537
 538	for (i = 0; i < nsems; i++) {
 539		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
 540		INIT_LIST_HEAD(&sma->sems[i].pending_const);
 541		spin_lock_init(&sma->sems[i].lock);
 542	}
 543
 544	sma->complex_count = 0;
 545	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 546	INIT_LIST_HEAD(&sma->pending_alter);
 547	INIT_LIST_HEAD(&sma->pending_const);
 548	INIT_LIST_HEAD(&sma->list_id);
 549	sma->sem_nsems = nsems;
 550	sma->sem_ctime = ktime_get_real_seconds();
 551
 552	/* ipc_addid() locks sma upon success. */
 553	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
 554	if (retval < 0) {
 555		call_rcu(&sma->sem_perm.rcu, sem_rcu_free);
 556		return retval;
 557	}
 558	ns->used_sems += nsems;
 559
 560	sem_unlock(sma, -1);
 561	rcu_read_unlock();
 562
 563	return sma->sem_perm.id;
 564}
 565
 566
 567/*
 568 * Called with sem_ids.rwsem and ipcp locked.
 569 */
 570static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
 571				struct ipc_params *params)
 572{
 573	struct sem_array *sma;
 574
 575	sma = container_of(ipcp, struct sem_array, sem_perm);
 576	if (params->u.nsems > sma->sem_nsems)
 577		return -EINVAL;
 578
 579	return 0;
 580}
 581
 582long ksys_semget(key_t key, int nsems, int semflg)
 583{
 584	struct ipc_namespace *ns;
 585	static const struct ipc_ops sem_ops = {
 586		.getnew = newary,
 587		.associate = security_sem_associate,
 588		.more_checks = sem_more_checks,
 589	};
 590	struct ipc_params sem_params;
 591
 592	ns = current->nsproxy->ipc_ns;
 593
 594	if (nsems < 0 || nsems > ns->sc_semmsl)
 595		return -EINVAL;
 596
 597	sem_params.key = key;
 598	sem_params.flg = semflg;
 599	sem_params.u.nsems = nsems;
 600
 601	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 602}
 603
 604SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 605{
 606	return ksys_semget(key, nsems, semflg);
 607}
 608
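
/*
 * A hypothetical user-space sketch of the semget() semantics implemented by
 * newary()/sem_more_checks() above: creating a set, then opening it with a
 * larger nsems fails with EINVAL, while nsems == 0 matches any existing set.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	key_t key = ftok("/tmp", 'S');	/* assumes /tmp exists */
	int id;

	if (key == (key_t)-1) { perror("ftok"); return 1; }

	id = semget(key, 4, IPC_CREAT | 0600);
	if (id < 0) { perror("semget create"); return 1; }

	if (semget(key, 8, 0600) < 0 && errno == EINVAL)
		printf("existing set has fewer than 8 semaphores\n");
	if (semget(key, 0, 0600) == id)
		printf("nsems == 0 opens the existing set\n");

	semctl(id, 0, IPC_RMID);
	return 0;
}
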
 609/**
 610 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 611 *                               operations on a given array.
 612 * @sma: semaphore array
 613 * @q: struct sem_queue that describes the operation
 614 *
 615 * Whether the caller blocks depends on the value of the semaphore
 616 * operation (sem_op):
 617 *
 618 *  (1) >0 never blocks.
 619 *  (2)  0 (wait-for-zero operation): blocks if semval is non-zero.
 620 *  (3) <0 blocks if it would decrement semval to a value smaller than zero.
 621 *
 622 * Returns 0 if the operation was possible.
 623 * Returns 1 if the operation is impossible; the caller must sleep.
 624 * Returns <0 for error codes.
 625 */
 626static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 627{
 628	int result, sem_op, nsops;
 629	struct pid *pid;
 630	struct sembuf *sop;
 631	struct sem *curr;
 632	struct sembuf *sops;
 633	struct sem_undo *un;
 634
 635	sops = q->sops;
 636	nsops = q->nsops;
 637	un = q->undo;
 638
 639	for (sop = sops; sop < sops + nsops; sop++) {
 640		curr = &sma->sems[sop->sem_num];
 641		sem_op = sop->sem_op;
 642		result = curr->semval;
 643
 644		if (!sem_op && result)
 645			goto would_block;
 646
 647		result += sem_op;
 648		if (result < 0)
 649			goto would_block;
 650		if (result > SEMVMX)
 651			goto out_of_range;
 652
 653		if (sop->sem_flg & SEM_UNDO) {
 654			int undo = un->semadj[sop->sem_num] - sem_op;
 655			/* Exceeding the undo range is an error. */
 656			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 657				goto out_of_range;
 658			un->semadj[sop->sem_num] = undo;
 659		}
 660
 661		curr->semval = result;
 662	}
 663
 664	sop--;
 665	pid = q->pid;
 666	while (sop >= sops) {
 667		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
 668		sop--;
 669	}
 670
 671	return 0;
 672
 673out_of_range:
 674	result = -ERANGE;
 675	goto undo;
 676
 677would_block:
 678	q->blocking = sop;
 679
 680	if (sop->sem_flg & IPC_NOWAIT)
 681		result = -EAGAIN;
 682	else
 683		result = 1;
 684
 685undo:
 686	sop--;
 687	while (sop >= sops) {
 688		sem_op = sop->sem_op;
 689		sma->sems[sop->sem_num].semval -= sem_op;
 690		if (sop->sem_flg & SEM_UNDO)
 691			un->semadj[sop->sem_num] += sem_op;
 692		sop--;
 693	}
 694
 695	return result;
 696}
 697
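
/*
 * The sem_op rules enforced above, seen from user space: a hypothetical
 * wait-for-zero sketch in which the parent's sem_op == 0 blocks until the
 * child's decrement brings semval to zero.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct sembuf dec  = { 0, -1, 0 };	/* sem_op < 0: decrement */
	struct sembuf zero = { 0,  0, 0 };	/* sem_op == 0: wait for zero */
	union semun { int val; } arg = { .val = 1 };
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id < 0 || semctl(id, 0, SETVAL, arg) < 0)
		return 1;

	if (fork() == 0) {
		sleep(1);
		semop(id, &dec, 1);	/* 1 -> 0: wakes the waiter */
		_exit(0);
	}
	semop(id, &zero, 1);		/* blocks until semval == 0 */
	printf("semval reached zero\n");
	wait(NULL);
	semctl(id, 0, IPC_RMID);
	return 0;
}
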
 698static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 699{
 700	int result, sem_op, nsops;
 701	struct sembuf *sop;
 702	struct sem *curr;
 703	struct sembuf *sops;
 704	struct sem_undo *un;
 705
 706	sops = q->sops;
 707	nsops = q->nsops;
 708	un = q->undo;
 709
 710	if (unlikely(q->dupsop))
 711		return perform_atomic_semop_slow(sma, q);
 712
 713	/*
 714	 * We scan the semaphore set twice, first to ensure that the entire
 715	 * operation can succeed, thereby avoiding pointless writes to
 716	 * shared memory (and having to undo them) when we must instead
 717	 * block until the operations can go through.
 718	 */
 719	for (sop = sops; sop < sops + nsops; sop++) {
 720		curr = &sma->sems[sop->sem_num];
 721		sem_op = sop->sem_op;
 722		result = curr->semval;
 723
 724		if (!sem_op && result)
 725			goto would_block; /* wait-for-zero */
 726
 727		result += sem_op;
 728		if (result < 0)
 729			goto would_block;
 730
 731		if (result > SEMVMX)
 732			return -ERANGE;
 733
 734		if (sop->sem_flg & SEM_UNDO) {
 735			int undo = un->semadj[sop->sem_num] - sem_op;
 736
 737			/* Exceeding the undo range is an error. */
 738			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 739				return -ERANGE;
 740		}
 741	}
 742
 743	for (sop = sops; sop < sops + nsops; sop++) {
 744		curr = &sma->sems[sop->sem_num];
 745		sem_op = sop->sem_op;
 746		result = curr->semval;
 747
 748		if (sop->sem_flg & SEM_UNDO) {
 749			int undo = un->semadj[sop->sem_num] - sem_op;
 750
 751			un->semadj[sop->sem_num] = undo;
 752		}
 753		curr->semval += sem_op;
 754		ipc_update_pid(&curr->sempid, q->pid);
 755	}
 756
 757	return 0;
 758
 759would_block:
 760	q->blocking = sop;
 761	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
 762}
 763
 764static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
 765					     struct wake_q_head *wake_q)
 766{
 767	wake_q_add(wake_q, q->sleeper);
 768	/*
 769	 * Rely on the above implicit barrier, such that we can
 770	 * ensure that we hold reference to the task before setting
 771	 * q->status. Otherwise we could race with do_exit if the
 772	 * task is awoken by an external event before calling
 773	 * wake_up_process().
 774	 */
 775	WRITE_ONCE(q->status, error);
 776}
 777
 778static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
 779{
 780	list_del(&q->list);
 781	if (q->nsops > 1)
 782		sma->complex_count--;
 783}
 784
 785/** check_restart(sma, q)
 786 * @sma: semaphore array
 787 * @q: the operation that just completed
 788 *
 789 * update_queue is O(N^2) when it restarts scanning the whole queue of
 790 * waiting operations. Therefore this function checks if the restart is
 791 * really necessary. It is called after a previously waiting operation
 792 * modified the array.
 793 * Note that wait-for-zero operations are handled without restart.
 794 */
 795static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
 796{
 797	/* pending complex alter operations are too difficult to analyse */
 798	if (!list_empty(&sma->pending_alter))
 799		return 1;
 800
 801	/* we were a sleeping complex operation. Too difficult */
 802	if (q->nsops > 1)
 803		return 1;
 804
 805	/* It is impossible that someone waits for the new value:
 806	 * - complex operations always restart.
 807 * - wait-for-zero operations are handled separately.
 808	 * - q is a previously sleeping simple operation that
 809	 *   altered the array. It must be a decrement, because
 810	 *   simple increments never sleep.
 811	 * - If there are older (higher priority) decrements
 812	 *   in the queue, then they have observed the original
 813	 *   semval value and couldn't proceed. The operation
 814 *   decremented the value - thus they won't proceed either.
 815	 */
 816	return 0;
 817}
 818
 819/**
 820 * wake_const_ops - wake up non-alter tasks
 821 * @sma: semaphore array.
 822 * @semnum: semaphore that was modified.
 823 * @wake_q: lockless wake-queue head.
 824 *
 825 * wake_const_ops must be called after a semaphore in a semaphore array
 826 * was set to 0. If complex const operations are pending, wake_const_ops must
 827 * be called with semnum = -1, as well as with the number of each modified
 828 * semaphore.
 829 * The tasks that must be woken up are added to @wake_q. The return code
 830 * is stored in q->status.
 831 * The function returns 1 if at least one operation was completed successfully.
 832 */
 833static int wake_const_ops(struct sem_array *sma, int semnum,
 834			  struct wake_q_head *wake_q)
 835{
 836	struct sem_queue *q, *tmp;
 837	struct list_head *pending_list;
 838	int semop_completed = 0;
 839
 840	if (semnum == -1)
 841		pending_list = &sma->pending_const;
 842	else
 843		pending_list = &sma->sems[semnum].pending_const;
 844
 845	list_for_each_entry_safe(q, tmp, pending_list, list) {
 846		int error = perform_atomic_semop(sma, q);
 847
 848		if (error > 0)
 849			continue;
 850		/* operation completed, remove from queue & wakeup */
 851		unlink_queue(sma, q);
 852
 853		wake_up_sem_queue_prepare(q, error, wake_q);
 854		if (error == 0)
 855			semop_completed = 1;
 856	}
 857
 858	return semop_completed;
 859}
 860
 861/**
 862 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 863 * @sma: semaphore array
 864 * @sops: operations that were performed
 865 * @nsops: number of operations
 866 * @wake_q: lockless wake-queue head
 867 *
 868 * Checks all required queues for wait-for-zero operations, based
 869 * on the actual changes that were performed on the semaphore array.
 870 * The function returns 1 if at least one operation was completed successfully.
 871 */
 872static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 873				int nsops, struct wake_q_head *wake_q)
 874{
 875	int i;
 876	int semop_completed = 0;
 877	int got_zero = 0;
 878
 879	/* first: the per-semaphore queues, if known */
 880	if (sops) {
 881		for (i = 0; i < nsops; i++) {
 882			int num = sops[i].sem_num;
 883
 884			if (sma->sems[num].semval == 0) {
 885				got_zero = 1;
 886				semop_completed |= wake_const_ops(sma, num, wake_q);
 887			}
 888		}
 889	} else {
 890		/*
 891		 * No sops means modified semaphores not known.
 892		 * Assume all were changed.
 893		 */
 894		for (i = 0; i < sma->sem_nsems; i++) {
 895			if (sma->sems[i].semval == 0) {
 896				got_zero = 1;
 897				semop_completed |= wake_const_ops(sma, i, wake_q);
 898			}
 899		}
 900	}
 901	/*
 902	 * If one of the modified semaphores got 0,
 903	 * then check the global queue, too.
 904	 */
 905	if (got_zero)
 906		semop_completed |= wake_const_ops(sma, -1, wake_q);
 907
 908	return semop_completed;
 909}
 910
 911
 912/**
 913 * update_queue - look for tasks that can be completed.
 914 * @sma: semaphore array.
 915 * @semnum: semaphore that was modified.
 916 * @wake_q: lockless wake-queue head.
 917 *
 918 * update_queue must be called after a semaphore in a semaphore array
 919 * was modified. If multiple semaphores were modified, update_queue must
 920 * be called with semnum = -1, as well as with the number of each modified
 921 * semaphore.
 922 * The tasks that must be woken up are added to @wake_q. The return code
 923 * is stored in q->status.
 924 * The function internally checks if const operations can now succeed.
 925 *
 926 * The function returns 1 if at least one semop was completed successfully.
 927 */
 928static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
 929{
 930	struct sem_queue *q, *tmp;
 931	struct list_head *pending_list;
 932	int semop_completed = 0;
 933
 934	if (semnum == -1)
 935		pending_list = &sma->pending_alter;
 936	else
 937		pending_list = &sma->sems[semnum].pending_alter;
 938
 939again:
 940	list_for_each_entry_safe(q, tmp, pending_list, list) {
 941		int error, restart;
 942
 943		/* If we are scanning the single-sop, per-semaphore list of
 944		 * one semaphore and that semaphore is 0, then it is not
 945		 * necessary to scan further: simple increments
 946		 * that affect only one entry succeed immediately and cannot
 947		 * be in the per-semaphore pending queue, and decrements
 948		 * cannot be successful if the value is already 0.
 949		 */
 950		if (semnum != -1 && sma->sems[semnum].semval == 0)
 951			break;
 952
 953		error = perform_atomic_semop(sma, q);
 954
 955		/* Does q->sleeper still need to sleep? */
 956		if (error > 0)
 957			continue;
 958
 959		unlink_queue(sma, q);
 960
 961		if (error) {
 962			restart = 0;
 963		} else {
 964			semop_completed = 1;
 965			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
 966			restart = check_restart(sma, q);
 967		}
 968
 969		wake_up_sem_queue_prepare(q, error, wake_q);
 970		if (restart)
 971			goto again;
 972	}
 973	return semop_completed;
 974}
 975
 976/**
 977 * set_semotime - set sem_otime
 978 * @sma: semaphore array
 979 * @sops: operations that modified the array, may be NULL
 980 *
 981 * sem_otime is replicated to avoid cache line thrashing.
 982 * This function sets one instance to the current time.
 983 */
 984static void set_semotime(struct sem_array *sma, struct sembuf *sops)
 985{
 986	if (sops == NULL) {
 987		sma->sems[0].sem_otime = get_seconds();
 988	} else {
 989		sma->sems[sops[0].sem_num].sem_otime =
 990							get_seconds();
 991	}
 992}
 993
 994/**
 995 * do_smart_update - optimized update_queue
 996 * @sma: semaphore array
 997 * @sops: operations that were performed
 998 * @nsops: number of operations
 999 * @otime: force setting otime
1000 * @wake_q: lockless wake-queue head
1001 *
1002 * do_smart_update() does the required calls to update_queue and wakeup_zero,
1003 * based on the actual changes that were performed on the semaphore array.
1004 * Note that the function does not do the actual wake-up: the caller is
1005 * responsible for calling wake_up_q().
1006 * It is safe to perform this call after dropping all locks.
1007 */
1008static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
1009			    int otime, struct wake_q_head *wake_q)
1010{
1011	int i;
1012
1013	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);
1014
1015	if (!list_empty(&sma->pending_alter)) {
1016		/* semaphore array uses the global queue - just process it. */
1017		otime |= update_queue(sma, -1, wake_q);
1018	} else {
1019		if (!sops) {
1020			/*
1021			 * No sops, thus the modified semaphores are not
1022			 * known. Check all.
1023			 */
1024			for (i = 0; i < sma->sem_nsems; i++)
1025				otime |= update_queue(sma, i, wake_q);
1026		} else {
1027			/*
1028			 * Check the semaphores that were increased:
1029			 * - No complex ops, thus all sleeping ops are
1030			 *   decrements.
1031			 * - if we decreased the value, then any sleeping
1032			 *   semaphore ops won't be able to run: If the
1033			 *   previous value was too small, then the new
1034			 *   value will be too small, too.
1035			 */
1036			for (i = 0; i < nsops; i++) {
1037				if (sops[i].sem_op > 0) {
1038					otime |= update_queue(sma,
1039							      sops[i].sem_num, wake_q);
1040				}
1041			}
1042		}
1043	}
1044	if (otime)
1045		set_semotime(sma, sops);
1046}
1047
1048/*
1049 * check_qop: Test if a queued operation sleeps on the semaphore semnum
1050 */
1051static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
1052			bool count_zero)
1053{
1054	struct sembuf *sop = q->blocking;
1055
1056	/*
1057	 * Linux always (since 0.99.10) reported a task as sleeping on all
1058	 * semaphores. This violates SUS, therefore it was changed to the
1059	 * standard-compliant behavior.
1060	 * Give the administrators a chance to notice that an application
1061	 * might misbehave because it relies on the Linux behavior.
1062	 */
1063	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
1064			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
1065			current->comm, task_pid_nr(current));
1066
1067	if (sop->sem_num != semnum)
1068		return 0;
1069
1070	if (count_zero && sop->sem_op == 0)
1071		return 1;
1072	if (!count_zero && sop->sem_op < 0)
1073		return 1;
1074
1075	return 0;
1076}
1077
1078/* The following counts are associated to each semaphore:
1079 *   semncnt        number of tasks waiting on semval being nonzero
1080 *   semzcnt        number of tasks waiting on semval being zero
1081 *
1082 * By definition, a task waits only on the semaphore of the first semop
1083 * that cannot proceed, even if additional operations would block, too.
1084 */
1085static int count_semcnt(struct sem_array *sma, ushort semnum,
1086			bool count_zero)
1087{
1088	struct list_head *l;
1089	struct sem_queue *q;
1090	int semcnt;
1091
1092	semcnt = 0;
1093	/* First: check the simple operations. They are easy to evaluate */
1094	if (count_zero)
1095		l = &sma->sems[semnum].pending_const;
1096	else
1097		l = &sma->sems[semnum].pending_alter;
1098
1099	list_for_each_entry(q, l, list) {
1100		/* all tasks on a per-semaphore list sleep on exactly
1101		 * that semaphore
1102		 */
1103		semcnt++;
1104	}
1105
1106	/* Then: check the complex operations. */
1107	list_for_each_entry(q, &sma->pending_alter, list) {
1108		semcnt += check_qop(sma, semnum, q, count_zero);
1109	}
1110	if (count_zero) {
1111		list_for_each_entry(q, &sma->pending_const, list) {
1112			semcnt += check_qop(sma, semnum, q, count_zero);
1113		}
1114	}
1115	return semcnt;
1116}
1117
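
/*
 * count_semcnt() backs the GETNCNT/GETZCNT commands. A hypothetical sketch
 * querying both counters for semaphore 0 (both print 0 here, since nothing
 * is blocked on the freshly created set):
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id < 0) { perror("semget"); return 1; }

	/* tasks blocked waiting for semval to grow / to reach zero */
	printf("semncnt=%d semzcnt=%d\n",
	       semctl(id, 0, GETNCNT), semctl(id, 0, GETZCNT));

	semctl(id, 0, IPC_RMID);
	return 0;
}
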
1118/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1119 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
1120 * remains locked on exit.
1121 */
1122static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1123{
1124	struct sem_undo *un, *tu;
1125	struct sem_queue *q, *tq;
1126	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1127	int i;
1128	DEFINE_WAKE_Q(wake_q);
1129
1130	/* Free the existing undo structures for this semaphore set.  */
1131	ipc_assert_locked_object(&sma->sem_perm);
1132	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1133		list_del(&un->list_id);
1134		spin_lock(&un->ulp->lock);
1135		un->semid = -1;
1136		list_del_rcu(&un->list_proc);
1137		spin_unlock(&un->ulp->lock);
1138		kfree_rcu(un, rcu);
1139	}
1140
1141	/* Wake up all pending processes and let them fail with EIDRM. */
1142	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1143		unlink_queue(sma, q);
1144		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1145	}
1146
1147	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1148		unlink_queue(sma, q);
1149		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1150	}
1151	for (i = 0; i < sma->sem_nsems; i++) {
1152		struct sem *sem = &sma->sems[i];
1153		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
1154			unlink_queue(sma, q);
1155			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1156		}
1157		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
1158			unlink_queue(sma, q);
1159			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1160		}
1161		ipc_update_pid(&sem->sempid, NULL);
1162	}
1163
1164	/* Remove the semaphore set from the IDR */
1165	sem_rmid(ns, sma);
1166	sem_unlock(sma, -1);
1167	rcu_read_unlock();
1168
1169	wake_up_q(&wake_q);
1170	ns->used_sems -= sma->sem_nsems;
1171	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1172}
1173
1174static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
1175{
1176	switch (version) {
1177	case IPC_64:
1178		return copy_to_user(buf, in, sizeof(*in));
1179	case IPC_OLD:
1180	    {
1181		struct semid_ds out;
1182
1183		memset(&out, 0, sizeof(out));
1184
1185		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
1186
1187		out.sem_otime	= in->sem_otime;
1188		out.sem_ctime	= in->sem_ctime;
1189		out.sem_nsems	= in->sem_nsems;
1190
1191		return copy_to_user(buf, &out, sizeof(out));
1192	    }
1193	default:
1194		return -EINVAL;
1195	}
1196}
1197
1198static time64_t get_semotime(struct sem_array *sma)
1199{
1200	int i;
1201	time64_t res;
1202
1203	res = sma->sems[0].sem_otime;
1204	for (i = 1; i < sma->sem_nsems; i++) {
1205		time64_t to = sma->sems[i].sem_otime;
1206
1207		if (to > res)
1208			res = to;
1209	}
1210	return res;
1211}
1212
1213static int semctl_stat(struct ipc_namespace *ns, int semid,
1214			 int cmd, struct semid64_ds *semid64)
1215{
1216	struct sem_array *sma;
1217	int id = 0;
1218	int err;
1219
1220	memset(semid64, 0, sizeof(*semid64));
1221
1222	rcu_read_lock();
1223	if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
1224		sma = sem_obtain_object(ns, semid);
1225		if (IS_ERR(sma)) {
1226			err = PTR_ERR(sma);
1227			goto out_unlock;
1228		}
1229		id = sma->sem_perm.id;
1230	} else { /* IPC_STAT */
1231		sma = sem_obtain_object_check(ns, semid);
1232		if (IS_ERR(sma)) {
1233			err = PTR_ERR(sma);
1234			goto out_unlock;
1235		}
1236	}
1237
1238	/* see comment for SHM_STAT_ANY */
1239	if (cmd == SEM_STAT_ANY)
1240		audit_ipc_obj(&sma->sem_perm);
1241	else {
1242		err = -EACCES;
1243		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1244			goto out_unlock;
1245	}
1246
1247	err = security_sem_semctl(&sma->sem_perm, cmd);
1248	if (err)
1249		goto out_unlock;
1250
1251	ipc_lock_object(&sma->sem_perm);
1252
1253	if (!ipc_valid_object(&sma->sem_perm)) {
1254		ipc_unlock_object(&sma->sem_perm);
1255		err = -EIDRM;
1256		goto out_unlock;
1257	}
1258
1259	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
1260	semid64->sem_otime = get_semotime(sma);
1261	semid64->sem_ctime = sma->sem_ctime;
1262	semid64->sem_nsems = sma->sem_nsems;
1263
1264	ipc_unlock_object(&sma->sem_perm);
1265	rcu_read_unlock();
1266	return id;
1267
1268out_unlock:
1269	rcu_read_unlock();
1270	return err;
1271}
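/*
 * Editorial sketch (not part of the original source): semctl_stat() backs
 * IPC_STAT, SEM_STAT and SEM_STAT_ANY. A userspace caller must define the
 * semun union itself on Linux:
 *
 *	union semun { int val; struct semid_ds *buf; unsigned short *array; };
 *
 *	struct semid_ds ds;
 *	union semun arg = { .buf = &ds };
 *
 *	if (semctl(semid, 0, IPC_STAT, arg) == 0)
 *		printf("nsems=%lu\n", (unsigned long)ds.sem_nsems);
 *
 * Note that SEM_STAT interprets "semid" as an index into the internal IDR
 * rather than an id, and returns the set's id on success - hence the
 * "return id" above instead of "return 0".
 */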
1272
1273static int semctl_info(struct ipc_namespace *ns, int semid,
1274			 int cmd, void __user *p)
1275{
1276	struct seminfo seminfo;
1277	int max_id;
1278	int err;
1279
1280	err = security_sem_semctl(NULL, cmd);
1281	if (err)
1282		return err;
1283
1284	memset(&seminfo, 0, sizeof(seminfo));
1285	seminfo.semmni = ns->sc_semmni;
1286	seminfo.semmns = ns->sc_semmns;
1287	seminfo.semmsl = ns->sc_semmsl;
1288	seminfo.semopm = ns->sc_semopm;
1289	seminfo.semvmx = SEMVMX;
1290	seminfo.semmnu = SEMMNU;
1291	seminfo.semmap = SEMMAP;
1292	seminfo.semume = SEMUME;
1293	down_read(&sem_ids(ns).rwsem);
1294	if (cmd == SEM_INFO) {
1295		seminfo.semusz = sem_ids(ns).in_use;
1296		seminfo.semaem = ns->used_sems;
1297	} else {
1298		seminfo.semusz = SEMUSZ;
1299		seminfo.semaem = SEMAEM;
1300	}
1301	max_id = ipc_get_maxid(&sem_ids(ns));
1302	up_read(&sem_ids(ns).rwsem);
1303	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1304		return -EFAULT;
1305	return (max_id < 0) ? 0 : max_id;
1306}
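/*
 * Editorial sketch (not part of the original source): IPC_INFO reports the
 * configured limits, while SEM_INFO reuses semusz/semaem for the current
 * number of sets and semaphores. Assuming glibc's seminfo definition:
 *
 *	struct seminfo info;
 *	union semun { int val; struct semid_ds *buf;
 *		      unsigned short *array; struct seminfo *__buf; };
 *	union semun arg = { .__buf = &info };
 *
 *	int max_id = semctl(0, 0, SEM_INFO, arg);
 *	if (max_id >= 0)
 *		printf("sets: %d, semaphores: %d\n", info.semusz, info.semaem);
 *
 * The non-negative return value is the highest in-use index, which is
 * what makes iterating with SEM_STAT practical.
 */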
1307
1308static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1309		int val)
1310{
1311	struct sem_undo *un;
1312	struct sem_array *sma;
1313	struct sem *curr;
1314	int err;
1315	DEFINE_WAKE_Q(wake_q);
1316
1317	if (val > SEMVMX || val < 0)
1318		return -ERANGE;
1319
1320	rcu_read_lock();
1321	sma = sem_obtain_object_check(ns, semid);
1322	if (IS_ERR(sma)) {
1323		rcu_read_unlock();
1324		return PTR_ERR(sma);
1325	}
1326
1327	if (semnum < 0 || semnum >= sma->sem_nsems) {
1328		rcu_read_unlock();
1329		return -EINVAL;
1330	}
1331
1332
1333	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1334		rcu_read_unlock();
1335		return -EACCES;
1336	}
1337
1338	err = security_sem_semctl(&sma->sem_perm, SETVAL);
1339	if (err) {
1340		rcu_read_unlock();
1341		return -EACCES;
1342	}
1343
1344	sem_lock(sma, NULL, -1);
1345
1346	if (!ipc_valid_object(&sma->sem_perm)) {
1347		sem_unlock(sma, -1);
1348		rcu_read_unlock();
1349		return -EIDRM;
1350	}
1351
1352	curr = &sma->sems[semnum];
1353
1354	ipc_assert_locked_object(&sma->sem_perm);
1355	list_for_each_entry(un, &sma->list_id, list_id)
1356		un->semadj[semnum] = 0;
1357
1358	curr->semval = val;
1359	ipc_update_pid(&curr->sempid, task_tgid(current));
1360	sma->sem_ctime = ktime_get_real_seconds();
1361	/* maybe some queued-up processes were waiting for this */
1362	do_smart_update(sma, NULL, 0, 0, &wake_q);
1363	sem_unlock(sma, -1);
1364	rcu_read_unlock();
1365	wake_up_q(&wake_q);
1366	return 0;
1367}
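/*
 * Editorial sketch (not part of the original source): SETVAL is the usual
 * way to initialise a semaphore after semget(), e.g. to 1 for mutex-like
 * use. Values outside 0..SEMVMX fail with ERANGE, per the check above:
 *
 *	union semun { int val; struct semid_ds *buf; unsigned short *array; };
 *	union semun arg = { .val = 1 };
 *
 *	if (semctl(semid, 0, SETVAL, arg) == -1)
 *		perror("semctl(SETVAL)");
 *
 * Note that SETVAL also clears every process's semadj value for this
 * semaphore, as seen in the list_for_each_entry() loop above.
 */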
1368
1369static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1370		int cmd, void __user *p)
1371{
1372	struct sem_array *sma;
1373	struct sem *curr;
1374	int err, nsems;
1375	ushort fast_sem_io[SEMMSL_FAST];
1376	ushort *sem_io = fast_sem_io;
1377	DEFINE_WAKE_Q(wake_q);
1378
1379	rcu_read_lock();
1380	sma = sem_obtain_object_check(ns, semid);
1381	if (IS_ERR(sma)) {
1382		rcu_read_unlock();
1383		return PTR_ERR(sma);
1384	}
1385
1386	nsems = sma->sem_nsems;
1387
1388	err = -EACCES;
1389	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1390		goto out_rcu_wakeup;
1391
1392	err = security_sem_semctl(&sma->sem_perm, cmd);
1393	if (err)
1394		goto out_rcu_wakeup;
1395
1396	err = -EACCES;
1397	switch (cmd) {
1398	case GETALL:
1399	{
1400		ushort __user *array = p;
1401		int i;
1402
1403		sem_lock(sma, NULL, -1);
1404		if (!ipc_valid_object(&sma->sem_perm)) {
1405			err = -EIDRM;
1406			goto out_unlock;
1407		}
1408		if (nsems > SEMMSL_FAST) {
1409			if (!ipc_rcu_getref(&sma->sem_perm)) {
1410				err = -EIDRM;
1411				goto out_unlock;
1412			}
1413			sem_unlock(sma, -1);
1414			rcu_read_unlock();
1415			sem_io = kvmalloc_array(nsems, sizeof(ushort),
1416						GFP_KERNEL);
1417			if (sem_io == NULL) {
1418				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1419				return -ENOMEM;
1420			}
1421
1422			rcu_read_lock();
1423			sem_lock_and_putref(sma);
1424			if (!ipc_valid_object(&sma->sem_perm)) {
1425				err = -EIDRM;
1426				goto out_unlock;
1427			}
1428		}
1429		for (i = 0; i < sma->sem_nsems; i++)
1430			sem_io[i] = sma->sems[i].semval;
1431		sem_unlock(sma, -1);
1432		rcu_read_unlock();
1433		err = 0;
1434		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1435			err = -EFAULT;
1436		goto out_free;
1437	}
1438	case SETALL:
1439	{
1440		int i;
1441		struct sem_undo *un;
1442
1443		if (!ipc_rcu_getref(&sma->sem_perm)) {
1444			err = -EIDRM;
1445			goto out_rcu_wakeup;
1446		}
1447		rcu_read_unlock();
1448
1449		if (nsems > SEMMSL_FAST) {
1450			sem_io = kvmalloc_array(nsems, sizeof(ushort),
1451						GFP_KERNEL);
1452			if (sem_io == NULL) {
1453				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1454				return -ENOMEM;
1455			}
1456		}
1457
1458		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
1459			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1460			err = -EFAULT;
1461			goto out_free;
1462		}
1463
1464		for (i = 0; i < nsems; i++) {
1465			if (sem_io[i] > SEMVMX) {
1466				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1467				err = -ERANGE;
1468				goto out_free;
1469			}
1470		}
1471		rcu_read_lock();
1472		sem_lock_and_putref(sma);
1473		if (!ipc_valid_object(&sma->sem_perm)) {
1474			err = -EIDRM;
1475			goto out_unlock;
1476		}
1477
1478		for (i = 0; i < nsems; i++) {
1479			sma->sems[i].semval = sem_io[i];
1480			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
1481		}
1482
1483		ipc_assert_locked_object(&sma->sem_perm);
1484		list_for_each_entry(un, &sma->list_id, list_id) {
1485			for (i = 0; i < nsems; i++)
1486				un->semadj[i] = 0;
1487		}
1488		sma->sem_ctime = ktime_get_real_seconds();
1489		/* maybe some queued-up processes were waiting for this */
1490		do_smart_update(sma, NULL, 0, 0, &wake_q);
1491		err = 0;
1492		goto out_unlock;
1493	}
1494	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1495	}
1496	err = -EINVAL;
1497	if (semnum < 0 || semnum >= nsems)
1498		goto out_rcu_wakeup;
1499
1500	sem_lock(sma, NULL, -1);
1501	if (!ipc_valid_object(&sma->sem_perm)) {
1502		err = -EIDRM;
1503		goto out_unlock;
1504	}
1505	curr = &sma->sems[semnum];
1506
1507	switch (cmd) {
1508	case GETVAL:
1509		err = curr->semval;
1510		goto out_unlock;
1511	case GETPID:
1512		err = pid_vnr(curr->sempid);
1513		goto out_unlock;
1514	case GETNCNT:
1515		err = count_semcnt(sma, semnum, 0);
1516		goto out_unlock;
1517	case GETZCNT:
1518		err = count_semcnt(sma, semnum, 1);
1519		goto out_unlock;
1520	}
1521
1522out_unlock:
1523	sem_unlock(sma, -1);
1524out_rcu_wakeup:
1525	rcu_read_unlock();
1526	wake_up_q(&wake_q);
1527out_free:
1528	if (sem_io != fast_sem_io)
1529		kvfree(sem_io);
1530	return err;
1531}
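/*
 * Editorial sketch (not part of the original source): GETALL/SETALL act on
 * the whole array under one sem_lock(), so the values form a consistent
 * snapshot even against concurrent semop() callers. Assuming the caller
 * knows the set size (NSEMS and use_snapshot() are placeholders):
 *
 *	unsigned short vals[NSEMS];
 *	union semun { int val; struct semid_ds *buf; unsigned short *array; };
 *	union semun arg = { .array = vals };
 *
 *	if (semctl(semid, 0, GETALL, arg) == 0)
 *		use_snapshot(vals);
 *
 * For sets larger than SEMMSL_FAST the kernel path above drops the lock
 * to kvmalloc a buffer and revalidates the set afterwards.
 */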
1532
1533static inline unsigned long
1534copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1535{
1536	switch (version) {
1537	case IPC_64:
1538		if (copy_from_user(out, buf, sizeof(*out)))
1539			return -EFAULT;
1540		return 0;
1541	case IPC_OLD:
1542	    {
1543		struct semid_ds tbuf_old;
1544
1545		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1546			return -EFAULT;
1547
1548		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
1549		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
1550		out->sem_perm.mode	= tbuf_old.sem_perm.mode;
1551
1552		return 0;
1553	    }
1554	default:
1555		return -EINVAL;
1556	}
1557}
1558
1559/*
1560 * This function handles some semctl commands which require the rwsem
1561 * to be held in write mode.
1562 * NOTE: the caller must hold no locks; the rwsem is taken inside this function.
1563 */
1564static int semctl_down(struct ipc_namespace *ns, int semid,
1565		       int cmd, struct semid64_ds *semid64)
1566{
1567	struct sem_array *sma;
1568	int err;
1569	struct kern_ipc_perm *ipcp;
1570
1571	down_write(&sem_ids(ns).rwsem);
1572	rcu_read_lock();
1573
1574	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
1575				      &semid64->sem_perm, 0);
1576	if (IS_ERR(ipcp)) {
1577		err = PTR_ERR(ipcp);
1578		goto out_unlock1;
1579	}
1580
1581	sma = container_of(ipcp, struct sem_array, sem_perm);
1582
1583	err = security_sem_semctl(&sma->sem_perm, cmd);
1584	if (err)
1585		goto out_unlock1;
1586
1587	switch (cmd) {
1588	case IPC_RMID:
1589		sem_lock(sma, NULL, -1);
1590		/* freeary unlocks the ipc object and rcu */
1591		freeary(ns, ipcp);
1592		goto out_up;
1593	case IPC_SET:
1594		sem_lock(sma, NULL, -1);
1595		err = ipc_update_perm(&semid64->sem_perm, ipcp);
1596		if (err)
1597			goto out_unlock0;
1598		sma->sem_ctime = ktime_get_real_seconds();
1599		break;
1600	default:
1601		err = -EINVAL;
1602		goto out_unlock1;
1603	}
1604
1605out_unlock0:
1606	sem_unlock(sma, -1);
1607out_unlock1:
1608	rcu_read_unlock();
1609out_up:
1610	up_write(&sem_ids(ns).rwsem);
1611	return err;
1612}
1613
1614long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg)
1615{
1616	int version;
1617	struct ipc_namespace *ns;
1618	void __user *p = (void __user *)arg;
1619	struct semid64_ds semid64;
1620	int err;
1621
1622	if (semid < 0)
1623		return -EINVAL;
1624
1625	version = ipc_parse_version(&cmd);
1626	ns = current->nsproxy->ipc_ns;
1627
1628	switch (cmd) {
1629	case IPC_INFO:
1630	case SEM_INFO:
1631		return semctl_info(ns, semid, cmd, p);
1632	case IPC_STAT:
1633	case SEM_STAT:
1634	case SEM_STAT_ANY:
1635		err = semctl_stat(ns, semid, cmd, &semid64);
1636		if (err < 0)
1637			return err;
1638		if (copy_semid_to_user(p, &semid64, version))
1639			err = -EFAULT;
1640		return err;
1641	case GETALL:
1642	case GETVAL:
1643	case GETPID:
1644	case GETNCNT:
1645	case GETZCNT:
1646	case SETALL:
1647		return semctl_main(ns, semid, semnum, cmd, p);
1648	case SETVAL: {
1649		int val;
1650#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1651		/* big-endian 64bit */
1652		val = arg >> 32;
1653#else
1654		/* 32bit or little-endian 64bit */
1655		val = arg;
1656#endif
1657		return semctl_setval(ns, semid, semnum, val);
1658	}
1659	case IPC_SET:
1660		if (copy_semid_from_user(&semid64, p, version))
1661			return -EFAULT;
1662	case IPC_RMID:
1663		return semctl_down(ns, semid, cmd, &semid64);
1664	default:
1665		return -EINVAL;
1666	}
1667}
1668
1669SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1670{
1671	return ksys_semctl(semid, semnum, cmd, arg);
1672}
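/*
 * Editorial note (not part of the original source): the SETVAL extraction
 * in ksys_semctl() works because glibc passes the caller's union semun by
 * value in the syscall's unsigned long argument. The int val occupies the
 * union's first four bytes, which on a 64-bit big-endian ABI are the high
 * half of the register - hence "val = arg >> 32" there, and a plain
 * truncation everywhere else.
 */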
1673
1674#ifdef CONFIG_COMPAT
1675
1676struct compat_semid_ds {
1677	struct compat_ipc_perm sem_perm;
1678	compat_time_t sem_otime;
1679	compat_time_t sem_ctime;
1680	compat_uptr_t sem_base;
1681	compat_uptr_t sem_pending;
1682	compat_uptr_t sem_pending_last;
1683	compat_uptr_t undo;
1684	unsigned short sem_nsems;
1685};
1686
1687static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
1688					int version)
1689{
1690	memset(out, 0, sizeof(*out));
1691	if (version == IPC_64) {
1692		struct compat_semid64_ds __user *p = buf;
1693		return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
1694	} else {
1695		struct compat_semid_ds __user *p = buf;
1696		return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
1697	}
1698}
1699
1700static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
1701					int version)
1702{
1703	if (version == IPC_64) {
1704		struct compat_semid64_ds v;
1705		memset(&v, 0, sizeof(v));
1706		to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
1707		v.sem_otime = in->sem_otime;
1708		v.sem_ctime = in->sem_ctime;
1709		v.sem_nsems = in->sem_nsems;
1710		return copy_to_user(buf, &v, sizeof(v));
1711	} else {
1712		struct compat_semid_ds v;
1713		memset(&v, 0, sizeof(v));
1714		to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
1715		v.sem_otime = in->sem_otime;
1716		v.sem_ctime = in->sem_ctime;
1717		v.sem_nsems = in->sem_nsems;
1718		return copy_to_user(buf, &v, sizeof(v));
1719	}
1720}
1721
1722long compat_ksys_semctl(int semid, int semnum, int cmd, int arg)
1723{
1724	void __user *p = compat_ptr(arg);
1725	struct ipc_namespace *ns;
1726	struct semid64_ds semid64;
1727	int version = compat_ipc_parse_version(&cmd);
1728	int err;
1729
1730	ns = current->nsproxy->ipc_ns;
1731
1732	if (semid < 0)
1733		return -EINVAL;
1734
1735	switch (cmd & (~IPC_64)) {
1736	case IPC_INFO:
1737	case SEM_INFO:
1738		return semctl_info(ns, semid, cmd, p);
1739	case IPC_STAT:
1740	case SEM_STAT:
1741	case SEM_STAT_ANY:
1742		err = semctl_stat(ns, semid, cmd, &semid64);
1743		if (err < 0)
1744			return err;
1745		if (copy_compat_semid_to_user(p, &semid64, version))
1746			err = -EFAULT;
1747		return err;
1748	case GETVAL:
1749	case GETPID:
1750	case GETNCNT:
1751	case GETZCNT:
1752	case GETALL:
1753	case SETALL:
1754		return semctl_main(ns, semid, semnum, cmd, p);
1755	case SETVAL:
1756		return semctl_setval(ns, semid, semnum, arg);
1757	case IPC_SET:
1758		if (copy_compat_semid_from_user(&semid64, p, version))
1759			return -EFAULT;
1760		/* fallthru */
1761	case IPC_RMID:
1762		return semctl_down(ns, semid, cmd, &semid64);
1763	default:
1764		return -EINVAL;
1765	}
1766}
1767
1768COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
1769{
1770	return compat_ksys_semctl(semid, semnum, cmd, arg);
1771}
1772#endif
1773
1774/* If the task doesn't already have an undo_list, then allocate one
1775 * here.  We guarantee there is only one thread using this undo list,
1776 * and current is THE ONE
1777 *
1778 * If this allocation and assignment succeeds, but later
1779 * portions of this code fail, there is no need to free the sem_undo_list.
1780 * Just let it stay associated with the task, and it'll be freed later
1781 * at exit time.
1782 *
1783 * This can block, so callers must hold no locks.
1784 */
1785static inline int get_undo_list(struct sem_undo_list **undo_listp)
1786{
1787	struct sem_undo_list *undo_list;
1788
1789	undo_list = current->sysvsem.undo_list;
1790	if (!undo_list) {
1791		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1792		if (undo_list == NULL)
1793			return -ENOMEM;
1794		spin_lock_init(&undo_list->lock);
1795		refcount_set(&undo_list->refcnt, 1);
1796		INIT_LIST_HEAD(&undo_list->list_proc);
1797
1798		current->sysvsem.undo_list = undo_list;
1799	}
1800	*undo_listp = undo_list;
1801	return 0;
1802}
1803
1804static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1805{
1806	struct sem_undo *un;
1807
1808	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1809		if (un->semid == semid)
1810			return un;
1811	}
1812	return NULL;
1813}
1814
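/*
 * Editorial note (not part of the original source): lookup_undo() below
 * does not just search; on a hit it rotates the entry to the front of the
 * per-task list (list_del_rcu + list_add_rcu), so a task that keeps
 * operating on the same semaphore set finds its undo structure first on
 * subsequent lookups.
 */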
1815static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1816{
1817	struct sem_undo *un;
1818
1819	assert_spin_locked(&ulp->lock);
1820
1821	un = __lookup_undo(ulp, semid);
1822	if (un) {
1823		list_del_rcu(&un->list_proc);
1824		list_add_rcu(&un->list_proc, &ulp->list_proc);
1825	}
1826	return un;
1827}
1828
1829/**
1830 * find_alloc_undo - lookup (and if not present create) undo array
1831 * @ns: namespace
1832 * @semid: semaphore array id
1833 *
1834 * The function looks up (and if not present creates) the undo structure.
1835 * The size of the undo structure depends on the size of the semaphore
1836 * array, thus the alloc path is not that straightforward.
1837 * Lifetime-rules: sem_undo is rcu-protected; on success, the function
1838 * performs an rcu_read_lock().
1839 */
1840static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1841{
1842	struct sem_array *sma;
1843	struct sem_undo_list *ulp;
1844	struct sem_undo *un, *new;
1845	int nsems, error;
1846
1847	error = get_undo_list(&ulp);
1848	if (error)
1849		return ERR_PTR(error);
1850
1851	rcu_read_lock();
1852	spin_lock(&ulp->lock);
1853	un = lookup_undo(ulp, semid);
1854	spin_unlock(&ulp->lock);
1855	if (likely(un != NULL))
1856		goto out;
1857
1858	/* no undo structure around - allocate one. */
1859	/* step 1: figure out the size of the semaphore array */
1860	sma = sem_obtain_object_check(ns, semid);
1861	if (IS_ERR(sma)) {
1862		rcu_read_unlock();
1863		return ERR_CAST(sma);
1864	}
1865
1866	nsems = sma->sem_nsems;
1867	if (!ipc_rcu_getref(&sma->sem_perm)) {
1868		rcu_read_unlock();
1869		un = ERR_PTR(-EIDRM);
1870		goto out;
1871	}
1872	rcu_read_unlock();
1873
1874	/* step 2: allocate new undo structure */
1875	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1876	if (!new) {
1877		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1878		return ERR_PTR(-ENOMEM);
1879	}
1880
1881	/* step 3: Acquire the lock on semaphore array */
1882	rcu_read_lock();
1883	sem_lock_and_putref(sma);
1884	if (!ipc_valid_object(&sma->sem_perm)) {
1885		sem_unlock(sma, -1);
1886		rcu_read_unlock();
1887		kfree(new);
1888		un = ERR_PTR(-EIDRM);
1889		goto out;
1890	}
1891	spin_lock(&ulp->lock);
1892
1893	/*
1894	 * step 4: check for races: did someone else allocate the undo struct?
1895	 */
1896	un = lookup_undo(ulp, semid);
1897	if (un) {
1898		kfree(new);
1899		goto success;
1900	}
1901	/* step 5: initialize & link new undo structure */
1902	new->semadj = (short *) &new[1];
1903	new->ulp = ulp;
1904	new->semid = semid;
1905	assert_spin_locked(&ulp->lock);
1906	list_add_rcu(&new->list_proc, &ulp->list_proc);
1907	ipc_assert_locked_object(&sma->sem_perm);
1908	list_add(&new->list_id, &sma->list_id);
1909	un = new;
1910
1911success:
1912	spin_unlock(&ulp->lock);
1913	sem_unlock(sma, -1);
1914out:
1915	return un;
1916}
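/*
 * Editorial note (not part of the original source): steps 2-4 above are
 * the classic "drop locks, allocate, relock, re-check" pattern. The
 * reference taken with ipc_rcu_getref() keeps the array alive across the
 * sleeping GFP_KERNEL allocation, ipc_valid_object() catches an RMID that
 * ran in between, and the second lookup_undo() handles another thread of
 * this process having installed the undo structure first.
 */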
1917
1918static long do_semtimedop(int semid, struct sembuf __user *tsops,
1919		unsigned nsops, const struct timespec64 *timeout)
1920{
1921	int error = -EINVAL;
1922	struct sem_array *sma;
1923	struct sembuf fast_sops[SEMOPM_FAST];
1924	struct sembuf *sops = fast_sops, *sop;
1925	struct sem_undo *un;
1926	int max, locknum;
1927	bool undos = false, alter = false, dupsop = false;
1928	struct sem_queue queue;
1929	unsigned long dup = 0, jiffies_left = 0;
1930	struct ipc_namespace *ns;
1931
1932	ns = current->nsproxy->ipc_ns;
1933
1934	if (nsops < 1 || semid < 0)
1935		return -EINVAL;
1936	if (nsops > ns->sc_semopm)
1937		return -E2BIG;
1938	if (nsops > SEMOPM_FAST) {
1939		sops = kvmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
1940		if (sops == NULL)
1941			return -ENOMEM;
1942	}
1943
1944	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
1945		error = -EFAULT;
1946		goto out_free;
1947	}
1948
1949	if (timeout) {
1950		if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
1951			timeout->tv_nsec >= 1000000000L) {
1952			error = -EINVAL;
1953			goto out_free;
1954		}
1955		jiffies_left = timespec64_to_jiffies(timeout);
1956	}
1957
1958	max = 0;
1959	for (sop = sops; sop < sops + nsops; sop++) {
1960		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
1961
1962		if (sop->sem_num >= max)
1963			max = sop->sem_num;
1964		if (sop->sem_flg & SEM_UNDO)
1965			undos = true;
1966		if (dup & mask) {
1967			/*
1968			 * There was a previous alter access that appears
1969			 * to have accessed the same semaphore, thus use
1970			 * the dupsop logic. "appears", because the detection
1971			 * can only check % BITS_PER_LONG.
1972			 */
1973			dupsop = true;
1974		}
1975		if (sop->sem_op != 0) {
1976			alter = true;
1977			dup |= mask;
1978		}
1979	}
1980
1981	if (undos) {
1982		/* On success, find_alloc_undo takes the rcu_read_lock */
1983		un = find_alloc_undo(ns, semid);
1984		if (IS_ERR(un)) {
1985			error = PTR_ERR(un);
1986			goto out_free;
1987		}
1988	} else {
1989		un = NULL;
1990		rcu_read_lock();
1991	}
1992
1993	sma = sem_obtain_object_check(ns, semid);
1994	if (IS_ERR(sma)) {
1995		rcu_read_unlock();
1996		error = PTR_ERR(sma);
1997		goto out_free;
1998	}
1999
2000	error = -EFBIG;
2001	if (max >= sma->sem_nsems) {
2002		rcu_read_unlock();
2003		goto out_free;
2004	}
2005
2006	error = -EACCES;
2007	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
2008		rcu_read_unlock();
2009		goto out_free;
2010	}
2011
2012	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
2013	if (error) {
2014		rcu_read_unlock();
2015		goto out_free;
2016	}
2017
2018	error = -EIDRM;
2019	locknum = sem_lock(sma, sops, nsops);
2020	/*
2021	 * We eventually might perform the following check in a lockless
2022	 * fashion, considering ipc_valid_object() locking constraints.
2023	 * If nsops == 1 and there is no contention for sem_perm.lock, then
2024	 * only a per-semaphore lock is held and it's OK to proceed with the
2025	 * check below. More details on the fine grained locking scheme
2026	 * entangled here and why it's RMID race safe on comments at sem_lock()
2027	 */
2028	if (!ipc_valid_object(&sma->sem_perm))
2029		goto out_unlock_free;
2030	/*
2031	 * semid identifiers are not unique - find_alloc_undo may have
2032	 * allocated an undo structure, it was invalidated by an RMID
2033	 * and now a new array received the same id. Check and fail.
2034	 * This case can be detected checking un->semid. The existence of
2035	 * "un" itself is guaranteed by rcu.
2036	 */
2037	if (un && un->semid == -1)
2038		goto out_unlock_free;
2039
2040	queue.sops = sops;
2041	queue.nsops = nsops;
2042	queue.undo = un;
2043	queue.pid = task_tgid(current);
2044	queue.alter = alter;
2045	queue.dupsop = dupsop;
2046
2047	error = perform_atomic_semop(sma, &queue);
2048	if (error == 0) { /* non-blocking successful path */
2049		DEFINE_WAKE_Q(wake_q);
2050
2051		/*
2052		 * If the operation was successful, then do
2053		 * the required updates.
2054		 */
2055		if (alter)
2056			do_smart_update(sma, sops, nsops, 1, &wake_q);
2057		else
2058			set_semotime(sma, sops);
2059
2060		sem_unlock(sma, locknum);
2061		rcu_read_unlock();
2062		wake_up_q(&wake_q);
2063
2064		goto out_free;
2065	}
2066	if (error < 0) /* non-blocking error path */
2067		goto out_unlock_free;
2068
2069	/*
2070	 * We need to sleep on this operation, so we put the current
2071	 * task into the pending queue and go to sleep.
2072	 */
2073	if (nsops == 1) {
2074		struct sem *curr;
2075		curr = &sma->sems[sops->sem_num];
2076
2077		if (alter) {
2078			if (sma->complex_count) {
2079				list_add_tail(&queue.list,
2080						&sma->pending_alter);
2081			} else {
2082
2083				list_add_tail(&queue.list,
2084						&curr->pending_alter);
2085			}
2086		} else {
2087			list_add_tail(&queue.list, &curr->pending_const);
2088		}
2089	} else {
2090		if (!sma->complex_count)
2091			merge_queues(sma);
2092
2093		if (alter)
2094			list_add_tail(&queue.list, &sma->pending_alter);
2095		else
2096			list_add_tail(&queue.list, &sma->pending_const);
2097
2098		sma->complex_count++;
2099	}
2100
2101	do {
2102		queue.status = -EINTR;
2103		queue.sleeper = current;
2104
2105		__set_current_state(TASK_INTERRUPTIBLE);
2106		sem_unlock(sma, locknum);
2107		rcu_read_unlock();
2108
2109		if (timeout)
2110			jiffies_left = schedule_timeout(jiffies_left);
2111		else
2112			schedule();
2113
2114		/*
2115		 * fastpath: the semop has completed, either successfully or
2116		 * not; from the syscall's point of view the distinction is
2117		 * irrelevant at this point - we're done.
2118		 *
2119		 * We _do_ care, nonetheless, about being awoken by a signal or
2120		 * spuriously.  The queue.status is checked again in the
2121		 * slowpath (aka after taking sem_lock), such that we can detect
2122		 * scenarios where we were awakened externally, during the
2123		 * window between wake_q_add() and wake_up_q().
2124		 */
2125		error = READ_ONCE(queue.status);
2126		if (error != -EINTR) {
2127			/*
2128			 * User space could assume that semop() is a memory
2129			 * barrier: Without the mb(), the cpu could
2130			 * speculatively read in userspace stale data that was
2131			 * overwritten by the previous owner of the semaphore.
2132			 */
2133			smp_mb();
2134			goto out_free;
2135		}
2136
2137		rcu_read_lock();
2138		locknum = sem_lock(sma, sops, nsops);
2139
2140		if (!ipc_valid_object(&sma->sem_perm))
2141			goto out_unlock_free;
2142
2143		error = READ_ONCE(queue.status);
2144
2145		/*
2146		 * If queue.status != -EINTR we are woken up by another process.
2147		 * Leave without unlink_queue(), but with sem_unlock().
2148		 */
2149		if (error != -EINTR)
2150			goto out_unlock_free;
2151
2152		/*
2153		 * If an interrupt occurred we have to clean up the queue.
2154		 */
2155		if (timeout && jiffies_left == 0)
2156			error = -EAGAIN;
2157	} while (error == -EINTR && !signal_pending(current)); /* spurious */
2158
2159	unlink_queue(sma, &queue);
2160
2161out_unlock_free:
2162	sem_unlock(sma, locknum);
2163	rcu_read_unlock();
2164out_free:
2165	if (sops != fast_sops)
2166		kvfree(sops);
2167	return error;
2168}
2169
2170long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2171		     unsigned int nsops, const struct timespec __user *timeout)
2172{
2173	if (timeout) {
2174		struct timespec64 ts;
2175		if (get_timespec64(&ts, timeout))
2176			return -EFAULT;
2177		return do_semtimedop(semid, tsops, nsops, &ts);
2178	}
2179	return do_semtimedop(semid, tsops, nsops, NULL);
2180}
2181
2182SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
2183		unsigned int, nsops, const struct timespec __user *, timeout)
2184{
2185	return ksys_semtimedop(semid, tsops, nsops, timeout);
2186}
2187
2188#ifdef CONFIG_COMPAT
2189long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2190			    unsigned int nsops,
2191			    const struct compat_timespec __user *timeout)
2192{
2193	if (timeout) {
2194		struct timespec64 ts;
2195		if (compat_get_timespec64(&ts, timeout))
2196			return -EFAULT;
2197		return do_semtimedop(semid, tsems, nsops, &ts);
2198	}
2199	return do_semtimedop(semid, tsems, nsops, NULL);
2200}
2201
2202COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
2203		       unsigned int, nsops,
2204		       const struct compat_timespec __user *, timeout)
2205{
2206	return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
2207}
2208#endif
2209
2210SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2211		unsigned, nsops)
2212{
2213	return do_semtimedop(semid, tsops, nsops, NULL);
2214}
2215
2216/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2217 * parent and child tasks.
2218 */
2219
2220int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2221{
2222	struct sem_undo_list *undo_list;
2223	int error;
2224
2225	if (clone_flags & CLONE_SYSVSEM) {
2226		error = get_undo_list(&undo_list);
2227		if (error)
2228			return error;
2229		refcount_inc(&undo_list->refcnt);
2230		tsk->sysvsem.undo_list = undo_list;
2231	} else
2232		tsk->sysvsem.undo_list = NULL;
2233
2234	return 0;
2235}
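/*
 * Editorial note (not part of the original source): CLONE_SYSVSEM is what
 * makes semadj state per-process rather than per-task. glibc's NPTL
 * passes the flag from pthread_create(), so all pthreads of a process
 * share one undo list, while a plain fork() child starts with none.
 */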
2236
2237/*
2238 * add semadj values to semaphores, free undo structures.
2239 * undo structures are not freed when semaphore arrays are destroyed
2240 * so some of them may be out of date.
2241 * IMPLEMENTATION NOTE: There is some confusion over whether the
2242 * set of adjustments that needs to be done should be done in an atomic
2243 * manner or not. That is, if we are attempting to decrement the semval
2244 * should we queue up and wait until we can do so legally?
2245 * The original implementation attempted to do this (queue and wait).
2246 * The current implementation does not do so. The POSIX standard
2247 * and SVID should be consulted to determine what behavior is mandated.
2248 */
2249void exit_sem(struct task_struct *tsk)
2250{
2251	struct sem_undo_list *ulp;
2252
2253	ulp = tsk->sysvsem.undo_list;
2254	if (!ulp)
2255		return;
2256	tsk->sysvsem.undo_list = NULL;
2257
2258	if (!refcount_dec_and_test(&ulp->refcnt))
2259		return;
2260
2261	for (;;) {
2262		struct sem_array *sma;
2263		struct sem_undo *un;
2264		int semid, i;
2265		DEFINE_WAKE_Q(wake_q);
2266
2267		cond_resched();
2268
2269		rcu_read_lock();
2270		un = list_entry_rcu(ulp->list_proc.next,
2271				    struct sem_undo, list_proc);
2272		if (&un->list_proc == &ulp->list_proc) {
2273			/*
2274			 * We must wait for freeary() before freeing this ulp,
2275			 * in case we raced with the last sem_undo. There is a small
2276			 * window where we could exit while freeary() hasn't yet
2277			 * finished unlocking sem_undo_list.
2278			 */
2279			spin_lock(&ulp->lock);
2280			spin_unlock(&ulp->lock);
2281			rcu_read_unlock();
2282			break;
2283		}
2284		spin_lock(&ulp->lock);
2285		semid = un->semid;
2286		spin_unlock(&ulp->lock);
2287
2288		/* exit_sem raced with IPC_RMID, nothing to do */
2289		if (semid == -1) {
2290			rcu_read_unlock();
2291			continue;
2292		}
2293
2294		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2295		/* exit_sem raced with IPC_RMID, nothing to do */
2296		if (IS_ERR(sma)) {
2297			rcu_read_unlock();
2298			continue;
2299		}
2300
2301		sem_lock(sma, NULL, -1);
2302		/* exit_sem raced with IPC_RMID, nothing to do */
2303		if (!ipc_valid_object(&sma->sem_perm)) {
2304			sem_unlock(sma, -1);
2305			rcu_read_unlock();
2306			continue;
2307		}
2308		un = __lookup_undo(ulp, semid);
2309		if (un == NULL) {
2310			/* exit_sem raced with IPC_RMID+semget() that created
2311			 * exactly the same semid. Nothing to do.
2312			 */
2313			sem_unlock(sma, -1);
2314			rcu_read_unlock();
2315			continue;
2316		}
2317
2318		/* remove un from the linked lists */
2319		ipc_assert_locked_object(&sma->sem_perm);
2320		list_del(&un->list_id);
2321
2322		/* we are the last process using this ulp, acquiring ulp->lock
2323		 * isn't required. Besides that, we are also protected against
2324		 * IPC_RMID as we hold sma->sem_perm lock now
2325		 */
2326		list_del_rcu(&un->list_proc);
2327
2328		/* perform adjustments registered in un */
2329		for (i = 0; i < sma->sem_nsems; i++) {
2330			struct sem *semaphore = &sma->sems[i];
2331			if (un->semadj[i]) {
2332				semaphore->semval += un->semadj[i];
2333				/*
2334				 * Range checks of the new semaphore value,
2335				 * not defined by SUS:
2336				 * - Some unices ignore the undo entirely
2337				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2338				 * - some cap the value (e.g. FreeBSD caps
2339				 *   at 0, but doesn't enforce SEMVMX)
2340				 *
2341				 * Linux caps the semaphore value, both at 0
2342				 * and at SEMVMX.
2343				 *
2344				 *	Manfred <manfred@colorfullife.com>
2345				 */
2346				if (semaphore->semval < 0)
2347					semaphore->semval = 0;
2348				if (semaphore->semval > SEMVMX)
2349					semaphore->semval = SEMVMX;
2350				ipc_update_pid(&semaphore->sempid, task_tgid(current));
2351			}
2352		}
2353		/* maybe some queued-up processes were waiting for this */
2354		do_smart_update(sma, NULL, 0, 1, &wake_q);
2355		sem_unlock(sma, -1);
2356		rcu_read_unlock();
2357		wake_up_q(&wake_q);
2358
2359		kfree_rcu(un, rcu);
2360	}
2361	kfree(ulp);
2362}
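/*
 * Editorial note (not part of the original source): the clamping above
 * makes undo replay best-effort. Worked example: semval starts at 1, task
 * A raises it by 3 with SEM_UNDO (its semadj becomes -3), task B then
 * decrements by 4, leaving semval at 0. When A exits, 0 + (-3) = -3 is
 * clamped to 0 instead of blocking or failing - matching the behaviour
 * documented in the comment above.
 */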
2363
2364#ifdef CONFIG_PROC_FS
2365static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2366{
2367	struct user_namespace *user_ns = seq_user_ns(s);
2368	struct kern_ipc_perm *ipcp = it;
2369	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
2370	time64_t sem_otime;
2371
2372	/*
2373	 * The proc interface isn't aware of sem_lock(); it calls
2374	 * ipc_lock_object() directly (in sysvipc_find_ipc).
2375	 * In order to stay compatible with sem_lock(), we must
2376	 * enter / leave complex_mode.
2377	 */
2378	complexmode_enter(sma);
2379
2380	sem_otime = get_semotime(sma);
2381
2382	seq_printf(s,
2383		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
2384		   sma->sem_perm.key,
2385		   sma->sem_perm.id,
2386		   sma->sem_perm.mode,
2387		   sma->sem_nsems,
2388		   from_kuid_munged(user_ns, sma->sem_perm.uid),
2389		   from_kgid_munged(user_ns, sma->sem_perm.gid),
2390		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
2391		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
2392		   sem_otime,
2393		   sma->sem_ctime);
2394
2395	complexmode_tryleave(sma);
2396
2397	return 0;
2398}
2399#endif