   1/*
   2 * linux/ipc/sem.c
   3 * Copyright (C) 1992 Krishna Balasubramanian
   4 * Copyright (C) 1995 Eric Schenk, Bruno Haible
   5 *
   6 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
   7 *
   8 * SMP-threaded, sysctl's added
   9 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
  10 * Enforced range limit on SEM_UNDO
  11 * (c) 2001 Red Hat Inc
  12 * Lockless wakeup
  13 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
  14 * Further wakeup optimizations, documentation
  15 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
  16 *
  17 * support for audit of ipc object properties and permission changes
  18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  19 *
  20 * namespaces support
  21 * OpenVZ, SWsoft Inc.
  22 * Pavel Emelianov <xemul@openvz.org>
  23 *
  24 * Implementation notes: (May 2010)
  25 * This file implements System V semaphores.
  26 *
  27 * User space visible behavior:
  28 * - FIFO ordering for semop() operations (just FIFO, not starvation
  29 *   protection)
  30 * - multiple semaphore operations that alter the same semaphore in
  31 *   one semop() are handled.
  32 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
  33 *   SETALL calls.
  34 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
  35 * - undo adjustments at process exit are limited to 0..SEMVMX.
   36 * - namespaces are supported.
   37 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
  38 *   to /proc/sys/kernel/sem.
  39 * - statistics about the usage are reported in /proc/sysvipc/sem.
  40 *
  41 * Internals:
  42 * - scalability:
  43 *   - all global variables are read-mostly.
  44 *   - semop() calls and semctl(RMID) are synchronized by RCU.
  45 *   - most operations do write operations (actually: spin_lock calls) to
  46 *     the per-semaphore array structure.
  47 *   Thus: Perfect SMP scaling between independent semaphore arrays.
  48 *         If multiple semaphores in one array are used, then cache line
   49 *         thrashing on the semaphore array spinlock will limit the scaling.
  50 * - semncnt and semzcnt are calculated on demand in count_semncnt() and
  51 *   count_semzcnt()
  52 * - the task that performs a successful semop() scans the list of all
  53 *   sleeping tasks and completes any pending operations that can be fulfilled.
  54 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
  55 *   (see update_queue())
  56 * - To improve the scalability, the actual wake-up calls are performed after
  57 *   dropping all locks. (see wake_up_sem_queue_prepare(),
  58 *   wake_up_sem_queue_do())
  59 * - All work is done by the waker, the woken up task does not have to do
  60 *   anything - not even acquiring a lock or dropping a refcount.
  61 * - A woken up task may not even touch the semaphore array anymore, it may
  62 *   have been destroyed already by a semctl(RMID).
  63 * - The synchronizations between wake-ups due to a timeout/signal and a
  64 *   wake-up due to a completed semaphore operation is achieved by using an
  65 *   intermediate state (IN_WAKEUP).
  66 * - UNDO values are stored in an array (one per process and per
  67 *   semaphore array, lazily allocated). For backwards compatibility, multiple
  68 *   modes for the UNDO variables are supported (per process, per thread)
  69 *   (see copy_semundo, CLONE_SYSVSEM)
  70 * - There are two lists of the pending operations: a per-array list
   71 *   and a per-semaphore list (stored in the array). This makes it possible to
   72 *   achieve FIFO ordering without always scanning all pending operations.
  73 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
  74 */
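/*
 * Illustrative user-space sketch (not kernel code) of the behavior
 * documented above: both operations in one semop() are applied
 * atomically, and the SEM_UNDO adjustments are reverted at process exit.
 *
 *	#include <sys/sem.h>
 *
 *	struct sembuf ops[2] = {
 *		{ .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO },
 *		{ .sem_num = 1, .sem_op = +1, .sem_flg = SEM_UNDO },
 *	};
 *	int id = semget(IPC_PRIVATE, 2, 0600);
 *	int err = semop(id, ops, 2);
 *
 * On success either both operations were applied together or the caller
 * slept until they could be; on failure neither is applied.
 */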
  75
  76#include <linux/slab.h>
  77#include <linux/spinlock.h>
  78#include <linux/init.h>
  79#include <linux/proc_fs.h>
  80#include <linux/time.h>
  81#include <linux/security.h>
  82#include <linux/syscalls.h>
  83#include <linux/audit.h>
  84#include <linux/capability.h>
  85#include <linux/seq_file.h>
  86#include <linux/rwsem.h>
  87#include <linux/nsproxy.h>
  88#include <linux/ipc_namespace.h>
  89
  90#include <asm/uaccess.h>
  91#include "util.h"
  92
  93/* One semaphore structure for each semaphore in the system. */
  94struct sem {
  95	int	semval;		/* current value */
  96	int	sempid;		/* pid of last operation */
  97	struct list_head sem_pending; /* pending single-sop operations */
  98};
  99
 100/* One queue for each sleeping process in the system. */
 101struct sem_queue {
 102	struct list_head	simple_list; /* queue of pending operations */
 103	struct list_head	list;	 /* queue of pending operations */
 104	struct task_struct	*sleeper; /* this process */
 105	struct sem_undo		*undo;	 /* undo structure */
 106	int			pid;	 /* process id of requesting process */
 107	int			status;	 /* completion status of operation */
 108	struct sembuf		*sops;	 /* array of pending operations */
 109	int			nsops;	 /* number of operations */
 110	int			alter;	 /* does *sops alter the array? */
 111};
 112
 113/* Each task has a list of undo requests. They are executed automatically
 114 * when the process exits.
 115 */
 116struct sem_undo {
 117	struct list_head	list_proc;	/* per-process list: *
 118						 * all undos from one process
 119						 * rcu protected */
 120	struct rcu_head		rcu;		/* rcu struct for sem_undo */
 121	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
 122	struct list_head	list_id;	/* per semaphore array list:
 123						 * all undos for one array */
 124	int			semid;		/* semaphore set identifier */
 125	short			*semadj;	/* array of adjustments */
 126						/* one per semaphore */
 127};
 128
 129/* sem_undo_list controls shared access to the list of sem_undo structures
  130 * that may be shared by all tasks in a CLONE_SYSVSEM task group.
 131 */
 132struct sem_undo_list {
 133	atomic_t		refcnt;
 134	spinlock_t		lock;
 135	struct list_head	list_proc;
 136};
 137
 138
 139#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])
 140
 141#define sem_unlock(sma)		ipc_unlock(&(sma)->sem_perm)
 142#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)
 143
 144static int newary(struct ipc_namespace *, struct ipc_params *);
 145static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
 146#ifdef CONFIG_PROC_FS
 147static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 148#endif
 149
 150#define SEMMSL_FAST	256 /* 512 bytes on stack */
 151#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
 152
  153/*
  154 * linked list protection:
  155 *	sem_undo.id_next,
  156 *	sem_array.sem_pending{,last},
  157 *	sem_array.sem_undo: sem_lock() for read/write
  158 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
  159 *
  160 */
 161
 162#define sc_semmsl	sem_ctls[0]
 163#define sc_semmns	sem_ctls[1]
 164#define sc_semopm	sem_ctls[2]
 165#define sc_semmni	sem_ctls[3]
 166
 167void sem_init_ns(struct ipc_namespace *ns)
 168{
 169	ns->sc_semmsl = SEMMSL;
 170	ns->sc_semmns = SEMMNS;
 171	ns->sc_semopm = SEMOPM;
 172	ns->sc_semmni = SEMMNI;
 173	ns->used_sems = 0;
 174	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
 175}
 176
 177#ifdef CONFIG_IPC_NS
 178void sem_exit_ns(struct ipc_namespace *ns)
 179{
 180	free_ipcs(ns, &sem_ids(ns), freeary);
 181	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 182}
 183#endif
 184
 185void __init sem_init (void)
 186{
 187	sem_init_ns(&init_ipc_ns);
 188	ipc_init_proc_interface("sysvipc/sem",
 189				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
 190				IPC_SEM_IDS, sysvipc_sem_proc_show);
 191}
 192
  193/*
  194 * sem_lock_(check_) routines are called in the paths where the rw_mutex
  195 * is not held.
  196 */
 197static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
 198{
 199	struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);
 200
 201	if (IS_ERR(ipcp))
 202		return (struct sem_array *)ipcp;
 203
 204	return container_of(ipcp, struct sem_array, sem_perm);
 205}
 206
 207static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
 208						int id)
 209{
 210	struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
 211
 212	if (IS_ERR(ipcp))
 213		return (struct sem_array *)ipcp;
 214
 215	return container_of(ipcp, struct sem_array, sem_perm);
 216}
 217
 218static inline void sem_lock_and_putref(struct sem_array *sma)
 219{
 220	ipc_lock_by_ptr(&sma->sem_perm);
 221	ipc_rcu_putref(sma);
 222}
 223
 224static inline void sem_getref_and_unlock(struct sem_array *sma)
 225{
 226	ipc_rcu_getref(sma);
 227	ipc_unlock(&(sma)->sem_perm);
 228}
 229
 230static inline void sem_putref(struct sem_array *sma)
 231{
 232	ipc_lock_by_ptr(&sma->sem_perm);
 233	ipc_rcu_putref(sma);
 234	ipc_unlock(&(sma)->sem_perm);
 235}
 236
 237static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 238{
 239	ipc_rmid(&sem_ids(ns), &s->sem_perm);
 240}
 241
 242/*
 243 * Lockless wakeup algorithm:
 244 * Without the check/retry algorithm a lockless wakeup is possible:
 245 * - queue.status is initialized to -EINTR before blocking.
 246 * - wakeup is performed by
 247 *	* unlinking the queue entry from sma->sem_pending
 248 *	* setting queue.status to IN_WAKEUP
 249 *	  This is the notification for the blocked thread that a
 250 *	  result value is imminent.
 251 *	* call wake_up_process
 252 *	* set queue.status to the final value.
 253 * - the previously blocked thread checks queue.status:
 254 *   	* if it's IN_WAKEUP, then it must wait until the value changes
 255 *   	* if it's not -EINTR, then the operation was completed by
 256 *   	  update_queue. semtimedop can return queue.status without
 257 *   	  performing any operation on the sem array.
 258 *   	* otherwise it must acquire the spinlock and check what's up.
 259 *
 260 * The two-stage algorithm is necessary to protect against the following
 261 * races:
 262 * - if queue.status is set after wake_up_process, then the woken up idle
 263 *   thread could race forward and try (and fail) to acquire sma->lock
 264 *   before update_queue had a chance to set queue.status
 265 * - if queue.status is written before wake_up_process and if the
 266 *   blocked process is woken up by a signal between writing
 267 *   queue.status and the wake_up_process, then the woken up
 268 *   process could return from semtimedop and die by calling
 269 *   sys_exit before wake_up_process is called. Then wake_up_process
 270 *   will oops, because the task structure is already invalid.
 271 *   (yes, this happened on s390 with sysv msg).
 272 *
 273 */
 274#define IN_WAKEUP	1
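/*
 * Waiter-side sketch of the two-stage protocol above (this is what
 * get_queue_result(), further down in this file, implements):
 *
 *	error = q->status;
 *	while (unlikely(error == IN_WAKEUP)) {
 *		cpu_relax();
 *		error = q->status;
 *	}
 *
 * Spinning on IN_WAKEUP is what allows the waker to store the final
 * status after wake_up_process() without the sleeper holding any lock.
 */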
 275
 276/**
 277 * newary - Create a new semaphore set
 278 * @ns: namespace
 279 * @params: ptr to the structure that contains key, semflg and nsems
 280 *
 281 * Called with sem_ids.rw_mutex held (as a writer)
 282 */
 283
 284static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 285{
 286	int id;
 287	int retval;
 288	struct sem_array *sma;
 289	int size;
 290	key_t key = params->key;
 291	int nsems = params->u.nsems;
 292	int semflg = params->flg;
 293	int i;
 294
 295	if (!nsems)
 296		return -EINVAL;
 297	if (ns->used_sems + nsems > ns->sc_semmns)
 298		return -ENOSPC;
 299
 300	size = sizeof (*sma) + nsems * sizeof (struct sem);
 301	sma = ipc_rcu_alloc(size);
 302	if (!sma) {
 303		return -ENOMEM;
 304	}
 305	memset (sma, 0, size);
 306
 307	sma->sem_perm.mode = (semflg & S_IRWXUGO);
 308	sma->sem_perm.key = key;
 309
 310	sma->sem_perm.security = NULL;
 311	retval = security_sem_alloc(sma);
 312	if (retval) {
 313		ipc_rcu_putref(sma);
 314		return retval;
 315	}
 316
 317	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
 318	if (id < 0) {
 319		security_sem_free(sma);
 320		ipc_rcu_putref(sma);
 321		return id;
 322	}
 323	ns->used_sems += nsems;
 324
 325	sma->sem_base = (struct sem *) &sma[1];
 326
 327	for (i = 0; i < nsems; i++)
 328		INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
 329
 330	sma->complex_count = 0;
 331	INIT_LIST_HEAD(&sma->sem_pending);
 332	INIT_LIST_HEAD(&sma->list_id);
 333	sma->sem_nsems = nsems;
 334	sma->sem_ctime = get_seconds();
 335	sem_unlock(sma);
 336
 337	return sma->sem_perm.id;
 338}
 339
 340
 341/*
 342 * Called with sem_ids.rw_mutex and ipcp locked.
 343 */
 344static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
 345{
 346	struct sem_array *sma;
 347
 348	sma = container_of(ipcp, struct sem_array, sem_perm);
 349	return security_sem_associate(sma, semflg);
 350}
 351
 352/*
 353 * Called with sem_ids.rw_mutex and ipcp locked.
 354 */
 355static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
 356				struct ipc_params *params)
 357{
 358	struct sem_array *sma;
 359
 360	sma = container_of(ipcp, struct sem_array, sem_perm);
 361	if (params->u.nsems > sma->sem_nsems)
 362		return -EINVAL;
 363
 364	return 0;
 365}
 366
 367SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 368{
 369	struct ipc_namespace *ns;
 370	struct ipc_ops sem_ops;
 371	struct ipc_params sem_params;
 372
 373	ns = current->nsproxy->ipc_ns;
 374
 375	if (nsems < 0 || nsems > ns->sc_semmsl)
 376		return -EINVAL;
 377
 378	sem_ops.getnew = newary;
 379	sem_ops.associate = sem_security;
 380	sem_ops.more_checks = sem_more_checks;
 381
 382	sem_params.key = key;
 383	sem_params.flg = semflg;
 384	sem_params.u.nsems = nsems;
 385
 386	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 387}
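/*
 * Illustrative user-space sketch (not kernel code): creating a private
 * set of three semaphores. An nsems outside 0..SEMMSL makes the check
 * above return -EINVAL.
 *
 *	int id = semget(IPC_PRIVATE, 3, IPC_CREAT | 0600);
 */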
 388
 389/*
 390 * Determine whether a sequence of semaphore operations would succeed
  391 * all at once. Return 0 if yes, 1 if the caller must sleep, or an error code.
 392 */
 393
 394static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
 395			     int nsops, struct sem_undo *un, int pid)
 396{
 397	int result, sem_op;
 398	struct sembuf *sop;
 399	struct sem * curr;
 400
 401	for (sop = sops; sop < sops + nsops; sop++) {
 402		curr = sma->sem_base + sop->sem_num;
 403		sem_op = sop->sem_op;
 404		result = curr->semval;
 405  
 406		if (!sem_op && result)
 407			goto would_block;
 408
 409		result += sem_op;
 410		if (result < 0)
 411			goto would_block;
 412		if (result > SEMVMX)
 413			goto out_of_range;
  414		if (sop->sem_flg & SEM_UNDO) {
  415			int undo = un->semadj[sop->sem_num] - sem_op;
  416			/*
  417			 * Exceeding the undo range is an error.
  418			 */
  419			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
  420				goto out_of_range;
  421		}
  422		curr->semval = result;
  423	}
  424
  425	sop--;
 426	while (sop >= sops) {
 427		sma->sem_base[sop->sem_num].sempid = pid;
 428		if (sop->sem_flg & SEM_UNDO)
 429			un->semadj[sop->sem_num] -= sop->sem_op;
 430		sop--;
 431	}
 432	
 433	return 0;
 434
 435out_of_range:
 436	result = -ERANGE;
 437	goto undo;
 438
 439would_block:
 440	if (sop->sem_flg & IPC_NOWAIT)
 441		result = -EAGAIN;
 442	else
 443		result = 1;
 444
 445undo:
 446	sop--;
 447	while (sop >= sops) {
 448		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
 449		sop--;
 450	}
 451
 452	return result;
 453}
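/*
 * Worked example for the rollback above: with semval = {0, 1} and
 * sops = { {.sem_num = 0, .sem_op = +1}, {.sem_num = 1, .sem_op = -2} },
 * sem 0 is tentatively raised to 1, then sem 1 would go negative, so
 * the would_block path undoes sem 0 back to 0 and the caller either
 * sleeps (return value 1) or gets -EAGAIN if IPC_NOWAIT was set.
 */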
 454
  455/**
  456 * wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
  457 * @pt: list of tasks to be woken up
  458 * @q: queue entry that must be signaled
  459 * @error: Error value for the signal
  460 */
 461static void wake_up_sem_queue_prepare(struct list_head *pt,
 462				struct sem_queue *q, int error)
 463{
 464	if (list_empty(pt)) {
 465		/*
 466		 * Hold preempt off so that we don't get preempted and have the
 467		 * wakee busy-wait until we're scheduled back on.
 468		 */
 469		preempt_disable();
 470	}
 471	q->status = IN_WAKEUP;
 472	q->pid = error;
 473
 474	list_add_tail(&q->simple_list, pt);
 475}
 476
 477/**
 478 * wake_up_sem_queue_do(pt) - do the actual wake-up
 479 * @pt: list of tasks to be woken up
 480 *
 481 * Do the actual wake-up.
 482 * The function is called without any locks held, thus the semaphore array
 483 * could be destroyed already and the tasks can disappear as soon as the
 484 * status is set to the actual return code.
 485 */
 486static void wake_up_sem_queue_do(struct list_head *pt)
 487{
 488	struct sem_queue *q, *t;
 489	int did_something;
 490
 491	did_something = !list_empty(pt);
 492	list_for_each_entry_safe(q, t, pt, simple_list) {
 493		wake_up_process(q->sleeper);
 494		/* q can disappear immediately after writing q->status. */
 495		smp_wmb();
 496		q->status = q->pid;
 497	}
 498	if (did_something)
 499		preempt_enable();
 500}
 501
 502static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
 503{
 504	list_del(&q->list);
 505	if (q->nsops == 1)
 506		list_del(&q->simple_list);
 507	else
 508		sma->complex_count--;
 509}
 510
 511/** check_restart(sma, q)
 512 * @sma: semaphore array
 513 * @q: the operation that just completed
 514 *
 515 * update_queue is O(N^2) when it restarts scanning the whole queue of
 516 * waiting operations. Therefore this function checks if the restart is
 517 * really necessary. It is called after a previously waiting operation
 518 * was completed.
 519 */
 520static int check_restart(struct sem_array *sma, struct sem_queue *q)
 521{
 522	struct sem *curr;
 523	struct sem_queue *h;
 524
 525	/* if the operation didn't modify the array, then no restart */
 526	if (q->alter == 0)
 527		return 0;
 528
 529	/* pending complex operations are too difficult to analyse */
 530	if (sma->complex_count)
 531		return 1;
 532
 533	/* we were a sleeping complex operation. Too difficult */
 534	if (q->nsops > 1)
 535		return 1;
 536
 537	curr = sma->sem_base + q->sops[0].sem_num;
 538
 539	/* No-one waits on this queue */
 540	if (list_empty(&curr->sem_pending))
 541		return 0;
 542
 543	/* the new semaphore value */
 544	if (curr->semval) {
 545		/* It is impossible that someone waits for the new value:
 546		 * - q is a previously sleeping simple operation that
 547		 *   altered the array. It must be a decrement, because
 548		 *   simple increments never sleep.
 549		 * - The value is not 0, thus wait-for-zero won't proceed.
 550		 * - If there are older (higher priority) decrements
 551		 *   in the queue, then they have observed the original
 552		 *   semval value and couldn't proceed. The operation
  553		 *   decremented the value - thus they won't proceed either.
  554		 */
 555		BUG_ON(q->sops[0].sem_op >= 0);
 556		return 0;
 557	}
 558	/*
 559	 * semval is 0. Check if there are wait-for-zero semops.
 560	 * They must be the first entries in the per-semaphore simple queue
 561	 */
 562	h = list_first_entry(&curr->sem_pending, struct sem_queue, simple_list);
 563	BUG_ON(h->nsops != 1);
 564	BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);
 565
 566	/* Yes, there is a wait-for-zero semop. Restart */
 567	if (h->sops[0].sem_op == 0)
 568		return 1;
 569
 570	/* Again - no-one is waiting for the new value. */
 571	return 0;
 572}
 573
 574
 575/**
 576 * update_queue(sma, semnum): Look for tasks that can be completed.
 577 * @sma: semaphore array.
 578 * @semnum: semaphore that was modified.
 579 * @pt: list head for the tasks that must be woken up.
 580 *
 581 * update_queue must be called after a semaphore in a semaphore array
  582 * was modified. If multiple semaphores were modified, then @semnum
  583 * must be set to -1.
  584 * The tasks that must be woken up are added to @pt. The return code
  585 * is stored in q->pid.
  586 * The function returns 1 if at least one semop was completed successfully.
  587 */
 588static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
 589{
 590	struct sem_queue *q;
 591	struct list_head *walk;
 592	struct list_head *pending_list;
 593	int offset;
 594	int semop_completed = 0;
 595
 596	/* if there are complex operations around, then knowing the semaphore
 597	 * that was modified doesn't help us. Assume that multiple semaphores
 598	 * were modified.
 599	 */
 600	if (sma->complex_count)
 601		semnum = -1;
 602
 603	if (semnum == -1) {
 604		pending_list = &sma->sem_pending;
 605		offset = offsetof(struct sem_queue, list);
 606	} else {
 607		pending_list = &sma->sem_base[semnum].sem_pending;
 608		offset = offsetof(struct sem_queue, simple_list);
 609	}
 610
 611again:
 612	walk = pending_list->next;
 613	while (walk != pending_list) {
 614		int error, restart;
 615
 616		q = (struct sem_queue *)((char *)walk - offset);
 617		walk = walk->next;
 618
 619		/* If we are scanning the single sop, per-semaphore list of
 620		 * one semaphore and that semaphore is 0, then it is not
 621		 * necessary to scan the "alter" entries: simple increments
 622		 * that affect only one entry succeed immediately and cannot
 623		 * be in the  per semaphore pending queue, and decrements
  624		 * be in the per-semaphore pending queue, and decrements
 625		 */
 626		if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
 627				q->alter)
 628			break;
 629
 630		error = try_atomic_semop(sma, q->sops, q->nsops,
 631					 q->undo, q->pid);
 632
 633		/* Does q->sleeper still need to sleep? */
 634		if (error > 0)
 635			continue;
 636
 637		unlink_queue(sma, q);
 638
 639		if (error) {
 640			restart = 0;
 641		} else {
 642			semop_completed = 1;
 643			restart = check_restart(sma, q);
 644		}
 645
 646		wake_up_sem_queue_prepare(pt, q, error);
 647		if (restart)
 648			goto again;
 649	}
 650	return semop_completed;
 651}
 652
 653/**
 654 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
 655 * @sma: semaphore array
 656 * @sops: operations that were performed
 657 * @nsops: number of operations
 658 * @otime: force setting otime
 659 * @pt: list head of the tasks that must be woken up.
 660 *
  661 * do_smart_update() does the required calls to update_queue, based on the
 662 * actual changes that were performed on the semaphore array.
 663 * Note that the function does not do the actual wake-up: the caller is
 664 * responsible for calling wake_up_sem_queue_do(@pt).
 665 * It is safe to perform this call after dropping all locks.
 666 */
 667static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
 668			int otime, struct list_head *pt)
 669{
 670	int i;
 671
 672	if (sma->complex_count || sops == NULL) {
 673		if (update_queue(sma, -1, pt))
 674			otime = 1;
 675		goto done;
 676	}
 677
 678	for (i = 0; i < nsops; i++) {
 679		if (sops[i].sem_op > 0 ||
 680			(sops[i].sem_op < 0 &&
 681				sma->sem_base[sops[i].sem_num].semval == 0))
 682			if (update_queue(sma, sops[i].sem_num, pt))
 683				otime = 1;
 684	}
 685done:
 686	if (otime)
 687		sma->sem_otime = get_seconds();
 688}
 689
 690
 691/* The following counts are associated to each semaphore:
 692 *   semncnt        number of tasks waiting on semval being nonzero
 693 *   semzcnt        number of tasks waiting on semval being zero
 694 * This model assumes that a task waits on exactly one semaphore.
 695 * Since semaphore operations are to be performed atomically, tasks actually
 696 * wait on a whole sequence of semaphores simultaneously.
 697 * The counts we return here are a rough approximation, but still
  698 * guarantee that semncnt+semzcnt>0 if the task is on the pending queue.
 699 */
 700static int count_semncnt (struct sem_array * sma, ushort semnum)
 701{
 702	int semncnt;
 703	struct sem_queue * q;
 704
 705	semncnt = 0;
 706	list_for_each_entry(q, &sma->sem_pending, list) {
 707		struct sembuf * sops = q->sops;
 708		int nsops = q->nsops;
 709		int i;
 710		for (i = 0; i < nsops; i++)
 711			if (sops[i].sem_num == semnum
 712			    && (sops[i].sem_op < 0)
 713			    && !(sops[i].sem_flg & IPC_NOWAIT))
 714				semncnt++;
 715	}
 716	return semncnt;
 717}
 718
 719static int count_semzcnt (struct sem_array * sma, ushort semnum)
 720{
 721	int semzcnt;
 722	struct sem_queue * q;
 723
 724	semzcnt = 0;
 725	list_for_each_entry(q, &sma->sem_pending, list) {
 726		struct sembuf * sops = q->sops;
 727		int nsops = q->nsops;
 728		int i;
 729		for (i = 0; i < nsops; i++)
 730			if (sops[i].sem_num == semnum
 731			    && (sops[i].sem_op == 0)
 732			    && !(sops[i].sem_flg & IPC_NOWAIT))
 733				semzcnt++;
 734	}
 735	return semzcnt;
 736}
 737
 738/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
  739 * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex
 740 * remains locked on exit.
 741 */
 742static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 743{
 744	struct sem_undo *un, *tu;
 745	struct sem_queue *q, *tq;
 746	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
 747	struct list_head tasks;
 748
 749	/* Free the existing undo structures for this semaphore set.  */
 750	assert_spin_locked(&sma->sem_perm.lock);
 751	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
 752		list_del(&un->list_id);
 753		spin_lock(&un->ulp->lock);
 754		un->semid = -1;
 755		list_del_rcu(&un->list_proc);
 756		spin_unlock(&un->ulp->lock);
 757		kfree_rcu(un, rcu);
 758	}
 759
 760	/* Wake up all pending processes and let them fail with EIDRM. */
 761	INIT_LIST_HEAD(&tasks);
 762	list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
 763		unlink_queue(sma, q);
 764		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
 765	}
 766
 767	/* Remove the semaphore set from the IDR */
 768	sem_rmid(ns, sma);
 769	sem_unlock(sma);
 770
 771	wake_up_sem_queue_do(&tasks);
 772	ns->used_sems -= sma->sem_nsems;
 773	security_sem_free(sma);
 774	ipc_rcu_putref(sma);
 775}
 776
 777static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
 778{
 779	switch(version) {
 780	case IPC_64:
 781		return copy_to_user(buf, in, sizeof(*in));
 782	case IPC_OLD:
 783	    {
 784		struct semid_ds out;
 785
 786		memset(&out, 0, sizeof(out));
 787
 788		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
 789
 790		out.sem_otime	= in->sem_otime;
 791		out.sem_ctime	= in->sem_ctime;
 792		out.sem_nsems	= in->sem_nsems;
 793
 794		return copy_to_user(buf, &out, sizeof(out));
 795	    }
 796	default:
 797		return -EINVAL;
 798	}
 799}
 800
 801static int semctl_nolock(struct ipc_namespace *ns, int semid,
 802			 int cmd, int version, union semun arg)
 803{
 804	int err;
 805	struct sem_array *sma;
 806
 807	switch(cmd) {
 808	case IPC_INFO:
 809	case SEM_INFO:
 810	{
 811		struct seminfo seminfo;
 812		int max_id;
 813
 814		err = security_sem_semctl(NULL, cmd);
 815		if (err)
 816			return err;
 817		
 818		memset(&seminfo,0,sizeof(seminfo));
 819		seminfo.semmni = ns->sc_semmni;
 820		seminfo.semmns = ns->sc_semmns;
 821		seminfo.semmsl = ns->sc_semmsl;
 822		seminfo.semopm = ns->sc_semopm;
 823		seminfo.semvmx = SEMVMX;
 824		seminfo.semmnu = SEMMNU;
 825		seminfo.semmap = SEMMAP;
 826		seminfo.semume = SEMUME;
 827		down_read(&sem_ids(ns).rw_mutex);
 828		if (cmd == SEM_INFO) {
 829			seminfo.semusz = sem_ids(ns).in_use;
 830			seminfo.semaem = ns->used_sems;
 831		} else {
 832			seminfo.semusz = SEMUSZ;
 833			seminfo.semaem = SEMAEM;
 834		}
 835		max_id = ipc_get_maxid(&sem_ids(ns));
 836		up_read(&sem_ids(ns).rw_mutex);
 837		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) 
 838			return -EFAULT;
 839		return (max_id < 0) ? 0: max_id;
 840	}
 841	case IPC_STAT:
 842	case SEM_STAT:
 843	{
 844		struct semid64_ds tbuf;
 845		int id;
 846
 847		if (cmd == SEM_STAT) {
 848			sma = sem_lock(ns, semid);
 849			if (IS_ERR(sma))
 850				return PTR_ERR(sma);
 851			id = sma->sem_perm.id;
 852		} else {
 853			sma = sem_lock_check(ns, semid);
 854			if (IS_ERR(sma))
 855				return PTR_ERR(sma);
 856			id = 0;
  857		}
  858
 859		err = -EACCES;
 860		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
 861			goto out_unlock;
 862
 863		err = security_sem_semctl(sma, cmd);
 864		if (err)
 865			goto out_unlock;
 866
 867		memset(&tbuf, 0, sizeof(tbuf));
 868
 869		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
 870		tbuf.sem_otime  = sma->sem_otime;
 871		tbuf.sem_ctime  = sma->sem_ctime;
 872		tbuf.sem_nsems  = sma->sem_nsems;
 873		sem_unlock(sma);
 874		if (copy_semid_to_user (arg.buf, &tbuf, version))
 875			return -EFAULT;
 876		return id;
 877	}
 878	default:
 879		return -EINVAL;
 880	}
 881out_unlock:
 882	sem_unlock(sma);
 883	return err;
 884}
 885
 886static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 887		int cmd, int version, union semun arg)
 888{
 889	struct sem_array *sma;
 890	struct sem* curr;
 891	int err;
 892	ushort fast_sem_io[SEMMSL_FAST];
 893	ushort* sem_io = fast_sem_io;
 894	int nsems;
 895	struct list_head tasks;
 896
 897	sma = sem_lock_check(ns, semid);
 898	if (IS_ERR(sma))
 899		return PTR_ERR(sma);
 900
 901	INIT_LIST_HEAD(&tasks);
 902	nsems = sma->sem_nsems;
 903
 904	err = -EACCES;
 905	if (ipcperms(ns, &sma->sem_perm,
 906			(cmd == SETVAL || cmd == SETALL) ? S_IWUGO : S_IRUGO))
 907		goto out_unlock;
 908
 909	err = security_sem_semctl(sma, cmd);
 910	if (err)
 911		goto out_unlock;
 912
 913	err = -EACCES;
 914	switch (cmd) {
 915	case GETALL:
 916	{
 917		ushort __user *array = arg.array;
 918		int i;
 919
 920		if(nsems > SEMMSL_FAST) {
 921			sem_getref_and_unlock(sma);
 922
 923			sem_io = ipc_alloc(sizeof(ushort)*nsems);
 924			if(sem_io == NULL) {
 925				sem_putref(sma);
 926				return -ENOMEM;
 927			}
 928
 929			sem_lock_and_putref(sma);
 930			if (sma->sem_perm.deleted) {
 931				sem_unlock(sma);
 932				err = -EIDRM;
 933				goto out_free;
 934			}
 935		}
 936
 937		for (i = 0; i < sma->sem_nsems; i++)
 938			sem_io[i] = sma->sem_base[i].semval;
 939		sem_unlock(sma);
 940		err = 0;
 941		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
 942			err = -EFAULT;
 943		goto out_free;
 944	}
 945	case SETALL:
 946	{
 947		int i;
 948		struct sem_undo *un;
 949
 950		sem_getref_and_unlock(sma);
 951
 952		if(nsems > SEMMSL_FAST) {
 953			sem_io = ipc_alloc(sizeof(ushort)*nsems);
 954			if(sem_io == NULL) {
 955				sem_putref(sma);
 956				return -ENOMEM;
 957			}
 958		}
 959
 960		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
 961			sem_putref(sma);
 962			err = -EFAULT;
 963			goto out_free;
 964		}
 965
 966		for (i = 0; i < nsems; i++) {
 967			if (sem_io[i] > SEMVMX) {
 968				sem_putref(sma);
 969				err = -ERANGE;
 970				goto out_free;
 971			}
 972		}
 973		sem_lock_and_putref(sma);
 974		if (sma->sem_perm.deleted) {
 975			sem_unlock(sma);
 976			err = -EIDRM;
 977			goto out_free;
 978		}
 979
 980		for (i = 0; i < nsems; i++)
 981			sma->sem_base[i].semval = sem_io[i];
 982
 983		assert_spin_locked(&sma->sem_perm.lock);
 984		list_for_each_entry(un, &sma->list_id, list_id) {
 985			for (i = 0; i < nsems; i++)
 986				un->semadj[i] = 0;
 987		}
 988		sma->sem_ctime = get_seconds();
 989		/* maybe some queued-up processes were waiting for this */
 990		do_smart_update(sma, NULL, 0, 0, &tasks);
 991		err = 0;
 992		goto out_unlock;
 993	}
  994	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
 995	}
 996	err = -EINVAL;
 997	if(semnum < 0 || semnum >= nsems)
 998		goto out_unlock;
 999
1000	curr = &sma->sem_base[semnum];
1001
1002	switch (cmd) {
1003	case GETVAL:
1004		err = curr->semval;
1005		goto out_unlock;
1006	case GETPID:
1007		err = curr->sempid;
1008		goto out_unlock;
1009	case GETNCNT:
1010		err = count_semncnt(sma,semnum);
1011		goto out_unlock;
1012	case GETZCNT:
1013		err = count_semzcnt(sma,semnum);
1014		goto out_unlock;
1015	case SETVAL:
1016	{
1017		int val = arg.val;
1018		struct sem_undo *un;
1019
1020		err = -ERANGE;
1021		if (val > SEMVMX || val < 0)
1022			goto out_unlock;
1023
1024		assert_spin_locked(&sma->sem_perm.lock);
1025		list_for_each_entry(un, &sma->list_id, list_id)
1026			un->semadj[semnum] = 0;
1027
1028		curr->semval = val;
1029		curr->sempid = task_tgid_vnr(current);
1030		sma->sem_ctime = get_seconds();
1031		/* maybe some queued-up processes were waiting for this */
1032		do_smart_update(sma, NULL, 0, 0, &tasks);
1033		err = 0;
1034		goto out_unlock;
1035	}
1036	}
1037out_unlock:
1038	sem_unlock(sma);
1039	wake_up_sem_queue_do(&tasks);
1040
1041out_free:
1042	if(sem_io != fast_sem_io)
1043		ipc_free(sem_io, sizeof(ushort)*nsems);
1044	return err;
1045}
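/*
 * Illustrative user-space sketch (not kernel code): on Linux the caller
 * must declare union semun itself. Reading all values of a three-
 * semaphore set through the GETALL path above:
 *
 *	union semun { int val; struct semid_ds *buf; unsigned short *array; };
 *
 *	unsigned short vals[3];
 *	union semun arg = { .array = vals };
 *	semctl(id, 0, GETALL, arg);
 *
 * On success vals[] holds the current semval of each semaphore.
 */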
1046
1047static inline unsigned long
1048copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1049{
1050	switch(version) {
1051	case IPC_64:
1052		if (copy_from_user(out, buf, sizeof(*out)))
1053			return -EFAULT;
1054		return 0;
1055	case IPC_OLD:
1056	    {
1057		struct semid_ds tbuf_old;
1058
1059		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1060			return -EFAULT;
1061
1062		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
1063		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
1064		out->sem_perm.mode	= tbuf_old.sem_perm.mode;
1065
1066		return 0;
1067	    }
1068	default:
1069		return -EINVAL;
1070	}
1071}
1072
1073/*
1074 * This function handles some semctl commands which require the rw_mutex
1075 * to be held in write mode.
1076 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
1077 */
1078static int semctl_down(struct ipc_namespace *ns, int semid,
1079		       int cmd, int version, union semun arg)
1080{
1081	struct sem_array *sma;
1082	int err;
1083	struct semid64_ds semid64;
1084	struct kern_ipc_perm *ipcp;
1085
1086	if(cmd == IPC_SET) {
1087		if (copy_semid_from_user(&semid64, arg.buf, version))
1088			return -EFAULT;
1089	}
1090
1091	ipcp = ipcctl_pre_down(ns, &sem_ids(ns), semid, cmd,
1092			       &semid64.sem_perm, 0);
1093	if (IS_ERR(ipcp))
1094		return PTR_ERR(ipcp);
1095
1096	sma = container_of(ipcp, struct sem_array, sem_perm);
1097
1098	err = security_sem_semctl(sma, cmd);
1099	if (err)
1100		goto out_unlock;
1101
1102	switch(cmd){
1103	case IPC_RMID:
1104		freeary(ns, ipcp);
1105		goto out_up;
1106	case IPC_SET:
1107		ipc_update_perm(&semid64.sem_perm, ipcp);
1108		sma->sem_ctime = get_seconds();
1109		break;
1110	default:
1111		err = -EINVAL;
1112	}
1113
1114out_unlock:
1115	sem_unlock(sma);
1116out_up:
1117	up_write(&sem_ids(ns).rw_mutex);
1118	return err;
1119}
1120
1121SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
1122{
1123	int err = -EINVAL;
1124	int version;
1125	struct ipc_namespace *ns;
1126
1127	if (semid < 0)
1128		return -EINVAL;
1129
1130	version = ipc_parse_version(&cmd);
1131	ns = current->nsproxy->ipc_ns;
1132
1133	switch(cmd) {
1134	case IPC_INFO:
1135	case SEM_INFO:
1136	case IPC_STAT:
1137	case SEM_STAT:
1138		err = semctl_nolock(ns, semid, cmd, version, arg);
 1139		return err;
 1140	case GETALL:
 1141	case GETVAL:
 1142	case GETPID:
 1143	case GETNCNT:
 1144	case GETZCNT:
 1145	case SETVAL:
 1146	case SETALL:
 1147		err = semctl_main(ns,semid,semnum,cmd,version,arg);
 1148		return err;
 1149	case IPC_RMID:
 1150	case IPC_SET:
 1151		err = semctl_down(ns, semid, cmd, version, arg);
 1152		return err;
1153	default:
1154		return -EINVAL;
1155	}
1156}
1157#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
1158asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
1159{
1160	return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
1161}
1162SYSCALL_ALIAS(sys_semctl, SyS_semctl);
1163#endif
1164
 1165/* If the task doesn't already have an undo_list, then allocate one
1166 * here.  We guarantee there is only one thread using this undo list,
1167 * and current is THE ONE
1168 *
1169 * If this allocation and assignment succeeds, but later
1170 * portions of this code fail, there is no need to free the sem_undo_list.
1171 * Just let it stay associated with the task, and it'll be freed later
1172 * at exit time.
1173 *
1174 * This can block, so callers must hold no locks.
1175 */
1176static inline int get_undo_list(struct sem_undo_list **undo_listp)
1177{
1178	struct sem_undo_list *undo_list;
1179
1180	undo_list = current->sysvsem.undo_list;
1181	if (!undo_list) {
1182		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1183		if (undo_list == NULL)
1184			return -ENOMEM;
1185		spin_lock_init(&undo_list->lock);
1186		atomic_set(&undo_list->refcnt, 1);
1187		INIT_LIST_HEAD(&undo_list->list_proc);
1188
1189		current->sysvsem.undo_list = undo_list;
1190	}
1191	*undo_listp = undo_list;
1192	return 0;
1193}
1194
1195static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1196{
1197	struct sem_undo *un;
1198
1199	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1200		if (un->semid == semid)
1201			return un;
1202	}
1203	return NULL;
1204}
1205
1206static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1207{
1208	struct sem_undo *un;
1209
1210  	assert_spin_locked(&ulp->lock);
1211
1212	un = __lookup_undo(ulp, semid);
1213	if (un) {
1214		list_del_rcu(&un->list_proc);
1215		list_add_rcu(&un->list_proc, &ulp->list_proc);
1216	}
1217	return un;
1218}
1219
1220/**
1221 * find_alloc_undo - Lookup (and if not present create) undo array
1222 * @ns: namespace
1223 * @semid: semaphore array id
1224 *
1225 * The function looks up (and if not present creates) the undo structure.
1226 * The size of the undo structure depends on the size of the semaphore
1227 * array, thus the alloc path is not that straightforward.
1228 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
1229 * performs a rcu_read_lock().
1230 */
1231static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1232{
1233	struct sem_array *sma;
1234	struct sem_undo_list *ulp;
1235	struct sem_undo *un, *new;
1236	int nsems;
1237	int error;
1238
1239	error = get_undo_list(&ulp);
1240	if (error)
1241		return ERR_PTR(error);
1242
1243	rcu_read_lock();
1244	spin_lock(&ulp->lock);
1245	un = lookup_undo(ulp, semid);
1246	spin_unlock(&ulp->lock);
1247	if (likely(un!=NULL))
1248		goto out;
1249	rcu_read_unlock();
1250
1251	/* no undo structure around - allocate one. */
1252	/* step 1: figure out the size of the semaphore array */
1253	sma = sem_lock_check(ns, semid);
1254	if (IS_ERR(sma))
1255		return ERR_CAST(sma);
1256
1257	nsems = sma->sem_nsems;
1258	sem_getref_and_unlock(sma);
1259
1260	/* step 2: allocate new undo structure */
1261	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1262	if (!new) {
1263		sem_putref(sma);
1264		return ERR_PTR(-ENOMEM);
1265	}
1266
1267	/* step 3: Acquire the lock on semaphore array */
1268	sem_lock_and_putref(sma);
1269	if (sma->sem_perm.deleted) {
1270		sem_unlock(sma);
1271		kfree(new);
1272		un = ERR_PTR(-EIDRM);
1273		goto out;
1274	}
1275	spin_lock(&ulp->lock);
1276
1277	/*
1278	 * step 4: check for races: did someone else allocate the undo struct?
1279	 */
1280	un = lookup_undo(ulp, semid);
1281	if (un) {
1282		kfree(new);
1283		goto success;
1284	}
1285	/* step 5: initialize & link new undo structure */
1286	new->semadj = (short *) &new[1];
1287	new->ulp = ulp;
1288	new->semid = semid;
1289	assert_spin_locked(&ulp->lock);
1290	list_add_rcu(&new->list_proc, &ulp->list_proc);
1291	assert_spin_locked(&sma->sem_perm.lock);
1292	list_add(&new->list_id, &sma->list_id);
1293	un = new;
1294
1295success:
1296	spin_unlock(&ulp->lock);
1297	rcu_read_lock();
1298	sem_unlock(sma);
1299out:
1300	return un;
1301}
1302
1303
1304/**
1305 * get_queue_result - Retrieve the result code from sem_queue
1306 * @q: Pointer to queue structure
1307 *
1308 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
1309 * q->status, then we must loop until the value is replaced with the final
1310 * value: This may happen if a task is woken up by an unrelated event (e.g.
1311 * signal) and in parallel the task is woken up by another task because it got
1312 * the requested semaphores.
1313 *
1314 * The function can be called with or without holding the semaphore spinlock.
1315 */
1316static int get_queue_result(struct sem_queue *q)
1317{
1318	int error;
1319
1320	error = q->status;
1321	while (unlikely(error == IN_WAKEUP)) {
1322		cpu_relax();
1323		error = q->status;
1324	}
1325
1326	return error;
1327}
1328
1329
1330SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1331		unsigned, nsops, const struct timespec __user *, timeout)
1332{
1333	int error = -EINVAL;
1334	struct sem_array *sma;
1335	struct sembuf fast_sops[SEMOPM_FAST];
1336	struct sembuf* sops = fast_sops, *sop;
1337	struct sem_undo *un;
1338	int undos = 0, alter = 0, max;
1339	struct sem_queue queue;
1340	unsigned long jiffies_left = 0;
1341	struct ipc_namespace *ns;
1342	struct list_head tasks;
1343
1344	ns = current->nsproxy->ipc_ns;
1345
1346	if (nsops < 1 || semid < 0)
1347		return -EINVAL;
1348	if (nsops > ns->sc_semopm)
1349		return -E2BIG;
1350	if(nsops > SEMOPM_FAST) {
1351		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
1352		if(sops==NULL)
1353			return -ENOMEM;
1354	}
1355	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
1356		error=-EFAULT;
1357		goto out_free;
1358	}
1359	if (timeout) {
1360		struct timespec _timeout;
1361		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
1362			error = -EFAULT;
1363			goto out_free;
1364		}
1365		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
1366			_timeout.tv_nsec >= 1000000000L) {
1367			error = -EINVAL;
1368			goto out_free;
1369		}
1370		jiffies_left = timespec_to_jiffies(&_timeout);
1371	}
1372	max = 0;
1373	for (sop = sops; sop < sops + nsops; sop++) {
1374		if (sop->sem_num >= max)
1375			max = sop->sem_num;
1376		if (sop->sem_flg & SEM_UNDO)
1377			undos = 1;
1378		if (sop->sem_op != 0)
1379			alter = 1;
1380	}
1381
1382	if (undos) {
1383		un = find_alloc_undo(ns, semid);
1384		if (IS_ERR(un)) {
1385			error = PTR_ERR(un);
1386			goto out_free;
1387		}
1388	} else
1389		un = NULL;
1390
1391	INIT_LIST_HEAD(&tasks);
1392
1393	sma = sem_lock_check(ns, semid);
1394	if (IS_ERR(sma)) {
1395		if (un)
1396			rcu_read_unlock();
1397		error = PTR_ERR(sma);
1398		goto out_free;
1399	}
1400
1401	/*
1402	 * semid identifiers are not unique - find_alloc_undo may have
1403	 * allocated an undo structure, it was invalidated by an RMID
 1404 * and now a new array has received the same id. Check and fail.
 1405 * This case can be detected by checking un->semid. The existence of
1406	 * "un" itself is guaranteed by rcu.
1407	 */
1408	error = -EIDRM;
1409	if (un) {
1410		if (un->semid == -1) {
1411			rcu_read_unlock();
1412			goto out_unlock_free;
1413		} else {
1414			/*
1415			 * rcu lock can be released, "un" cannot disappear:
1416			 * - sem_lock is acquired, thus IPC_RMID is
1417			 *   impossible.
1418			 * - exit_sem is impossible, it always operates on
1419			 *   current (or a dead task).
1420			 */
1421
1422			rcu_read_unlock();
1423		}
1424	}
1425
1426	error = -EFBIG;
1427	if (max >= sma->sem_nsems)
1428		goto out_unlock_free;
1429
1430	error = -EACCES;
1431	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
1432		goto out_unlock_free;
1433
1434	error = security_sem_semop(sma, sops, nsops, alter);
1435	if (error)
1436		goto out_unlock_free;
1437
1438	error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
1439	if (error <= 0) {
1440		if (alter && error == 0)
1441			do_smart_update(sma, sops, nsops, 1, &tasks);
1442
1443		goto out_unlock_free;
1444	}
1445
1446	/* We need to sleep on this operation, so we put the current
1447	 * task into the pending queue and go to sleep.
1448	 */
1449		
1450	queue.sops = sops;
1451	queue.nsops = nsops;
1452	queue.undo = un;
1453	queue.pid = task_tgid_vnr(current);
1454	queue.alter = alter;
1455	if (alter)
1456		list_add_tail(&queue.list, &sma->sem_pending);
1457	else
1458		list_add(&queue.list, &sma->sem_pending);
1459
1460	if (nsops == 1) {
1461		struct sem *curr;
1462		curr = &sma->sem_base[sops->sem_num];
1463
1464		if (alter)
1465			list_add_tail(&queue.simple_list, &curr->sem_pending);
1466		else
1467			list_add(&queue.simple_list, &curr->sem_pending);
1468	} else {
1469		INIT_LIST_HEAD(&queue.simple_list);
1470		sma->complex_count++;
1471	}
1472
1473	queue.status = -EINTR;
1474	queue.sleeper = current;
1475
1476sleep_again:
1477	current->state = TASK_INTERRUPTIBLE;
1478	sem_unlock(sma);
1479
1480	if (timeout)
1481		jiffies_left = schedule_timeout(jiffies_left);
1482	else
1483		schedule();
1484
1485	error = get_queue_result(&queue);
1486
1487	if (error != -EINTR) {
1488		/* fast path: update_queue already obtained all requested
1489		 * resources.
1490		 * Perform a smp_mb(): User space could assume that semop()
1491		 * is a memory barrier: Without the mb(), the cpu could
1492		 * speculatively read in user space stale data that was
1493		 * overwritten by the previous owner of the semaphore.
1494		 */
1495		smp_mb();
1496
1497		goto out_free;
1498	}
1499
1500	sma = sem_lock(ns, semid);
1501
1502	/*
1503	 * Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
1504	 */
1505	error = get_queue_result(&queue);
1506
1507	/*
1508	 * Array removed? If yes, leave without sem_unlock().
1509	 */
1510	if (IS_ERR(sma)) {
1511		goto out_free;
1512	}
1513
1514
1515	/*
1516	 * If queue.status != -EINTR we are woken up by another process.
1517	 * Leave without unlink_queue(), but with sem_unlock().
1518	 */
1519
1520	if (error != -EINTR) {
1521		goto out_unlock_free;
1522	}
1523
1524	/*
1525	 * If an interrupt occurred we have to clean up the queue
1526	 */
1527	if (timeout && jiffies_left == 0)
1528		error = -EAGAIN;
1529
1530	/*
1531	 * If the wakeup was spurious, just retry
1532	 */
1533	if (error == -EINTR && !signal_pending(current))
1534		goto sleep_again;
1535
1536	unlink_queue(sma, &queue);
1537
1538out_unlock_free:
1539	sem_unlock(sma);
1540
1541	wake_up_sem_queue_do(&tasks);
1542out_free:
1543	if(sops != fast_sops)
1544		kfree(sops);
1545	return error;
1546}
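/*
 * Illustrative user-space sketch (not kernel code), assuming "id" names
 * an existing set: a decrement that gives up after one second. When the
 * timeout expires, the code above turns the -EINTR into -EAGAIN and
 * user space sees errno == EAGAIN.
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	int rc = semtimedop(id, &op, 1, &ts);
 */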
1547
1548SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
1549		unsigned, nsops)
1550{
1551	return sys_semtimedop(semid, tsops, nsops, NULL);
1552}
1553
1554/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
1555 * parent and child tasks.
1556 */
1557
1558int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
1559{
1560	struct sem_undo_list *undo_list;
1561	int error;
1562
1563	if (clone_flags & CLONE_SYSVSEM) {
1564		error = get_undo_list(&undo_list);
1565		if (error)
1566			return error;
1567		atomic_inc(&undo_list->refcnt);
1568		tsk->sysvsem.undo_list = undo_list;
1569	} else 
1570		tsk->sysvsem.undo_list = NULL;
1571
1572	return 0;
1573}
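/*
 * Illustrative sketch (not kernel code; "worker" and "stack_top" are
 * placeholders): a task created with CLONE_SYSVSEM shares the undo list
 * via the reference taken above, so each SEM_UNDO adjustment is applied
 * only once, when the last sharer exits.
 *
 *	clone(worker, stack_top, CLONE_VM | CLONE_SYSVSEM | SIGCHLD, NULL);
 */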
1574
1575/*
1576 * add semadj values to semaphores, free undo structures.
1577 * undo structures are not freed when semaphore arrays are destroyed
1578 * so some of them may be out of date.
1579 * IMPLEMENTATION NOTE: There is some confusion over whether the
1580 * set of adjustments that needs to be done should be done in an atomic
1581 * manner or not. That is, if we are attempting to decrement the semval
1582 * should we queue up and wait until we can do so legally?
1583 * The original implementation attempted to do this (queue and wait).
1584 * The current implementation does not do so. The POSIX standard
1585 * and SVID should be consulted to determine what behavior is mandated.
1586 */
1587void exit_sem(struct task_struct *tsk)
1588{
1589	struct sem_undo_list *ulp;
1590
1591	ulp = tsk->sysvsem.undo_list;
1592	if (!ulp)
1593		return;
1594	tsk->sysvsem.undo_list = NULL;
1595
1596	if (!atomic_dec_and_test(&ulp->refcnt))
1597		return;
1598
1599	for (;;) {
1600		struct sem_array *sma;
1601		struct sem_undo *un;
1602		struct list_head tasks;
1603		int semid;
1604		int i;
1605
1606		rcu_read_lock();
1607		un = list_entry_rcu(ulp->list_proc.next,
1608				    struct sem_undo, list_proc);
1609		if (&un->list_proc == &ulp->list_proc)
1610			semid = -1;
 1611		else
1612			semid = un->semid;
1613		rcu_read_unlock();
1614
1615		if (semid == -1)
1616			break;
1617
1618		sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
 1619
 1620		/* exit_sem raced with IPC_RMID, nothing to do */
 1621		if (IS_ERR(sma))
 1622			continue;
 1623
1624		un = __lookup_undo(ulp, semid);
1625		if (un == NULL) {
1626			/* exit_sem raced with IPC_RMID+semget() that created
1627			 * exactly the same semid. Nothing to do.
1628			 */
1629			sem_unlock(sma);
1630			continue;
1631		}
1632
1633		/* remove un from the linked lists */
1634		assert_spin_locked(&sma->sem_perm.lock);
1635		list_del(&un->list_id);
1636
1637		spin_lock(&ulp->lock);
1638		list_del_rcu(&un->list_proc);
1639		spin_unlock(&ulp->lock);
1640
1641		/* perform adjustments registered in un */
1642		for (i = 0; i < sma->sem_nsems; i++) {
1643			struct sem * semaphore = &sma->sem_base[i];
1644			if (un->semadj[i]) {
1645				semaphore->semval += un->semadj[i];
1646				/*
1647				 * Range checks of the new semaphore value,
1648				 * not defined by sus:
1649				 * - Some unices ignore the undo entirely
1650				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
1651				 * - some cap the value (e.g. FreeBSD caps
1652				 *   at 0, but doesn't enforce SEMVMX)
1653				 *
1654				 * Linux caps the semaphore value, both at 0
1655				 * and at SEMVMX.
1656				 *
1657				 * 	Manfred <manfred@colorfullife.com>
1658				 */
1659				if (semaphore->semval < 0)
1660					semaphore->semval = 0;
1661				if (semaphore->semval > SEMVMX)
1662					semaphore->semval = SEMVMX;
1663				semaphore->sempid = task_tgid_vnr(current);
1664			}
1665		}
1666		/* maybe some queued-up processes were waiting for this */
1667		INIT_LIST_HEAD(&tasks);
1668		do_smart_update(sma, NULL, 0, 1, &tasks);
1669		sem_unlock(sma);
1670		wake_up_sem_queue_do(&tasks);
1671
1672		kfree_rcu(un, rcu);
1673	}
1674	kfree(ulp);
1675}
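/*
 * Worked example for the adjustment loop above: a task that performed
 * semop() with { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO } and
 * then exited left un->semadj[0] == +1 (try_atomic_semop subtracts
 * sem_op), so exit_sem() adds 1 back to semval, clamped to 0..SEMVMX as
 * described in the comment above.
 */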
1676
1677#ifdef CONFIG_PROC_FS
1678static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
1679{
1680	struct sem_array *sma = it;
1681
1682	return seq_printf(s,
1683			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
1684			  sma->sem_perm.key,
1685			  sma->sem_perm.id,
1686			  sma->sem_perm.mode,
1687			  sma->sem_nsems,
1688			  sma->sem_perm.uid,
1689			  sma->sem_perm.gid,
1690			  sma->sem_perm.cuid,
1691			  sma->sem_perm.cgid,
1692			  sma->sem_otime,
1693			  sma->sem_ctime);
1694}
1695#endif
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/ipc/sem.c
   4 * Copyright (C) 1992 Krishna Balasubramanian
   5 * Copyright (C) 1995 Eric Schenk, Bruno Haible
   6 *
   7 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
   8 *
   9 * SMP-threaded, sysctl's added
  10 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
  11 * Enforced range limit on SEM_UNDO
  12 * (c) 2001 Red Hat Inc
  13 * Lockless wakeup
  14 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
  15 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
  16 * Further wakeup optimizations, documentation
  17 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
  18 *
  19 * support for audit of ipc object properties and permission changes
  20 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  21 *
  22 * namespaces support
  23 * OpenVZ, SWsoft Inc.
  24 * Pavel Emelianov <xemul@openvz.org>
  25 *
  26 * Implementation notes: (May 2010)
  27 * This file implements System V semaphores.
  28 *
  29 * User space visible behavior:
  30 * - FIFO ordering for semop() operations (just FIFO, not starvation
  31 *   protection)
  32 * - multiple semaphore operations that alter the same semaphore in
  33 *   one semop() are handled.
  34 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
  35 *   SETALL calls.
  36 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
  37 * - undo adjustments at process exit are limited to 0..SEMVMX.
  38 * - namespace are supported.
  39 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtine by writing
  40 *   to /proc/sys/kernel/sem.
  41 * - statistics about the usage are reported in /proc/sysvipc/sem.
  42 *
  43 * Internals:
  44 * - scalability:
  45 *   - all global variables are read-mostly.
  46 *   - semop() calls and semctl(RMID) are synchronized by RCU.
  47 *   - most operations do write operations (actually: spin_lock calls) to
  48 *     the per-semaphore array structure.
  49 *   Thus: Perfect SMP scaling between independent semaphore arrays.
  50 *         If multiple semaphores in one array are used, then cache line
  51 *         trashing on the semaphore array spinlock will limit the scaling.
  52 * - semncnt and semzcnt are calculated on demand in count_semcnt()
 
  53 * - the task that performs a successful semop() scans the list of all
  54 *   sleeping tasks and completes any pending operations that can be fulfilled.
  55 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
  56 *   (see update_queue())
  57 * - To improve the scalability, the actual wake-up calls are performed after
  58 *   dropping all locks. (see wake_up_sem_queue_prepare())
 
  59 * - All work is done by the waker, the woken up task does not have to do
  60 *   anything - not even acquiring a lock or dropping a refcount.
  61 * - A woken up task may not even touch the semaphore array anymore, it may
  62 *   have been destroyed already by a semctl(RMID).
 
 
 
  63 * - UNDO values are stored in an array (one per process and per
  64 *   semaphore array, lazily allocated). For backwards compatibility, multiple
  65 *   modes for the UNDO variables are supported (per process, per thread)
  66 *   (see copy_semundo, CLONE_SYSVSEM)
  67 * - There are two lists of the pending operations: a per-array list
  68 *   and per-semaphore list (stored in the array). This allows to achieve FIFO
  69 *   ordering without always scanning all pending operations.
  70 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
  71 */
  72
  73#include <linux/slab.h>
  74#include <linux/spinlock.h>
  75#include <linux/init.h>
  76#include <linux/proc_fs.h>
  77#include <linux/time.h>
  78#include <linux/security.h>
  79#include <linux/syscalls.h>
  80#include <linux/audit.h>
  81#include <linux/capability.h>
  82#include <linux/seq_file.h>
  83#include <linux/rwsem.h>
  84#include <linux/nsproxy.h>
  85#include <linux/ipc_namespace.h>
  86#include <linux/sched/wake_q.h>
  87
  88#include <linux/uaccess.h>
  89#include "util.h"
  90
  91/* One semaphore structure for each semaphore in the system. */
  92struct sem {
  93	int	semval;		/* current value */
  94	/*
  95	 * PID of the process that last modified the semaphore. For
  96	 * Linux, specifically these are:
  97	 *  - semop
  98	 *  - semctl, via SETVAL and SETALL.
  99	 *  - at task exit when performing undo adjustments (see exit_sem).
 100	 */
 101	struct pid *sempid;
 102	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
 103	struct list_head pending_alter; /* pending single-sop operations */
 104					/* that alter the semaphore */
 105	struct list_head pending_const; /* pending single-sop operations */
 106					/* that do not alter the semaphore*/
 107	time_t	sem_otime;	/* candidate for sem_otime */
 108} ____cacheline_aligned_in_smp;
 109
 110/* One sem_array data structure for each set of semaphores in the system. */
 111struct sem_array {
 112	struct kern_ipc_perm	sem_perm;	/* permissions .. see ipc.h */
 113	time64_t		sem_ctime;	/* create/last semctl() time */
 114	struct list_head	pending_alter;	/* pending operations */
 115						/* that alter the array */
 116	struct list_head	pending_const;	/* pending complex operations */
 117						/* that do not alter semvals */
 118	struct list_head	list_id;	/* undo requests on this array */
 119	int			sem_nsems;	/* no. of semaphores in array */
 120	int			complex_count;	/* pending complex operations */
 121	unsigned int		use_global_lock;/* >0: global lock required */
 122
 123	struct sem		sems[];
 124} __randomize_layout;
 125
 126/* One queue for each sleeping process in the system. */
 127struct sem_queue {
 128	struct list_head	list;	 /* queue of pending operations */
 129	struct task_struct	*sleeper; /* this process */
 130	struct sem_undo		*undo;	 /* undo structure */
 131	struct pid		*pid;	 /* process id of requesting process */
 132	int			status;	 /* completion status of operation */
 133	struct sembuf		*sops;	 /* array of pending operations */
 134	struct sembuf		*blocking; /* the operation that blocked */
 135	int			nsops;	 /* number of operations */
 136	bool			alter;	 /* does *sops alter the array? */
 137	bool			dupsop;	 /* sops on more than one sem_num */
 138};
 139
 140/* Each task has a list of undo requests. They are executed automatically
 141 * when the process exits.
 142 */
 143struct sem_undo {
 144	struct list_head	list_proc;	/* per-process list: *
 145						 * all undos from one process
 146						 * rcu protected */
 147	struct rcu_head		rcu;		/* rcu struct for sem_undo */
 148	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
 149	struct list_head	list_id;	/* per semaphore array list:
 150						 * all undos for one array */
 151	int			semid;		/* semaphore set identifier */
 152	short			*semadj;	/* array of adjustments */
 153						/* one per semaphore */
 154};
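    /*
     * Illustrative user-space view of SEM_UNDO (a hedged sketch, not part
     * of this file; "semid" is assumed to be a valid semaphore set id):
     *
     *	#include <sys/sem.h>
     *
     *	struct sembuf op = { .sem_num = 0, .sem_op = -1,
     *			     .sem_flg = SEM_UNDO };
     *	semop(semid, &op, 1);	(records +1 in semadj[0])
     *
     * If the process exits without releasing the semaphore, exit_sem()
     * applies the recorded adjustment, with the result limited to 0..SEMVMX.
     */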
 155
 156/* sem_undo_list controls shared access to the list of sem_undo structures
 157 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
 158 */
 159struct sem_undo_list {
 160	refcount_t		refcnt;
 161	spinlock_t		lock;
 162	struct list_head	list_proc;
 163};
 164
 165
 166#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])
 167
 168static int newary(struct ipc_namespace *, struct ipc_params *);
 169static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
 170#ifdef CONFIG_PROC_FS
 171static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 172#endif
 173
 174#define SEMMSL_FAST	256 /* 512 bytes on stack */
 175#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
 176
 177/*
 178 * Switching from the mode suitable for simple ops
 179 * to the mode for complex ops is costly. Therefore:
 180 * use some hysteresis
 181 */
 182#define USE_GLOBAL_LOCK_HYSTERESIS	10
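    /*
     * Sketch of the intended dynamics (an illustration, not code):
     * complexmode_enter() resets use_global_lock to 10; each release of
     * the global lock with no complex ops sleeping decrements it. Only
     * after 10 consecutive "simple" unlocks does use_global_lock reach 0
     * and the per-semaphore fast path reopen, so a single complex op does
     * not make the array bounce between the two locking modes.
     */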
 183
 184/*
 185 * Locking:
 186 * a) global sem_lock() for read/write
 187 *	sem_undo.id_next,
 188 *	sem_array.complex_count,
 189 *	sem_array.pending{_alter,_const},
 190 *	sem_array.sem_undo
 191 *
 192 * b) global or semaphore sem_lock() for read/write:
 193 *	sem_array.sems[i].pending_{const,alter}:
 194 *
 195 * c) special:
 196 *	sem_undo_list.list_proc:
 197 *	* undo_list->lock for write
 198 *	* rcu for read
 199 *	use_global_lock:
 200 *	* global sem_lock() for write
 201 *	* either local or global sem_lock() for read.
 202 *
 203 * Memory ordering:
 204 * Most ordering is enforced by using spin_lock() and spin_unlock().
 205 * The special case is use_global_lock:
 206 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
 207 * using smp_store_release().
 208 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
 209 * smp_load_acquire().
 210 * Setting it from 0 to non-zero must be ordered with regards to
 211 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 212 * is inside a spin_lock() and after a write from 0 to non-zero a
 213 * spin_lock()+spin_unlock() is done.
 214 */
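    /*
     * Illustrative pairing of the barriers described above (a sketch,
     * not executable code):
     *
     *	CPU A: sem_lock() fast path	CPU B: complexmode_tryleave()
     *	spin_lock(&sem->lock);		(global lock held)
     *	if (!smp_load_acquire(		smp_store_release(
     *		&sma->use_global_lock))		&sma->use_global_lock, 0);
     *		take the fast path
     *
     * The ACQUIRE on CPU A pairs with the RELEASE on CPU B: once CPU A
     * observes use_global_lock == 0, it also observes every write CPU B
     * made before leaving global lock mode.
     */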
 215
 216#define sc_semmsl	sem_ctls[0]
 217#define sc_semmns	sem_ctls[1]
 218#define sc_semopm	sem_ctls[2]
 219#define sc_semmni	sem_ctls[3]
 220
 221int sem_init_ns(struct ipc_namespace *ns)
 222{
 223	ns->sc_semmsl = SEMMSL;
 224	ns->sc_semmns = SEMMNS;
 225	ns->sc_semopm = SEMOPM;
 226	ns->sc_semmni = SEMMNI;
 227	ns->used_sems = 0;
 228	return ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
 229}
 230
 231#ifdef CONFIG_IPC_NS
 232void sem_exit_ns(struct ipc_namespace *ns)
 233{
 234	free_ipcs(ns, &sem_ids(ns), freeary);
 235	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 236	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
 237}
 238#endif
 239
 240int __init sem_init(void)
 241{
 242	const int err = sem_init_ns(&init_ipc_ns);
 243
 244	ipc_init_proc_interface("sysvipc/sem",
 245				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
 246				IPC_SEM_IDS, sysvipc_sem_proc_show);
 247	return err;
 248}
 249
 250/**
 251 * unmerge_queues - unmerge queues, if possible.
 252 * @sma: semaphore array
 253 *
 254 * The function unmerges the wait queues if complex_count is 0.
 255 * It must be called prior to dropping the global semaphore array lock.
 256 */
 257static void unmerge_queues(struct sem_array *sma)
 258{
 259	struct sem_queue *q, *tq;
 260
 261	/* complex operations still around? */
 262	if (sma->complex_count)
 263		return;
 264	/*
 265	 * We will switch back to simple mode.
 266	 * Move all pending operation back into the per-semaphore
 267	 * queues.
 268	 */
 269	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
 270		struct sem *curr;
 271		curr = &sma->sems[q->sops[0].sem_num];
 272
 273		list_add_tail(&q->list, &curr->pending_alter);
 274	}
 275	INIT_LIST_HEAD(&sma->pending_alter);
 276}
 277
 278/**
 279 * merge_queues - merge single semop queues into global queue
 280 * @sma: semaphore array
 281 *
 282 * This function merges all per-semaphore queues into the global queue.
 283 * It is necessary to achieve FIFO ordering for the pending single-sop
 284 * operations when a multi-semop operation must sleep.
 285 * Only the alter operations must be moved, the const operations can stay.
 286 */
 287static void merge_queues(struct sem_array *sma)
 288{
 289	int i;
 290	for (i = 0; i < sma->sem_nsems; i++) {
 291		struct sem *sem = &sma->sems[i];
 292
 293		list_splice_init(&sem->pending_alter, &sma->pending_alter);
 294	}
 295}
 296
 297static void sem_rcu_free(struct rcu_head *head)
 298{
 299	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
 300	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
 301
 302	security_sem_free(&sma->sem_perm);
 303	kvfree(sma);
 304}
 305
 306/*
 307 * Enter the mode suitable for non-simple operations:
 308 * Caller must own sem_perm.lock.
 309 */
 310static void complexmode_enter(struct sem_array *sma)
 311{
 312	int i;
 313	struct sem *sem;
 314
 315	if (sma->use_global_lock > 0)  {
 316		/*
 317		 * We are already in global lock mode.
 318		 * Nothing to do, just reset the
 319		 * counter until we return to simple mode.
 320		 */
 321		sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 322		return;
 323	}
 324	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 325
 326	for (i = 0; i < sma->sem_nsems; i++) {
 327		sem = &sma->sems[i];
 328		spin_lock(&sem->lock);
 329		spin_unlock(&sem->lock);
 330	}
 331}
 332
 333/*
 334 * Try to leave the mode that disallows simple operations:
 335 * Caller must own sem_perm.lock.
 336 */
 337static void complexmode_tryleave(struct sem_array *sma)
 338{
 339	if (sma->complex_count)  {
 340		/* Complex ops are sleeping.
 341		 * We must stay in complex mode
 342		 */
 343		return;
 344	}
 345	if (sma->use_global_lock == 1) {
 346		/*
 347		 * Immediately after setting use_global_lock to 0,
 348		 * a simple op can start. Thus: all memory writes
 349		 * performed by the current operation must be visible
 350		 * before we set use_global_lock to 0.
 351		 */
 352		smp_store_release(&sma->use_global_lock, 0);
 353	} else {
 354		sma->use_global_lock--;
 355	}
 356}
 357
 358#define SEM_GLOBAL_LOCK	(-1)
 359/*
 360 * If the request contains only one semaphore operation, and there are
 361 * no complex transactions pending, lock only the semaphore involved.
 362 * Otherwise, lock the entire semaphore array, since we either have
 363 * multiple semaphores in our own semops, or we need to look at
 364 * semaphores from other pending complex operations.
 365 */
 366static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 367			      int nsops)
 368{
 369	struct sem *sem;
 370
 371	if (nsops != 1) {
 372		/* Complex operation - acquire a full lock */
 373		ipc_lock_object(&sma->sem_perm);
 374
 375		/* Prevent parallel simple ops */
 376		complexmode_enter(sma);
 377		return SEM_GLOBAL_LOCK;
 378	}
 379
 380	/*
 381	 * Only one semaphore affected - try to optimize locking.
 382	 * Optimized locking is possible if no complex operation
 383	 * is either enqueued or processed right now.
 384	 *
 385	 * Both facts are tracked by use_global_lock.
 386	 */
 387	sem = &sma->sems[sops->sem_num];
 388
 389	/*
 390	 * Initial check for use_global_lock. Just an optimization,
 391	 * no locking, no memory barrier.
 392	 */
 393	if (!sma->use_global_lock) {
 394		/*
 395		 * It appears that no complex operation is around.
 396		 * Acquire the per-semaphore lock.
 397		 */
 398		spin_lock(&sem->lock);
 399
 400		/* pairs with smp_store_release() */
 401		if (!smp_load_acquire(&sma->use_global_lock)) {
 402			/* fast path successful! */
 403			return sops->sem_num;
 404		}
 405		spin_unlock(&sem->lock);
 406	}
 407
 408	/* slow path: acquire the full lock */
 409	ipc_lock_object(&sma->sem_perm);
 410
 411	if (sma->use_global_lock == 0) {
 412		/*
 413		 * The use_global_lock mode ended while we waited for
 414		 * sma->sem_perm.lock. Thus we must switch to locking
 415		 * with sem->lock.
 416		 * Unlike in the fast path, there is no need to recheck
 417		 * sma->use_global_lock after we have acquired sem->lock:
 418		 * We own sma->sem_perm.lock, thus use_global_lock cannot
 419		 * change.
 420		 */
 421		spin_lock(&sem->lock);
 422
 423		ipc_unlock_object(&sma->sem_perm);
 424		return sops->sem_num;
 425	} else {
 426		/*
 427		 * Not a false alarm, thus continue to use the global lock
 428		 * mode. No need for complexmode_enter(), this was done by
 429	 * the caller that has set use_global_lock to non-zero.
 430		 */
 431		return SEM_GLOBAL_LOCK;
 432	}
 433}
 434
 435static inline void sem_unlock(struct sem_array *sma, int locknum)
 436{
 437	if (locknum == SEM_GLOBAL_LOCK) {
 438		unmerge_queues(sma);
 439		complexmode_tryleave(sma);
 440		ipc_unlock_object(&sma->sem_perm);
 441	} else {
 442		struct sem *sem = &sma->sems[locknum];
 443		spin_unlock(&sem->lock);
 444	}
 445}
 446
 447/*
 448 * sem_lock_(check_) routines are called in the paths where the rwsem
 449 * is not held.
 450 *
 451 * The caller holds the RCU read lock.
 452 */
 453static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
 454{
 455	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
 456
 457	if (IS_ERR(ipcp))
 458		return ERR_CAST(ipcp);
 459
 460	return container_of(ipcp, struct sem_array, sem_perm);
 461}
 462
 463static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
 464							int id)
 465{
 466	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
 467
 468	if (IS_ERR(ipcp))
 469		return ERR_CAST(ipcp);
 470
 471	return container_of(ipcp, struct sem_array, sem_perm);
 472}
 473
 474static inline void sem_lock_and_putref(struct sem_array *sma)
 475{
 476	sem_lock(sma, NULL, -1);
 477	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 478}
 479
 480static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 481{
 482	ipc_rmid(&sem_ids(ns), &s->sem_perm);
 483}
 484
 485static struct sem_array *sem_alloc(size_t nsems)
 486{
 487	struct sem_array *sma;
 488	size_t size;
 489
 490	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
 491		return NULL;
 492
 493	size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
 494	sma = kvmalloc(size, GFP_KERNEL);
 495	if (unlikely(!sma))
 496		return NULL;
 497
 498	memset(sma, 0, size);
 499
 500	return sma;
 501}
 502
 503/**
 504 * newary - Create a new semaphore set
 505 * @ns: namespace
 506 * @params: ptr to the structure that contains key, semflg and nsems
 507 *
 508 * Called with sem_ids.rwsem held (as a writer)
 509 */
 510static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 511{
 512	int retval;
 513	struct sem_array *sma;
 514	key_t key = params->key;
 515	int nsems = params->u.nsems;
 516	int semflg = params->flg;
 517	int i;
 518
 519	if (!nsems)
 520		return -EINVAL;
 521	if (ns->used_sems + nsems > ns->sc_semmns)
 522		return -ENOSPC;
 523
 524	sma = sem_alloc(nsems);
 525	if (!sma)
 526		return -ENOMEM;
 527
 528	sma->sem_perm.mode = (semflg & S_IRWXUGO);
 529	sma->sem_perm.key = key;
 530
 531	sma->sem_perm.security = NULL;
 532	retval = security_sem_alloc(&sma->sem_perm);
 533	if (retval) {
 534		kvfree(sma);
 535		return retval;
 536	}
 537
 538	for (i = 0; i < nsems; i++) {
 539		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
 540		INIT_LIST_HEAD(&sma->sems[i].pending_const);
 541		spin_lock_init(&sma->sems[i].lock);
 542	}
 543
 544	sma->complex_count = 0;
 545	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 546	INIT_LIST_HEAD(&sma->pending_alter);
 547	INIT_LIST_HEAD(&sma->pending_const);
 548	INIT_LIST_HEAD(&sma->list_id);
 549	sma->sem_nsems = nsems;
 550	sma->sem_ctime = ktime_get_real_seconds();
 551
 552	/* ipc_addid() locks sma upon success. */
 553	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
 554	if (retval < 0) {
 555		call_rcu(&sma->sem_perm.rcu, sem_rcu_free);
 556		return retval;
 557	}
 558	ns->used_sems += nsems;
 559
 560	sem_unlock(sma, -1);
 561	rcu_read_unlock();
 562
 563	return sma->sem_perm.id;
 564}
 565
 566
 567/*
 568 * Called with sem_ids.rwsem and ipcp locked.
 569 */
 570static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
 571				struct ipc_params *params)
 572{
 573	struct sem_array *sma;
 574
 575	sma = container_of(ipcp, struct sem_array, sem_perm);
 576	if (params->u.nsems > sma->sem_nsems)
 577		return -EINVAL;
 578
 579	return 0;
 580}
 581
 582long ksys_semget(key_t key, int nsems, int semflg)
 583{
 584	struct ipc_namespace *ns;
 585	static const struct ipc_ops sem_ops = {
 586		.getnew = newary,
 587		.associate = security_sem_associate,
 588		.more_checks = sem_more_checks,
 589	};
 590	struct ipc_params sem_params;
 591
 592	ns = current->nsproxy->ipc_ns;
 593
 594	if (nsems < 0 || nsems > ns->sc_semmsl)
 595		return -EINVAL;
 596
 597	sem_params.key = key;
 598	sem_params.flg = semflg;
 599	sem_params.u.nsems = nsems;
 600
 601	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 602}
 603
 604SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 605{
 606	return ksys_semget(key, nsems, semflg);
 607}
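    /*
     * Illustrative user-space call reaching ksys_semget() (a hedged
     * sketch; the key and mode are made-up example values):
     *
     *	#include <sys/ipc.h>
     *	#include <sys/sem.h>
     *
     *	int semid = semget(IPC_PRIVATE, 4, IPC_CREAT | 0600);
     *
     * This requests a new set of 4 semaphores; nsems is validated against
     * ns->sc_semmsl above, and newary() rejects nsems == 0 with -EINVAL.
     */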
 608
 609/**
 610 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 611 *                               operations on a given array.
 612 * @sma: semaphore array
 613 * @q: struct sem_queue that describes the operation
 614 *
 615 * Caller blocking behavior is as follows, based on the value
 616 * indicated by the semaphore operation (sem_op):
 617 *
 618 *  (1) >0 never blocks.
 619 *  (2)  0 (wait-for-zero operation): semval is non-zero.
 620 *  (3) <0 attempting to decrement semval to a value smaller than zero.
 621 *
 622 * Returns 0 if the operation was possible.
 623 * Returns 1 if the operation is impossible, the caller must sleep.
 624 * Returns <0 for error codes.
 625 */
 626static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 627{
 628	int result, sem_op, nsops;
 629	struct pid *pid;
 630	struct sembuf *sop;
 631	struct sem *curr;
 632	struct sembuf *sops;
 633	struct sem_undo *un;
 634
 635	sops = q->sops;
 636	nsops = q->nsops;
 637	un = q->undo;
 638
 639	for (sop = sops; sop < sops + nsops; sop++) {
 640		curr = &sma->sems[sop->sem_num];
 641		sem_op = sop->sem_op;
 642		result = curr->semval;
 643
 644		if (!sem_op && result)
 645			goto would_block;
 646
 647		result += sem_op;
 648		if (result < 0)
 649			goto would_block;
 650		if (result > SEMVMX)
 651			goto out_of_range;
 652
 653		if (sop->sem_flg & SEM_UNDO) {
 654			int undo = un->semadj[sop->sem_num] - sem_op;
 655			/* Exceeding the undo range is an error. */
 656			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 657				goto out_of_range;
 658			un->semadj[sop->sem_num] = undo;
 659		}
 660
 661		curr->semval = result;
 662	}
 663
 664	sop--;
 665	pid = q->pid;
 666	while (sop >= sops) {
 667		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
 668		sop--;
 669	}
 670
 671	return 0;
 672
 673out_of_range:
 674	result = -ERANGE;
 675	goto undo;
 676
 677would_block:
 678	q->blocking = sop;
 679
 680	if (sop->sem_flg & IPC_NOWAIT)
 681		result = -EAGAIN;
 682	else
 683		result = 1;
 684
 685undo:
 686	sop--;
 687	while (sop >= sops) {
 688		sem_op = sop->sem_op;
 689		sma->sems[sop->sem_num].semval -= sem_op;
 690		if (sop->sem_flg & SEM_UNDO)
 691			un->semadj[sop->sem_num] += sem_op;
 692		sop--;
 693	}
 694
 695	return result;
 696}
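    /*
     * Illustrative user-space operations matching the three blocking
     * cases documented above (a sketch; semid is an assumption):
     *
     *	struct sembuf sops[3] = {
     *		{ .sem_num = 0, .sem_op = +1 },	(1) never blocks
     *		{ .sem_num = 1, .sem_op =  0 },	(2) wait-for-zero
     *		{ .sem_num = 2, .sem_op = -1 },	(3) may block
     *	};
     *	semop(semid, sops, 3);
     *
     * All three are applied atomically: either every operation succeeds,
     * or none is applied and the caller sleeps (or gets -EAGAIN if
     * IPC_NOWAIT is set on the blocking sop).
     */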
 697
 698static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 699{
 700	int result, sem_op, nsops;
 701	struct sembuf *sop;
 702	struct sem *curr;
 703	struct sembuf *sops;
 704	struct sem_undo *un;
 705
 706	sops = q->sops;
 707	nsops = q->nsops;
 708	un = q->undo;
 709
 710	if (unlikely(q->dupsop))
 711		return perform_atomic_semop_slow(sma, q);
 712
 713	/*
 714	 * We scan the semaphore set twice, first to ensure that the entire
 715	 * operation can succeed, therefore avoiding any pointless writes
 716	 * to shared memory and having to undo such changes in order to block
 717	 * until the operations can go through.
 718	 */
 719	for (sop = sops; sop < sops + nsops; sop++) {
 720		curr = &sma->sems[sop->sem_num];
 721		sem_op = sop->sem_op;
 722		result = curr->semval;
 723
 724		if (!sem_op && result)
 725			goto would_block; /* wait-for-zero */
 726
 727		result += sem_op;
 728		if (result < 0)
 729			goto would_block;
 730
 731		if (result > SEMVMX)
 732			return -ERANGE;
 733
 734		if (sop->sem_flg & SEM_UNDO) {
 735			int undo = un->semadj[sop->sem_num] - sem_op;
 736
 737			/* Exceeding the undo range is an error. */
 738			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 739				return -ERANGE;
 740		}
 741	}
 742
 743	for (sop = sops; sop < sops + nsops; sop++) {
 744		curr = &sma->sems[sop->sem_num];
 745		sem_op = sop->sem_op;
 746		result = curr->semval;
 747
 748		if (sop->sem_flg & SEM_UNDO) {
 749			int undo = un->semadj[sop->sem_num] - sem_op;
 750
 751			un->semadj[sop->sem_num] = undo;
 752		}
 753		curr->semval += sem_op;
 754		ipc_update_pid(&curr->sempid, q->pid);
 755	}
 756
 757	return 0;
 758
 759would_block:
 760	q->blocking = sop;
 761	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
 762}
 763
 764static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
 765					     struct wake_q_head *wake_q)
 766{
 767	wake_q_add(wake_q, q->sleeper);
 768	/*
 769	 * Rely on the above implicit barrier, such that we can
 770	 * ensure that we hold reference to the task before setting
 771	 * q->status. Otherwise we could race with do_exit if the
 772	 * task is awoken by an external event before calling
 773	 * wake_up_process().
 774	 */
 775	WRITE_ONCE(q->status, error);
 776}
 777
 778static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
 779{
 780	list_del(&q->list);
 781	if (q->nsops > 1)
 782		sma->complex_count--;
 783}
 784
 785/** check_restart(sma, q)
 786 * @sma: semaphore array
 787 * @q: the operation that just completed
 788 *
 789 * update_queue is O(N^2) when it restarts scanning the whole queue of
 790 * waiting operations. Therefore this function checks if the restart is
 791 * really necessary. It is called after a previously waiting operation
 792 * modified the array.
 793 * Note that wait-for-zero operations are handled without restart.
 794 */
 795static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
 796{
 797	/* pending complex alter operations are too difficult to analyse */
 798	if (!list_empty(&sma->pending_alter))
 799		return 1;
 800
 801	/* we were a sleeping complex operation. Too difficult */
 802	if (q->nsops > 1)
 803		return 1;
 804
 805	/* It is impossible that someone waits for the new value:
 806	 * - complex operations always restart.
 807	 * - wait-for-zero are handled separately.
 808	 * - q is a previously sleeping simple operation that
 809	 *   altered the array. It must be a decrement, because
 810	 *   simple increments never sleep.
 811	 * - If there are older (higher priority) decrements
 812	 *   in the queue, then they have observed the original
 813	 *   semval value and couldn't proceed. The operation
 814	 *   decremented the value - thus they won't proceed either.
 815	 */
 816	return 0;
 817}
 818
 819/**
 820 * wake_const_ops - wake up non-alter tasks
 821 * @sma: semaphore array.
 822 * @semnum: semaphore that was modified.
 823 * @wake_q: lockless wake-queue head.
 824 *
 825 * wake_const_ops must be called after a semaphore in a semaphore array
 826 * was set to 0. If complex const operations are pending, wake_const_ops must
 827 * be called with semnum = -1, as well as with the number of each modified
 828 * semaphore.
 829 * The tasks that must be woken up are added to @wake_q. The return code
 830 * is stored in q->status.
 831 * The function returns 1 if at least one operation was completed successfully.
 832 */
 833static int wake_const_ops(struct sem_array *sma, int semnum,
 834			  struct wake_q_head *wake_q)
 835{
 836	struct sem_queue *q, *tmp;
 837	struct list_head *pending_list;
 838	int semop_completed = 0;
 839
 840	if (semnum == -1)
 841		pending_list = &sma->pending_const;
 842	else
 843		pending_list = &sma->sems[semnum].pending_const;
 844
 845	list_for_each_entry_safe(q, tmp, pending_list, list) {
 846		int error = perform_atomic_semop(sma, q);
 847
 848		if (error > 0)
 849			continue;
 850		/* operation completed, remove from queue & wakeup */
 851		unlink_queue(sma, q);
 852
 853		wake_up_sem_queue_prepare(q, error, wake_q);
 854		if (error == 0)
 855			semop_completed = 1;
 856	}
 857
 858	return semop_completed;
 859}
 860
 861/**
 862 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 863 * @sma: semaphore array
 864 * @sops: operations that were performed
 865 * @nsops: number of operations
 866 * @wake_q: lockless wake-queue head
 867 *
 868 * Checks all required queues for wait-for-zero operations, based
 869 * on the actual changes that were performed on the semaphore array.
 870 * The function returns 1 if at least one operation was completed successfully.
 871 */
 872static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 873				int nsops, struct wake_q_head *wake_q)
 874{
 875	int i;
 876	int semop_completed = 0;
 877	int got_zero = 0;
 878
 879	/* first: the per-semaphore queues, if known */
 880	if (sops) {
 881		for (i = 0; i < nsops; i++) {
 882			int num = sops[i].sem_num;
 883
 884			if (sma->sems[num].semval == 0) {
 885				got_zero = 1;
 886				semop_completed |= wake_const_ops(sma, num, wake_q);
 887			}
 888		}
 889	} else {
 890		/*
 891		 * No sops means the modified semaphores are not known.
 892		 * Assume all were changed.
 893		 */
 894		for (i = 0; i < sma->sem_nsems; i++) {
 895			if (sma->sems[i].semval == 0) {
 896				got_zero = 1;
 897				semop_completed |= wake_const_ops(sma, i, wake_q);
 898			}
 899		}
 900	}
 901	/*
 902	 * If one of the modified semaphores got 0,
 903	 * then check the global queue, too.
 904	 */
 905	if (got_zero)
 906		semop_completed |= wake_const_ops(sma, -1, wake_q);
 907
 908	return semop_completed;
 909}
 910
 911
 912/**
 913 * update_queue - look for tasks that can be completed.
 914 * @sma: semaphore array.
 915 * @semnum: semaphore that was modified.
 916 * @wake_q: lockless wake-queue head.
 917 *
 918 * update_queue must be called after a semaphore in a semaphore array
 919 * was modified. If multiple semaphores were modified, update_queue must
 920 * be called with semnum = -1, as well as with the number of each modified
 921 * semaphore.
 922 * The tasks that must be woken up are added to @wake_q. The return code
 923 * is stored in q->status.
 924 * The function internally checks if const operations can now succeed.
 925 *
 926 * The function returns 1 if at least one semop was completed successfully.
 927 */
 928static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
 929{
 930	struct sem_queue *q, *tmp;
 931	struct list_head *pending_list;
 932	int semop_completed = 0;
 933
 934	if (semnum == -1)
 935		pending_list = &sma->pending_alter;
 936	else
 937		pending_list = &sma->sems[semnum].pending_alter;
 938
 939again:
 940	list_for_each_entry_safe(q, tmp, pending_list, list) {
 941		int error, restart;
 942
 943		/* If we are scanning the single-sop, per-semaphore list of
 944		 * one semaphore and that semaphore is 0, then it is not
 945		 * necessary to scan further: simple increments
 946		 * that affect only one entry succeed immediately and cannot
 947		 * be in the per-semaphore pending queue, and decrements
 948		 * cannot be successful if the value is already 0.
 949		 */
 950		if (semnum != -1 && sma->sems[semnum].semval == 0)
 951			break;
 952
 953		error = perform_atomic_semop(sma, q);
 954
 955		/* Does q->sleeper still need to sleep? */
 956		if (error > 0)
 957			continue;
 958
 959		unlink_queue(sma, q);
 960
 961		if (error) {
 962			restart = 0;
 963		} else {
 964			semop_completed = 1;
 965			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
 966			restart = check_restart(sma, q);
 967		}
 968
 969		wake_up_sem_queue_prepare(q, error, wake_q);
 970		if (restart)
 971			goto again;
 972	}
 973	return semop_completed;
 974}
 975
 976/**
 977 * set_semotime - set sem_otime
 978 * @sma: semaphore array
 979 * @sops: operations that modified the array, may be NULL
 980 *
 981 * sem_otime is replicated to avoid cache line thrashing.
 982 * This function sets one instance to the current time.
 983 */
 984static void set_semotime(struct sem_array *sma, struct sembuf *sops)
 985{
 986	if (sops == NULL) {
 987		sma->sems[0].sem_otime = get_seconds();
 988	} else {
 989		sma->sems[sops[0].sem_num].sem_otime =
 990							get_seconds();
 991	}
 992}
 993
 994/**
 995 * do_smart_update - optimized update_queue
 996 * @sma: semaphore array
 997 * @sops: operations that were performed
 998 * @nsops: number of operations
 999 * @otime: force setting otime
1000 * @wake_q: lockless wake-queue head
1001 *
1002 * do_smart_update() does the required calls to update_queue and wakeup_zero,
1003 * based on the actual changes that were performed on the semaphore array.
1004 * Note that the function does not do the actual wake-up: the caller is
1005 * responsible for calling wake_up_q().
1006 * It is safe to perform this call after dropping all locks.
1007 */
1008static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
1009			    int otime, struct wake_q_head *wake_q)
1010{
1011	int i;
1012
1013	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);
1014
1015	if (!list_empty(&sma->pending_alter)) {
1016		/* semaphore array uses the global queue - just process it. */
1017		otime |= update_queue(sma, -1, wake_q);
1018	} else {
1019		if (!sops) {
1020			/*
1021			 * No sops, thus the modified semaphores are not
1022			 * known. Check all.
1023			 */
1024			for (i = 0; i < sma->sem_nsems; i++)
1025				otime |= update_queue(sma, i, wake_q);
1026		} else {
1027			/*
1028			 * Check the semaphores that were increased:
1029			 * - No complex ops, thus all sleeping ops are
1030			 *   decrements.
1031			 * - If we decreased the value, then any sleeping
1032			 *   semaphore ops won't be able to run: If the
1033			 *   previous value was too small, then the new
1034			 *   value will be too small, too.
1035			 */
1036			for (i = 0; i < nsops; i++) {
1037				if (sops[i].sem_op > 0) {
1038					otime |= update_queue(sma,
1039							      sops[i].sem_num, wake_q);
1040				}
1041			}
1042		}
1043	}
1044	if (otime)
1045		set_semotime(sma, sops);
1046}
1047
1048/*
1049 * check_qop: Test if a queued operation sleeps on the semaphore semnum
1050 */
1051static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
1052			bool count_zero)
1053{
1054	struct sembuf *sop = q->blocking;
1055
1056	/*
1057	 * Linux always (since 0.99.10) reported a task as sleeping on all
1058	 * semaphores. This violates SUS, therefore it was changed to the
1059	 * standard compliant behavior.
1060	 * Give the administrators a chance to notice that an application
1061	 * might misbehave because it relies on the Linux behavior.
1062	 */
1063	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
1064			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
1065			current->comm, task_pid_nr(current));
1066
1067	if (sop->sem_num != semnum)
1068		return 0;
1069
1070	if (count_zero && sop->sem_op == 0)
1071		return 1;
1072	if (!count_zero && sop->sem_op < 0)
1073		return 1;
1074
1075	return 0;
1076}
1077
1078/* The following counts are associated to each semaphore:
1079 *   semncnt        number of tasks waiting on semval being nonzero
1080 *   semzcnt        number of tasks waiting on semval being zero
1081 *
1082 * By definition, a task waits only on the semaphore of the first semop
1083 * that cannot proceed, even if additional operations would block, too.
1084 */
1085static int count_semcnt(struct sem_array *sma, ushort semnum,
1086			bool count_zero)
1087{
1088	struct list_head *l;
1089	struct sem_queue *q;
1090	int semcnt;
1091
1092	semcnt = 0;
1093	/* First: check the simple operations. They are easy to evaluate */
1094	if (count_zero)
1095		l = &sma->sems[semnum].pending_const;
1096	else
1097		l = &sma->sems[semnum].pending_alter;
1098
1099	list_for_each_entry(q, l, list) {
1100		/* all tasks on a per-semaphore list sleep on exactly
1101		 * that semaphore
1102		 */
1103		semcnt++;
1104	}
1105
1106	/* Then: check the complex operations. */
1107	list_for_each_entry(q, &sma->pending_alter, list) {
1108		semcnt += check_qop(sma, semnum, q, count_zero);
1109	}
1110	if (count_zero) {
1111		list_for_each_entry(q, &sma->pending_const, list) {
1112			semcnt += check_qop(sma, semnum, q, count_zero);
1113		}
1114	}
1115	return semcnt;
1116}
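    /*
     * Illustrative user-space queries served by count_semcnt() (a
     * sketch; error handling omitted):
     *
     *	int ncnt = semctl(semid, 0, GETNCNT);	tasks waiting for the
     *						value of sem 0 to increase
     *	int zcnt = semctl(semid, 0, GETZCNT);	tasks waiting for the
     *						value of sem 0 to reach 0
     */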
1117
1118/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1119 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
1120 * remains locked on exit.
1121 */
1122static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1123{
1124	struct sem_undo *un, *tu;
1125	struct sem_queue *q, *tq;
1126	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1127	int i;
1128	DEFINE_WAKE_Q(wake_q);
1129
1130	/* Free the existing undo structures for this semaphore set.  */
1131	ipc_assert_locked_object(&sma->sem_perm);
1132	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1133		list_del(&un->list_id);
1134		spin_lock(&un->ulp->lock);
1135		un->semid = -1;
1136		list_del_rcu(&un->list_proc);
1137		spin_unlock(&un->ulp->lock);
1138		kfree_rcu(un, rcu);
1139	}
1140
1141	/* Wake up all pending processes and let them fail with EIDRM. */
1142	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1143		unlink_queue(sma, q);
1144		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1145	}
1146
1147	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1148		unlink_queue(sma, q);
1149		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1150	}
1151	for (i = 0; i < sma->sem_nsems; i++) {
1152		struct sem *sem = &sma->sems[i];
1153		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
1154			unlink_queue(sma, q);
1155			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1156		}
1157		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
1158			unlink_queue(sma, q);
1159			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1160		}
1161		ipc_update_pid(&sem->sempid, NULL);
1162	}
1163
1164	/* Remove the semaphore set from the IDR */
1165	sem_rmid(ns, sma);
1166	sem_unlock(sma, -1);
1167	rcu_read_unlock();
1168
1169	wake_up_q(&wake_q);
1170	ns->used_sems -= sma->sem_nsems;
1171	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1172}
1173
1174static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
1175{
1176	switch (version) {
1177	case IPC_64:
1178		return copy_to_user(buf, in, sizeof(*in));
1179	case IPC_OLD:
1180	    {
1181		struct semid_ds out;
1182
1183		memset(&out, 0, sizeof(out));
1184
1185		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
1186
1187		out.sem_otime	= in->sem_otime;
1188		out.sem_ctime	= in->sem_ctime;
1189		out.sem_nsems	= in->sem_nsems;
1190
1191		return copy_to_user(buf, &out, sizeof(out));
1192	    }
1193	default:
1194		return -EINVAL;
1195	}
1196}
1197
1198static time64_t get_semotime(struct sem_array *sma)
1199{
1200	int i;
1201	time64_t res;
1202
1203	res = sma->sems[0].sem_otime;
1204	for (i = 1; i < sma->sem_nsems; i++) {
1205		time64_t to = sma->sems[i].sem_otime;
1206
1207		if (to > res)
1208			res = to;
1209	}
1210	return res;
1211}
1212
1213static int semctl_stat(struct ipc_namespace *ns, int semid,
1214			 int cmd, struct semid64_ds *semid64)
1215{
1216	struct sem_array *sma;
1217	int id = 0;
1218	int err;
1219
1220	memset(semid64, 0, sizeof(*semid64));
1221
1222	rcu_read_lock();
1223	if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
1224		sma = sem_obtain_object(ns, semid);
1225		if (IS_ERR(sma)) {
1226			err = PTR_ERR(sma);
1227			goto out_unlock;
1228		}
1229		id = sma->sem_perm.id;
1230	} else { /* IPC_STAT */
1231		sma = sem_obtain_object_check(ns, semid);
1232		if (IS_ERR(sma)) {
1233			err = PTR_ERR(sma);
1234			goto out_unlock;
1235		}
1236	}
1237
1238	/* see comment for SHM_STAT_ANY */
1239	if (cmd == SEM_STAT_ANY)
1240		audit_ipc_obj(&sma->sem_perm);
1241	else {
1242		err = -EACCES;
1243		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1244			goto out_unlock;
1245	}
1246
1247	err = security_sem_semctl(&sma->sem_perm, cmd);
1248	if (err)
1249		goto out_unlock;
1250
1251	ipc_lock_object(&sma->sem_perm);
1252
1253	if (!ipc_valid_object(&sma->sem_perm)) {
1254		ipc_unlock_object(&sma->sem_perm);
1255		err = -EIDRM;
1256		goto out_unlock;
1257	}
1258
1259	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
1260	semid64->sem_otime = get_semotime(sma);
1261	semid64->sem_ctime = sma->sem_ctime;
1262	semid64->sem_nsems = sma->sem_nsems;
1263
1264	ipc_unlock_object(&sma->sem_perm);
1265	rcu_read_unlock();
1266	return id;
1267
1268out_unlock:
1269	rcu_read_unlock();
1270	return err;
1271}
1272
1273static int semctl_info(struct ipc_namespace *ns, int semid,
1274			 int cmd, void __user *p)
1275{
1276	struct seminfo seminfo;
1277	int max_id;
1278	int err;
1279
1280	err = security_sem_semctl(NULL, cmd);
1281	if (err)
1282		return err;
1283
1284	memset(&seminfo, 0, sizeof(seminfo));
1285	seminfo.semmni = ns->sc_semmni;
1286	seminfo.semmns = ns->sc_semmns;
1287	seminfo.semmsl = ns->sc_semmsl;
1288	seminfo.semopm = ns->sc_semopm;
1289	seminfo.semvmx = SEMVMX;
1290	seminfo.semmnu = SEMMNU;
1291	seminfo.semmap = SEMMAP;
1292	seminfo.semume = SEMUME;
1293	down_read(&sem_ids(ns).rwsem);
1294	if (cmd == SEM_INFO) {
1295		seminfo.semusz = sem_ids(ns).in_use;
1296		seminfo.semaem = ns->used_sems;
1297	} else {
1298		seminfo.semusz = SEMUSZ;
1299		seminfo.semaem = SEMAEM;
1300	}
1301	max_id = ipc_get_maxid(&sem_ids(ns));
1302	up_read(&sem_ids(ns).rwsem);
1303	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1304		return -EFAULT;
1305	return (max_id < 0) ? 0 : max_id;
1306}
1307
1308static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1309		int val)
1310{
1311	struct sem_undo *un;
1312	struct sem_array *sma;
1313	struct sem *curr;
1314	int err;
1315	DEFINE_WAKE_Q(wake_q);
1316
1317	if (val > SEMVMX || val < 0)
1318		return -ERANGE;
1319
1320	rcu_read_lock();
1321	sma = sem_obtain_object_check(ns, semid);
1322	if (IS_ERR(sma)) {
1323		rcu_read_unlock();
1324		return PTR_ERR(sma);
1325	}
1326
1327	if (semnum < 0 || semnum >= sma->sem_nsems) {
1328		rcu_read_unlock();
1329		return -EINVAL;
1330	}
1331
1332
1333	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1334		rcu_read_unlock();
1335		return -EACCES;
1336	}
1337
1338	err = security_sem_semctl(&sma->sem_perm, SETVAL);
1339	if (err) {
1340		rcu_read_unlock();
1341		return -EACCES;
1342	}
1343
1344	sem_lock(sma, NULL, -1);
1345
1346	if (!ipc_valid_object(&sma->sem_perm)) {
1347		sem_unlock(sma, -1);
1348		rcu_read_unlock();
1349		return -EIDRM;
1350	}
1351
1352	curr = &sma->sems[semnum];
1353
1354	ipc_assert_locked_object(&sma->sem_perm);
1355	list_for_each_entry(un, &sma->list_id, list_id)
1356		un->semadj[semnum] = 0;
1357
1358	curr->semval = val;
1359	ipc_update_pid(&curr->sempid, task_tgid(current));
1360	sma->sem_ctime = ktime_get_real_seconds();
1361	/* maybe some queued-up processes were waiting for this */
1362	do_smart_update(sma, NULL, 0, 0, &wake_q);
1363	sem_unlock(sma, -1);
1364	rcu_read_unlock();
1365	wake_up_q(&wake_q);
1366	return 0;
1367}
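    /*
     * Illustrative user-space counterpart of semctl_setval() (a sketch;
     * per semctl(2) the caller must declare union semun itself):
     *
     *	union semun { int val; } arg = { .val = 1 };
     *	if (semctl(semid, 0, SETVAL, arg) < 0)
     *		perror("semctl");
     *
     * Note that SETVAL also clears the undo adjustment of that semaphore
     * in every process, as done by the list_for_each_entry() loop above.
     */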
1368
1369static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1370		int cmd, void __user *p)
1371{
1372	struct sem_array *sma;
1373	struct sem *curr;
1374	int err, nsems;
1375	ushort fast_sem_io[SEMMSL_FAST];
1376	ushort *sem_io = fast_sem_io;
1377	DEFINE_WAKE_Q(wake_q);
1378
1379	rcu_read_lock();
1380	sma = sem_obtain_object_check(ns, semid);
1381	if (IS_ERR(sma)) {
1382		rcu_read_unlock();
1383		return PTR_ERR(sma);
1384	}
1385
1386	nsems = sma->sem_nsems;
1387
1388	err = -EACCES;
1389	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1390		goto out_rcu_wakeup;
1391
1392	err = security_sem_semctl(&sma->sem_perm, cmd);
1393	if (err)
1394		goto out_rcu_wakeup;
1395
1396	err = -EACCES;
1397	switch (cmd) {
1398	case GETALL:
1399	{
1400		ushort __user *array = p;
1401		int i;
1402
1403		sem_lock(sma, NULL, -1);
1404		if (!ipc_valid_object(&sma->sem_perm)) {
1405			err = -EIDRM;
1406			goto out_unlock;
1407		}
1408		if (nsems > SEMMSL_FAST) {
1409			if (!ipc_rcu_getref(&sma->sem_perm)) {
1410				err = -EIDRM;
1411				goto out_unlock;
1412			}
1413			sem_unlock(sma, -1);
1414			rcu_read_unlock();
1415			sem_io = kvmalloc_array(nsems, sizeof(ushort),
1416						GFP_KERNEL);
1417			if (sem_io == NULL) {
1418				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1419				return -ENOMEM;
1420			}
1421
1422			rcu_read_lock();
1423			sem_lock_and_putref(sma);
1424			if (!ipc_valid_object(&sma->sem_perm)) {
1425				err = -EIDRM;
1426				goto out_unlock;
1427			}
1428		}
1429		for (i = 0; i < sma->sem_nsems; i++)
1430			sem_io[i] = sma->sems[i].semval;
1431		sem_unlock(sma, -1);
1432		rcu_read_unlock();
1433		err = 0;
1434		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1435			err = -EFAULT;
1436		goto out_free;
1437	}
1438	case SETALL:
1439	{
1440		int i;
1441		struct sem_undo *un;
1442
1443		if (!ipc_rcu_getref(&sma->sem_perm)) {
1444			err = -EIDRM;
1445			goto out_rcu_wakeup;
1446		}
1447		rcu_read_unlock();
1448
1449		if (nsems > SEMMSL_FAST) {
1450			sem_io = kvmalloc_array(nsems, sizeof(ushort),
1451						GFP_KERNEL);
1452			if (sem_io == NULL) {
1453				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1454				return -ENOMEM;
1455			}
1456		}
1457
1458		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
1459			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1460			err = -EFAULT;
1461			goto out_free;
1462		}
1463
1464		for (i = 0; i < nsems; i++) {
1465			if (sem_io[i] > SEMVMX) {
1466				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1467				err = -ERANGE;
1468				goto out_free;
1469			}
1470		}
1471		rcu_read_lock();
1472		sem_lock_and_putref(sma);
1473		if (!ipc_valid_object(&sma->sem_perm)) {
1474			err = -EIDRM;
1475			goto out_unlock;
1476		}
1477
1478		for (i = 0; i < nsems; i++) {
1479			sma->sems[i].semval = sem_io[i];
1480			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
1481		}
1482
1483		ipc_assert_locked_object(&sma->sem_perm);
1484		list_for_each_entry(un, &sma->list_id, list_id) {
1485			for (i = 0; i < nsems; i++)
1486				un->semadj[i] = 0;
1487		}
1488		sma->sem_ctime = ktime_get_real_seconds();
1489		/* maybe some queued-up processes were waiting for this */
1490		do_smart_update(sma, NULL, 0, 0, &wake_q);
1491		err = 0;
1492		goto out_unlock;
1493	}
1494	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1495	}
1496	err = -EINVAL;
1497	if (semnum < 0 || semnum >= nsems)
1498		goto out_rcu_wakeup;
1499
1500	sem_lock(sma, NULL, -1);
1501	if (!ipc_valid_object(&sma->sem_perm)) {
1502		err = -EIDRM;
1503		goto out_unlock;
1504	}
1505	curr = &sma->sems[semnum];
1506
1507	switch (cmd) {
1508	case GETVAL:
1509		err = curr->semval;
1510		goto out_unlock;
1511	case GETPID:
1512		err = pid_vnr(curr->sempid);
1513		goto out_unlock;
1514	case GETNCNT:
1515		err = count_semcnt(sma, semnum, 0);
1516		goto out_unlock;
1517	case GETZCNT:
1518		err = count_semcnt(sma, semnum, 1);
1519		goto out_unlock;
1520	}
1521
1522out_unlock:
1523	sem_unlock(sma, -1);
1524out_rcu_wakeup:
1525	rcu_read_unlock();
1526	wake_up_q(&wake_q);
1527out_free:
1528	if (sem_io != fast_sem_io)
1529		kvfree(sem_io);
1530	return err;
1531}
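    /*
     * Illustrative GETALL/SETALL round trip from user space (a sketch;
     * a 4-semaphore set is assumed):
     *
     *	unsigned short vals[4] = { 1, 0, 0, 2 };
     *	union semun { unsigned short *array; } arg = { .array = vals };
     *
     *	semctl(semid, 0, SETALL, arg);	(writes all four semvals at once)
     *	semctl(semid, 0, GETALL, arg);	(reads them back into vals[])
     *
     * For sets larger than SEMMSL_FAST, the path above drops the lock to
     * kvmalloc a temporary array and then revalidates the object.
     */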
1532
1533static inline unsigned long
1534copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1535{
1536	switch (version) {
1537	case IPC_64:
1538		if (copy_from_user(out, buf, sizeof(*out)))
1539			return -EFAULT;
1540		return 0;
1541	case IPC_OLD:
1542	    {
1543		struct semid_ds tbuf_old;
1544
1545		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1546			return -EFAULT;
1547
1548		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
1549		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
1550		out->sem_perm.mode	= tbuf_old.sem_perm.mode;
1551
1552		return 0;
1553	    }
1554	default:
1555		return -EINVAL;
1556	}
1557}
1558
1559/*
1560 * This function handles some semctl commands which require the rwsem
1561 * to be held in write mode.
1562 * NOTE: no locks must be held, the rwsem is taken inside this function.
1563 */
1564static int semctl_down(struct ipc_namespace *ns, int semid,
1565		       int cmd, struct semid64_ds *semid64)
1566{
1567	struct sem_array *sma;
1568	int err;
1569	struct kern_ipc_perm *ipcp;
1570
1571	down_write(&sem_ids(ns).rwsem);
1572	rcu_read_lock();
1573
1574	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
1575				      &semid64->sem_perm, 0);
1576	if (IS_ERR(ipcp)) {
1577		err = PTR_ERR(ipcp);
1578		goto out_unlock1;
1579	}
1580
1581	sma = container_of(ipcp, struct sem_array, sem_perm);
1582
1583	err = security_sem_semctl(&sma->sem_perm, cmd);
1584	if (err)
1585		goto out_unlock1;
1586
1587	switch (cmd) {
1588	case IPC_RMID:
1589		sem_lock(sma, NULL, -1);
1590		/* freeary unlocks the ipc object and rcu */
1591		freeary(ns, ipcp);
1592		goto out_up;
1593	case IPC_SET:
1594		sem_lock(sma, NULL, -1);
1595		err = ipc_update_perm(&semid64->sem_perm, ipcp);
1596		if (err)
1597			goto out_unlock0;
1598		sma->sem_ctime = ktime_get_real_seconds();
1599		break;
1600	default:
1601		err = -EINVAL;
1602		goto out_unlock1;
1603	}
1604
1605out_unlock0:
1606	sem_unlock(sma, -1);
1607out_unlock1:
1608	rcu_read_unlock();
1609out_up:
1610	up_write(&sem_ids(ns).rwsem);
1611	return err;
1612}
1613
1614long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg)
1615{
1616	int version;
1617	struct ipc_namespace *ns;
1618	void __user *p = (void __user *)arg;
1619	struct semid64_ds semid64;
1620	int err;
1621
1622	if (semid < 0)
1623		return -EINVAL;
1624
1625	version = ipc_parse_version(&cmd);
1626	ns = current->nsproxy->ipc_ns;
1627
1628	switch (cmd) {
1629	case IPC_INFO:
1630	case SEM_INFO:
1631		return semctl_info(ns, semid, cmd, p);
1632	case IPC_STAT:
1633	case SEM_STAT:
1634	case SEM_STAT_ANY:
1635		err = semctl_stat(ns, semid, cmd, &semid64);
1636		if (err < 0)
1637			return err;
1638		if (copy_semid_to_user(p, &semid64, version))
1639			err = -EFAULT;
1640		return err;
1641	case GETALL:
1642	case GETVAL:
1643	case GETPID:
1644	case GETNCNT:
1645	case GETZCNT:
1646	case SETALL:
1647		return semctl_main(ns, semid, semnum, cmd, p);
1648	case SETVAL: {
1649		int val;
1650#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1651		/* big-endian 64bit */
1652		val = arg >> 32;
1653#else
1654		/* 32bit or little-endian 64bit */
1655		val = arg;
1656#endif
1657		return semctl_setval(ns, semid, semnum, val);
1658	}
1659	case IPC_SET:
1660		if (copy_semid_from_user(&semid64, p, version))
1661			return -EFAULT;
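    		/* fallthru */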
1662	case IPC_RMID:
1663		return semctl_down(ns, semid, cmd, &semid64);
1664	default:
1665		return -EINVAL;
1666	}
1667}
1668
1669SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1670{
1671	return ksys_semctl(semid, semnum, cmd, arg);
1672}
1673
1674#ifdef CONFIG_COMPAT
1675
1676struct compat_semid_ds {
1677	struct compat_ipc_perm sem_perm;
1678	compat_time_t sem_otime;
1679	compat_time_t sem_ctime;
1680	compat_uptr_t sem_base;
1681	compat_uptr_t sem_pending;
1682	compat_uptr_t sem_pending_last;
1683	compat_uptr_t undo;
1684	unsigned short sem_nsems;
1685};
1686
1687static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
1688					int version)
1689{
1690	memset(out, 0, sizeof(*out));
1691	if (version == IPC_64) {
1692		struct compat_semid64_ds __user *p = buf;
1693		return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
1694	} else {
1695		struct compat_semid_ds __user *p = buf;
1696		return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
1697	}
1698}
1699
1700static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
1701					int version)
1702{
1703	if (version == IPC_64) {
1704		struct compat_semid64_ds v;
1705		memset(&v, 0, sizeof(v));
1706		to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
1707		v.sem_otime = in->sem_otime;
1708		v.sem_ctime = in->sem_ctime;
1709		v.sem_nsems = in->sem_nsems;
1710		return copy_to_user(buf, &v, sizeof(v));
1711	} else {
1712		struct compat_semid_ds v;
1713		memset(&v, 0, sizeof(v));
1714		to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
1715		v.sem_otime = in->sem_otime;
1716		v.sem_ctime = in->sem_ctime;
1717		v.sem_nsems = in->sem_nsems;
1718		return copy_to_user(buf, &v, sizeof(v));
1719	}
1720}
1721
1722long compat_ksys_semctl(int semid, int semnum, int cmd, int arg)
1723{
1724	void __user *p = compat_ptr(arg);
1725	struct ipc_namespace *ns;
1726	struct semid64_ds semid64;
1727	int version = compat_ipc_parse_version(&cmd);
1728	int err;
1729
1730	ns = current->nsproxy->ipc_ns;
1731
1732	if (semid < 0)
1733		return -EINVAL;
1734
1735	switch (cmd & (~IPC_64)) {
1736	case IPC_INFO:
1737	case SEM_INFO:
1738		return semctl_info(ns, semid, cmd, p);
1739	case IPC_STAT:
1740	case SEM_STAT:
1741	case SEM_STAT_ANY:
1742		err = semctl_stat(ns, semid, cmd, &semid64);
1743		if (err < 0)
1744			return err;
1745		if (copy_compat_semid_to_user(p, &semid64, version))
1746			err = -EFAULT;
1747		return err;
1748	case GETVAL:
1749	case GETPID:
1750	case GETNCNT:
1751	case GETZCNT:
1752	case GETALL:
1753	case SETALL:
1754		return semctl_main(ns, semid, semnum, cmd, p);
1755	case SETVAL:
1756		return semctl_setval(ns, semid, semnum, arg);
1757	case IPC_SET:
1758		if (copy_compat_semid_from_user(&semid64, p, version))
1759			return -EFAULT;
1760		/* fallthru */
1761	case IPC_RMID:
1762		return semctl_down(ns, semid, cmd, &semid64);
1763	default:
1764		return -EINVAL;
1765	}
1766}
1767
1768COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
1769{
1770	return compat_ksys_semctl(semid, semnum, cmd, arg);
1771}
1772#endif
1773
1774/* If the task doesn't already have an undo_list, then allocate one
1775 * here.  We guarantee there is only one thread using this undo list,
1776 * and current is THE ONE
1777 *
1778 * If this allocation and assignment succeeds, but later
1779 * portions of this code fail, there is no need to free the sem_undo_list.
1780 * Just let it stay associated with the task, and it'll be freed later
1781 * at exit time.
1782 *
1783 * This can block, so callers must hold no locks.
1784 */
1785static inline int get_undo_list(struct sem_undo_list **undo_listp)
1786{
1787	struct sem_undo_list *undo_list;
1788
1789	undo_list = current->sysvsem.undo_list;
1790	if (!undo_list) {
1791		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1792		if (undo_list == NULL)
1793			return -ENOMEM;
1794		spin_lock_init(&undo_list->lock);
1795		refcount_set(&undo_list->refcnt, 1);
1796		INIT_LIST_HEAD(&undo_list->list_proc);
1797
1798		current->sysvsem.undo_list = undo_list;
1799	}
1800	*undo_listp = undo_list;
1801	return 0;
1802}
1803
1804static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1805{
1806	struct sem_undo *un;
1807
1808	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1809		if (un->semid == semid)
1810			return un;
1811	}
1812	return NULL;
1813}
1814
1815static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1816{
1817	struct sem_undo *un;
1818
1819	assert_spin_locked(&ulp->lock);
1820
1821	un = __lookup_undo(ulp, semid);
1822	if (un) {
1823		list_del_rcu(&un->list_proc);
1824		list_add_rcu(&un->list_proc, &ulp->list_proc);
1825	}
1826	return un;
1827}
1828
1829/**
1830 * find_alloc_undo - lookup (and if not present create) undo array
1831 * @ns: namespace
1832 * @semid: semaphore array id
1833 *
1834 * The function looks up (and if not present creates) the undo structure.
1835 * The size of the undo structure depends on the size of the semaphore
1836 * array, thus the alloc path is not that straightforward.
1837 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
1838 * performs a rcu_read_lock().
1839 */
1840static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1841{
1842	struct sem_array *sma;
1843	struct sem_undo_list *ulp;
1844	struct sem_undo *un, *new;
1845	int nsems, error;
1846
1847	error = get_undo_list(&ulp);
1848	if (error)
1849		return ERR_PTR(error);
1850
1851	rcu_read_lock();
1852	spin_lock(&ulp->lock);
1853	un = lookup_undo(ulp, semid);
1854	spin_unlock(&ulp->lock);
1855	if (likely(un != NULL))
1856		goto out;
1857
1858	/* no undo structure around - allocate one. */
1859	/* step 1: figure out the size of the semaphore array */
1860	sma = sem_obtain_object_check(ns, semid);
1861	if (IS_ERR(sma)) {
1862		rcu_read_unlock();
1863		return ERR_CAST(sma);
1864	}
1865
1866	nsems = sma->sem_nsems;
1867	if (!ipc_rcu_getref(&sma->sem_perm)) {
1868		rcu_read_unlock();
1869		un = ERR_PTR(-EIDRM);
1870		goto out;
1871	}
1872	rcu_read_unlock();
1873
1874	/* step 2: allocate new undo structure */
1875	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1876	if (!new) {
1877		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1878		return ERR_PTR(-ENOMEM);
1879	}
1880
1881	/* step 3: Acquire the lock on semaphore array */
1882	rcu_read_lock();
1883	sem_lock_and_putref(sma);
1884	if (!ipc_valid_object(&sma->sem_perm)) {
1885		sem_unlock(sma, -1);
1886		rcu_read_unlock();
1887		kfree(new);
1888		un = ERR_PTR(-EIDRM);
1889		goto out;
1890	}
1891	spin_lock(&ulp->lock);
1892
1893	/*
1894	 * step 4: check for races: did someone else allocate the undo struct?
1895	 */
1896	un = lookup_undo(ulp, semid);
1897	if (un) {
1898		kfree(new);
1899		goto success;
1900	}
1901	/* step 5: initialize & link new undo structure */
1902	new->semadj = (short *) &new[1];
1903	new->ulp = ulp;
1904	new->semid = semid;
1905	assert_spin_locked(&ulp->lock);
1906	list_add_rcu(&new->list_proc, &ulp->list_proc);
1907	ipc_assert_locked_object(&sma->sem_perm);
1908	list_add(&new->list_id, &sma->list_id);
1909	un = new;
1910
1911success:
1912	spin_unlock(&ulp->lock);
1913	sem_unlock(sma, -1);
1914out:
1915	return un;
1916}
1917
1918static long do_semtimedop(int semid, struct sembuf __user *tsops,
1919		unsigned nsops, const struct timespec64 *timeout)
1920{
1921	int error = -EINVAL;
1922	struct sem_array *sma;
1923	struct sembuf fast_sops[SEMOPM_FAST];
1924	struct sembuf *sops = fast_sops, *sop;
1925	struct sem_undo *un;
1926	int max, locknum;
1927	bool undos = false, alter = false, dupsop = false;
1928	struct sem_queue queue;
1929	unsigned long dup = 0, jiffies_left = 0;
1930	struct ipc_namespace *ns;
1931
1932	ns = current->nsproxy->ipc_ns;
1933
1934	if (nsops < 1 || semid < 0)
1935		return -EINVAL;
1936	if (nsops > ns->sc_semopm)
1937		return -E2BIG;
1938	if (nsops > SEMOPM_FAST) {
1939		sops = kvmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
1940		if (sops == NULL)
1941			return -ENOMEM;
1942	}
1943
1944	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
1945		error =  -EFAULT;
1946		goto out_free;
1947	}
1948
1949	if (timeout) {
1950		if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
1951			timeout->tv_nsec >= 1000000000L) {
1952			error = -EINVAL;
1953			goto out_free;
1954		}
1955		jiffies_left = timespec64_to_jiffies(timeout);
1956	}
1957
1958	max = 0;
1959	for (sop = sops; sop < sops + nsops; sop++) {
1960		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
1961
1962		if (sop->sem_num >= max)
1963			max = sop->sem_num;
1964		if (sop->sem_flg & SEM_UNDO)
1965			undos = true;
1966		if (dup & mask) {
1967			/*
1968			 * There was a previous alter access that appears
1969			 * to have accessed the same semaphore, thus use
1970			 * the dupsop logic. "appears", because the detection
1971			 * can only check % BITS_PER_LONG.
1972			 */
1973			dupsop = true;
1974		}
1975		if (sop->sem_op != 0) {
1976			alter = true;
1977			dup |= mask;
1978		}
1979	}
1980
1981	if (undos) {
1982		/* On success, find_alloc_undo takes the rcu_read_lock */
1983		un = find_alloc_undo(ns, semid);
1984		if (IS_ERR(un)) {
1985			error = PTR_ERR(un);
1986			goto out_free;
1987		}
1988	} else {
1989		un = NULL;
1990		rcu_read_lock();
1991	}
1992
1993	sma = sem_obtain_object_check(ns, semid);
1994	if (IS_ERR(sma)) {
1995		rcu_read_unlock();
1996		error = PTR_ERR(sma);
1997		goto out_free;
1998	}
1999
2000	error = -EFBIG;
2001	if (max >= sma->sem_nsems) {
2002		rcu_read_unlock();
2003		goto out_free;
2004	}
2005
2006	error = -EACCES;
2007	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
2008		rcu_read_unlock();
2009		goto out_free;
2010	}
2011
2012	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
2013	if (error) {
2014		rcu_read_unlock();
2015		goto out_free;
2016	}
2017
2018	error = -EIDRM;
2019	locknum = sem_lock(sma, sops, nsops);
2020	/*
2021	 * We eventually might perform the following check in a lockless
2022	 * fashion, considering ipc_valid_object() locking constraints.
2023	 * If nsops == 1 and there is no contention for sem_perm.lock, then
2024	 * only a per-semaphore lock is held and it's OK to proceed with the
2025	 * check below. More details on the fine-grained locking scheme
2026	 * entangled here, and why it is RMID race safe, are in the comments at sem_lock().
2027	 */
2028	if (!ipc_valid_object(&sma->sem_perm))
2029		goto out_unlock_free;
2030	/*
2031	 * semid identifiers are not unique - find_alloc_undo may have
2032	 * allocated an undo structure, it was invalidated by an RMID
2033	 * and now a new array received the same id. Check and fail.
2034	 * This case can be detected checking un->semid. The existence of
2035	 * "un" itself is guaranteed by rcu.
2036	 */
2037	if (un && un->semid == -1)
2038		goto out_unlock_free;
2039
2040	queue.sops = sops;
2041	queue.nsops = nsops;
2042	queue.undo = un;
2043	queue.pid = task_tgid(current);
2044	queue.alter = alter;
2045	queue.dupsop = dupsop;
2046
2047	error = perform_atomic_semop(sma, &queue);
2048	if (error == 0) { /* non-blocking successful path */
2049		DEFINE_WAKE_Q(wake_q);
2050
2051		/*
2052		 * If the operation was successful, then do
2053		 * the required updates.
2054		 */
2055		if (alter)
2056			do_smart_update(sma, sops, nsops, 1, &wake_q);
2057		else
2058			set_semotime(sma, sops);
2059
2060		sem_unlock(sma, locknum);
2061		rcu_read_unlock();
2062		wake_up_q(&wake_q);
2063
2064		goto out_free;
2065	}
2066	if (error < 0) /* non-blocking error path */
2067		goto out_unlock_free;
2068
2069	/*
2070	 * We need to sleep on this operation, so we put the current
2071	 * task into the pending queue and go to sleep.
2072	 */
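	/*
	 * Queue placement (a summary of the code below, not new behavior):
	 * a single-sop request is queued on the per-semaphore pending_alter
	 * or pending_const list; an altering single sop falls back to the
	 * array-wide list while complex operations are pending. Multi-sop
	 * requests merge the per-semaphore queues if necessary, are queued
	 * on the array-wide lists, and increment complex_count, which steers
	 * sem_lock() away from per-semaphore locking.
	 */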
2073	if (nsops == 1) {
2074		struct sem *curr;
2075		curr = &sma->sems[sops->sem_num];
2076
2077		if (alter) {
2078			if (sma->complex_count) {
2079				list_add_tail(&queue.list,
2080						&sma->pending_alter);
2081			} else {
2082
2083				list_add_tail(&queue.list,
2084						&curr->pending_alter);
2085			}
2086		} else {
2087			list_add_tail(&queue.list, &curr->pending_const);
2088		}
2089	} else {
2090		if (!sma->complex_count)
2091			merge_queues(sma);
2092
2093		if (alter)
2094			list_add_tail(&queue.list, &sma->pending_alter);
2095		else
2096			list_add_tail(&queue.list, &sma->pending_const);
2097
2098		sma->complex_count++;
2099	}
2100
2101	do {
2102		queue.status = -EINTR;
2103		queue.sleeper = current;
2104
2105		__set_current_state(TASK_INTERRUPTIBLE);
2106		sem_unlock(sma, locknum);
2107		rcu_read_unlock();
2108
2109		if (timeout)
2110			jiffies_left = schedule_timeout(jiffies_left);
2111		else
2112			schedule();
2113
2114		/*
2115		 * fastpath: the semop has completed, either successfully or
2116		 * not; which of the two is, from the syscall's point of view,
2117		 * irrelevant to us at this point; we're done.
2118		 *
2119		 * We _do_ care, nonetheless, about being awoken by a signal or
2120		 * spuriously.  The queue.status is checked again in the
2121		 * slowpath (aka after taking sem_lock), such that we can detect
2122		 * scenarios where we were awakened externally, during the
2123		 * window between wake_q_add() and wake_up_q().
2124		 */
2125		error = READ_ONCE(queue.status);
2126		if (error != -EINTR) {
2127			/*
2128			 * User space could assume that semop() is a memory
2129			 * barrier: Without the mb(), the cpu could
2130			 * speculatively read in userspace stale data that was
2131			 * overwritten by the previous owner of the semaphore.
2132			 */
2133			smp_mb();
2134			goto out_free;
2135		}
2136
2137		rcu_read_lock();
2138		locknum = sem_lock(sma, sops, nsops);
2139
2140		if (!ipc_valid_object(&sma->sem_perm))
2141			goto out_unlock_free;
2142
2143		error = READ_ONCE(queue.status);
2144
2145		/*
2146		 * If queue.status != -EINTR we are woken up by another process.
2147		 * Leave without unlink_queue(), but with sem_unlock().
2148		 */
2149		if (error != -EINTR)
2150			goto out_unlock_free;
2151
2152		/*
2153		 * If an interrupt occurred we have to clean up the queue.
2154		 */
2155		if (timeout && jiffies_left == 0)
2156			error = -EAGAIN;
2157	} while (error == -EINTR && !signal_pending(current)); /* spurious */
2158
2159	unlink_queue(sma, &queue);
2160
2161out_unlock_free:
2162	sem_unlock(sma, locknum);
2163	rcu_read_unlock();
2164out_free:
2165	if (sops != fast_sops)
2166		kvfree(sops);
2167	return error;
2168}
2169
2170long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2171		     unsigned int nsops, const struct timespec __user *timeout)
2172{
2173	if (timeout) {
2174		struct timespec64 ts;
2175		if (get_timespec64(&ts, timeout))
2176			return -EFAULT;
2177		return do_semtimedop(semid, tsops, nsops, &ts);
2178	}
2179	return do_semtimedop(semid, tsops, nsops, NULL);
2180}
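/*
 * Illustrative user-space sketch (not part of this file): a typical
 * semtimedop(2) call that decrements semaphore 0 and gives up after
 * one second; errno == EAGAIN then indicates the timeout expired.
 * Assumes a valid semid previously obtained from semget().
 *
 *	struct sembuf sop = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	int rc = semtimedop(semid, &sop, 1, &ts);
 */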
2181
2182SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
2183		unsigned int, nsops, const struct timespec __user *, timeout)
2184{
2185	return ksys_semtimedop(semid, tsops, nsops, timeout);
2186}
2187
2188#ifdef CONFIG_COMPAT
2189long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2190			    unsigned int nsops,
2191			    const struct compat_timespec __user *timeout)
2192{
2193	if (timeout) {
2194		struct timespec64 ts;
2195		if (compat_get_timespec64(&ts, timeout))
2196			return -EFAULT;
2197		return do_semtimedop(semid, tsems, nsops, &ts);
2198	}
2199	return do_semtimedop(semid, tsems, nsops, NULL);
2200}
2201
2202COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
2203		       unsigned int, nsops,
2204		       const struct compat_timespec __user *, timeout)
2205{
2206	return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
2207}
2208#endif
2209
2210SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2211		unsigned, nsops)
2212{
2213	return do_semtimedop(semid, tsops, nsops, NULL);
2214}
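/*
 * Note (not in the original): semop(2) is semtimedop(2) with a NULL
 * timeout; it blocks until the operation completes, a signal arrives,
 * or the array is removed. (IPC_NOWAIT in sem_flg can still make an
 * individual call return -EAGAIN immediately.)
 */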
2215
2216/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2217 * parent and child tasks.
2218 */
2219
2220int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2221{
2222	struct sem_undo_list *undo_list;
2223	int error;
2224
2225	if (clone_flags & CLONE_SYSVSEM) {
2226		error = get_undo_list(&undo_list);
2227		if (error)
2228			return error;
2229		refcount_inc(&undo_list->refcnt);
2230		tsk->sysvsem.undo_list = undo_list;
2231	} else
2232		tsk->sysvsem.undo_list = NULL;
2233
2234	return 0;
2235}
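/*
 * Illustrative note (not in the original): a child created with
 * clone(CLONE_SYSVSEM) shares its parent's undo list (the refcount is
 * bumped above); a plain fork() starts with no undo list, and one is
 * allocated lazily on the first semop() that uses SEM_UNDO.
 */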
2236
2237/*
2238 * add semadj values to semaphores, free undo structures.
2239 * undo structures are not freed when semaphore arrays are destroyed
2240 * so some of them may be out of date.
2241	 * IMPLEMENTATION NOTE: There is some confusion over whether the
2242	 * set of adjustments should be applied atomically or not. That is,
2243	 * if we are attempting to decrement the semval, should we queue up
2244	 * and wait until we can do so legally?
2245 * The original implementation attempted to do this (queue and wait).
2246 * The current implementation does not do so. The POSIX standard
2247 * and SVID should be consulted to determine what behavior is mandated.
2248 */
2249void exit_sem(struct task_struct *tsk)
2250{
2251	struct sem_undo_list *ulp;
2252
2253	ulp = tsk->sysvsem.undo_list;
2254	if (!ulp)
2255		return;
2256	tsk->sysvsem.undo_list = NULL;
2257
2258	if (!refcount_dec_and_test(&ulp->refcnt))
2259		return;
2260
2261	for (;;) {
2262		struct sem_array *sma;
2263		struct sem_undo *un;
2264		int semid, i;
2265		DEFINE_WAKE_Q(wake_q);
2266
2267		cond_resched();
2268
2269		rcu_read_lock();
2270		un = list_entry_rcu(ulp->list_proc.next,
2271				    struct sem_undo, list_proc);
2272		if (&un->list_proc == &ulp->list_proc) {
2273			/*
2274			 * We must wait for freeary() before freeing this ulp,
2275			 * in case we raced with the last sem_undo. There is a
2276			 * small window in which we exit before freeary() has
2277			 * finished unlocking sem_undo_list.
2278			 */
2279			spin_lock(&ulp->lock);
2280			spin_unlock(&ulp->lock);
2281			rcu_read_unlock();
2282			break;
2283		}
2284		spin_lock(&ulp->lock);
2285		semid = un->semid;
2286		spin_unlock(&ulp->lock);
2287
2288		/* exit_sem raced with IPC_RMID, nothing to do */
2289		if (semid == -1) {
2290			rcu_read_unlock();
2291			continue;
2292		}
2293
2294		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2295		/* exit_sem raced with IPC_RMID, nothing to do */
2296		if (IS_ERR(sma)) {
2297			rcu_read_unlock();
2298			continue;
2299		}
2300
2301		sem_lock(sma, NULL, -1);
2302		/* exit_sem raced with IPC_RMID, nothing to do */
2303		if (!ipc_valid_object(&sma->sem_perm)) {
2304			sem_unlock(sma, -1);
2305			rcu_read_unlock();
2306			continue;
2307		}
2308		un = __lookup_undo(ulp, semid);
2309		if (un == NULL) {
2310			/* exit_sem raced with IPC_RMID+semget() that created
2311			 * exactly the same semid. Nothing to do.
2312			 */
2313			sem_unlock(sma, -1);
2314			rcu_read_unlock();
2315			continue;
2316		}
2317
2318		/* remove un from the linked lists */
2319		ipc_assert_locked_object(&sma->sem_perm);
2320		list_del(&un->list_id);
2321
2322		/* we are the last process using this ulp, so acquiring ulp->lock
2323		 * isn't required. Besides that, we are also protected against
2324		 * IPC_RMID, as we hold the sma->sem_perm lock now.
2325		 */
2326		list_del_rcu(&un->list_proc);
2327
2328		/* perform adjustments registered in un */
2329		for (i = 0; i < sma->sem_nsems; i++) {
2330			struct sem *semaphore = &sma->sems[i];
2331			if (un->semadj[i]) {
2332				semaphore->semval += un->semadj[i];
2333				/*
2334				 * Range checks of the new semaphore value,
2335				 * not defined by SUS (the Single UNIX Specification):
2336				 * - Some unices ignore the undo entirely
2337				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2338				 * - some cap the value (e.g. FreeBSD caps
2339				 *   at 0, but doesn't enforce SEMVMX)
2340				 *
2341				 * Linux caps the semaphore value, both at 0
2342				 * and at SEMVMX.
2343				 *
2344				 *	Manfred <manfred@colorfullife.com>
2345				 */
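				/*
				 * Worked example (illustrative, not in the
				 * original): semval 2 with a semadj of -5
				 * would yield -3 and is clamped to 0 below;
				 * a result above SEMVMX is clamped down to
				 * SEMVMX.
				 */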
2346				if (semaphore->semval < 0)
2347					semaphore->semval = 0;
2348				if (semaphore->semval > SEMVMX)
2349					semaphore->semval = SEMVMX;
2350				ipc_update_pid(&semaphore->sempid, task_tgid(current));
2351			}
2352		}
2353		/* maybe some queued-up processes were waiting for this */
2354		do_smart_update(sma, NULL, 0, 1, &wake_q);
2355		sem_unlock(sma, -1);
2356		rcu_read_unlock();
2357		wake_up_q(&wake_q);
2358
2359		kfree_rcu(un, rcu);
2360	}
2361	kfree(ulp);
2362}
2363
2364#ifdef CONFIG_PROC_FS
2365static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2366{
2367	struct user_namespace *user_ns = seq_user_ns(s);
2368	struct kern_ipc_perm *ipcp = it;
2369	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
2370	time64_t sem_otime;
2371
2372	/*
2373	 * The proc interface isn't aware of sem_lock(), it calls
2374	 * ipc_lock_object() directly (in sysvipc_find_ipc).
2375	 * In order to stay compatible with sem_lock(), we must
2376	 * enter / leave complex_mode.
2377	 */
2378	complexmode_enter(sma);
2379
2380	sem_otime = get_semotime(sma);
2381
2382	seq_printf(s,
2383		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
2384		   sma->sem_perm.key,
2385		   sma->sem_perm.id,
2386		   sma->sem_perm.mode,
2387		   sma->sem_nsems,
2388		   from_kuid_munged(user_ns, sma->sem_perm.uid),
2389		   from_kgid_munged(user_ns, sma->sem_perm.gid),
2390		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
2391		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
2392		   sem_otime,
2393		   sma->sem_ctime);
2394
2395	complexmode_tryleave(sma);
2396
2397	return 0;
2398}
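/*
 * Illustrative note (not in the original): each array produces one line
 * in /proc/sysvipc/sem, in the column order of the format string above:
 * key, semid, perms (octal), nsems, uid, gid, cuid, cgid, otime, ctime.
 */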
2399#endif