   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/ipc/sem.c
   4 * Copyright (C) 1992 Krishna Balasubramanian
   5 * Copyright (C) 1995 Eric Schenk, Bruno Haible
   6 *
   7 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
   8 *
   9 * SMP-threaded, sysctl's added
  10 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
  11 * Enforced range limit on SEM_UNDO
  12 * (c) 2001 Red Hat Inc
  13 * Lockless wakeup
  14 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
  15 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
  16 * Further wakeup optimizations, documentation
  17 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
  18 *
  19 * support for audit of ipc object properties and permission changes
  20 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  21 *
  22 * namespaces support
  23 * OpenVZ, SWsoft Inc.
  24 * Pavel Emelianov <xemul@openvz.org>
  25 *
  26 * Implementation notes: (May 2010)
  27 * This file implements System V semaphores.
  28 *
  29 * User space visible behavior:
  30 * - FIFO ordering for semop() operations (just FIFO, not starvation
  31 *   protection)
  32 * - multiple semaphore operations that alter the same semaphore in
  33 *   one semop() are handled.
  34 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
  35 *   SETALL calls.
  36 * - two Linux-specific semctl() commands: SEM_STAT, SEM_INFO.
  37 * - undo adjustments at process exit are limited to 0..SEMVMX.
  38 * - namespaces are supported.
  39 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
  40 *   to /proc/sys/kernel/sem.
  41 * - statistics about the usage are reported in /proc/sysvipc/sem.
  42 *
  43 * Internals:
  44 * - scalability:
  45 *   - all global variables are read-mostly.
  46 *   - semop() calls and semctl(RMID) are synchronized by RCU.
  47 *   - most operations do write operations (actually: spin_lock calls) to
  48 *     the per-semaphore array structure.
  49 *   Thus: Perfect SMP scaling between independent semaphore arrays.
  50 *         If multiple semaphores in one array are used, then cache line
  51 *         thrashing on the semaphore array spinlock will limit the scaling.
  52 * - semncnt and semzcnt are calculated on demand in count_semcnt()
  53 * - the task that performs a successful semop() scans the list of all
  54 *   sleeping tasks and completes any pending operations that can be fulfilled.
  55 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
  56 *   (see update_queue())
  57 * - To improve the scalability, the actual wake-up calls are performed after
  58 *   dropping all locks. (see wake_up_sem_queue_prepare())
  59 * - All work is done by the waker, the woken up task does not have to do
  60 *   anything - not even acquiring a lock or dropping a refcount.
  61 * - A woken up task may not even touch the semaphore array anymore, it may
  62 *   have been destroyed already by a semctl(RMID).
  63 * - UNDO values are stored in an array (one per process and per
  64 *   semaphore array, lazily allocated). For backwards compatibility, multiple
  65 *   modes for the UNDO variables are supported (per process, per thread)
  66 *   (see copy_semundo, CLONE_SYSVSEM)
  67 * - There are two lists of the pending operations: a per-array list
  68 *   and per-semaphore list (stored in the array). This makes it possible to
  69 *   achieve FIFO ordering without always scanning all pending operations.
  70 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
  71 */
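
/*
 * Editorial illustration (not part of the original source): a minimal
 * userspace sketch of the behavior documented above. Error handling is
 * omitted; the headers are <sys/ipc.h> and <sys/sem.h>.
 *
 *	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *	struct sembuf up   = { .sem_num = 0, .sem_op = 1 };
 *	struct sembuf down = { .sem_num = 0, .sem_op = -1,
 *			       .sem_flg = SEM_UNDO };
 *
 *	semop(id, &up, 1);	// sem_op > 0: never blocks
 *	semop(id, &down, 1);	// sem_op < 0: blocks until semval >= 1;
 *				// SEM_UNDO records a +1 adjustment that
 *				// exit_sem() applies if we exit holding it
 *	semctl(id, 0, IPC_RMID);
 */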
  72
  73#include <linux/compat.h>
  74#include <linux/slab.h>
  75#include <linux/spinlock.h>
  76#include <linux/init.h>
  77#include <linux/proc_fs.h>
  78#include <linux/time.h>
  79#include <linux/security.h>
  80#include <linux/syscalls.h>
  81#include <linux/audit.h>
  82#include <linux/capability.h>
  83#include <linux/seq_file.h>
  84#include <linux/rwsem.h>
  85#include <linux/nsproxy.h>
  86#include <linux/ipc_namespace.h>
  87#include <linux/sched/wake_q.h>
  88#include <linux/nospec.h>
  89#include <linux/rhashtable.h>
  90
  91#include <linux/uaccess.h>
  92#include "util.h"
  93
  94/* One semaphore structure for each semaphore in the system. */
  95struct sem {
  96	int	semval;		/* current value */
  97	/*
  98	 * PID of the process that last modified the semaphore. For
  99	 * Linux, specifically these are:
 100	 *  - semop
 101	 *  - semctl, via SETVAL and SETALL.
 102	 *  - at task exit when performing undo adjustments (see exit_sem).
 103	 */
 104	struct pid *sempid;
 105	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
 106	struct list_head pending_alter; /* pending single-sop operations */
 107					/* that alter the semaphore */
 108	struct list_head pending_const; /* pending single-sop operations */
 109					/* that do not alter the semaphore*/
 110	time64_t	 sem_otime;	/* candidate for sem_otime */
 111} ____cacheline_aligned_in_smp;
 112
 113/* One sem_array data structure for each set of semaphores in the system. */
 114struct sem_array {
 115	struct kern_ipc_perm	sem_perm;	/* permissions .. see ipc.h */
 116	time64_t		sem_ctime;	/* create/last semctl() time */
 117	struct list_head	pending_alter;	/* pending operations */
 118						/* that alter the array */
 119	struct list_head	pending_const;	/* pending complex operations */
 120						/* that do not alter semvals */
 121	struct list_head	list_id;	/* undo requests on this array */
 122	int			sem_nsems;	/* no. of semaphores in array */
 123	int			complex_count;	/* pending complex operations */
 124	unsigned int		use_global_lock;/* >0: global lock required */
 125
 126	struct sem		sems[];
 127} __randomize_layout;
 128
 129/* One queue for each sleeping process in the system. */
 130struct sem_queue {
 131	struct list_head	list;	 /* queue of pending operations */
 132	struct task_struct	*sleeper; /* this process */
 133	struct sem_undo		*undo;	 /* undo structure */
 134	struct pid		*pid;	 /* process id of requesting process */
 135	int			status;	 /* completion status of operation */
 136	struct sembuf		*sops;	 /* array of pending operations */
 137	struct sembuf		*blocking; /* the operation that blocked */
 138	int			nsops;	 /* number of operations */
 139	bool			alter;	 /* does *sops alter the array? */
 140	bool                    dupsop;	 /* sops on more than one sem_num */
 141};
 142
 143/* Each task has a list of undo requests. They are executed automatically
 144 * when the process exits.
 145 */
 146struct sem_undo {
 147	struct list_head	list_proc;	/* per-process list: *
 148						 * all undos from one process
 149						 * rcu protected */
 150	struct rcu_head		rcu;		/* rcu struct for sem_undo */
 151	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
 152	struct list_head	list_id;	/* per semaphore array list:
 153						 * all undos for one array */
 154	int			semid;		/* semaphore set identifier */
 155	short			semadj[];	/* array of adjustments */
 156						/* one per semaphore */
 157};
 158
 159/* sem_undo_list controls shared access to the list of sem_undo structures
 160 * that may be shared among all tasks of a CLONE_SYSVSEM task group.
 161 */
 162struct sem_undo_list {
 163	refcount_t		refcnt;
 164	spinlock_t		lock;
 165	struct list_head	list_proc;
 166};
 167
 168
 169#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])
 170
 171static int newary(struct ipc_namespace *, struct ipc_params *);
 172static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
 173#ifdef CONFIG_PROC_FS
 174static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 175#endif
 176
 177#define SEMMSL_FAST	256 /* 512 bytes on stack */
 178#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
 179
 180/*
 181 * Switching from the mode suitable for simple ops
 182 * to the mode for complex ops is costly. Therefore:
 183 * use some hysteresis
 184 */
 185#define USE_GLOBAL_LOCK_HYSTERESIS	10
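
/*
 * Editorial worked example of the hysteresis (not in the original
 * source): a complex op sets use_global_lock to 10. Each subsequent
 * global-locked op that finds complex_count == 0 decrements it in
 * complexmode_tryleave():
 *
 *	use_global_lock: 10 -> 9 -> ... -> 1 -> smp_store_release(0)
 *
 * Only the final store to 0 re-enables the per-semaphore fast path, so
 * a mix of complex and simple ops does not bounce between the two
 * locking modes on every call.
 */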
 186
 187/*
 188 * Locking:
 189 * a) global sem_lock() for read/write
 190 *	sem_undo.id_next,
 191 *	sem_array.complex_count,
 192 *	sem_array.pending{_alter,_const},
 193 *	sem_array.sem_undo
 194 *
 195 * b) global or semaphore sem_lock() for read/write:
 196 *	sem_array.sems[i].pending_{const,alter}:
 197 *
 198 * c) special:
 199 *	sem_undo_list.list_proc:
 200 *	* undo_list->lock for write
 201 *	* rcu for read
 202 *	use_global_lock:
 203 *	* global sem_lock() for write
 204 *	* either local or global sem_lock() for read.
 205 *
 206 * Memory ordering:
 207 * Most ordering is enforced by using spin_lock() and spin_unlock().
 208 *
 209 * Exceptions:
 210 * 1) use_global_lock: (SEM_BARRIER_1)
 211 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
 212 * using smp_store_release(): Immediately after setting it to 0,
 213 * a simple op can start.
 214 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
 215 * smp_load_acquire().
 216 * Setting it from 0 to non-zero must be ordered with regards to
 217 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 218 * is inside a spin_lock() and after a write from 0 to non-zero a
 219 * spin_lock()+spin_unlock() is done.
 220 * To prevent the compiler/cpu temporarily writing 0 to use_global_lock,
 221 * READ_ONCE()/WRITE_ONCE() is used.
 222 *
 223 * 2) queue.status: (SEM_BARRIER_2)
 224 * Initialization is done while holding sem_lock(), so no further barrier is
 225 * required.
 226 * Setting it to a result code is a RELEASE, this is ensured by both a
 227 * smp_store_release() (for case a) and while holding sem_lock()
 228 * (for case b).
 229 * The ACQUIRE when reading the result code without holding sem_lock() is
 230 * achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep().
 231 * (case a above).
 232 * Reading the result code while holding sem_lock() needs no further barriers,
 233 * the locks inside sem_lock() enforce ordering (case b above)
 234 *
 235 * 3) current->state:
 236 * current->state is set to TASK_INTERRUPTIBLE while holding sem_lock().
 237 * The wakeup is handled using the wake_q infrastructure. wake_q wakeups may
 238 * happen immediately after calling wake_q_add. As wake_q_add_safe() is called
 239 * when holding sem_lock(), no further barriers are required.
 240 *
 241 * See also ipc/mqueue.c for more details on the covered races.
 242 */
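
/*
 * Editorial sketch of the SEM_BARRIER_1 pairing described above (not in
 * the original source), writer vs. reader:
 *
 *	complexmode_tryleave()			sem_lock() fast path
 *	----------------------			--------------------
 *	// all complex-mode stores done		spin_lock(&sem->lock);
 *	smp_store_release(			if (!smp_load_acquire(
 *		&sma->use_global_lock, 0);		&sma->use_global_lock))
 *						// fast path: every store made
 *						// before the release is visible
 *
 * A simple op that observes use_global_lock == 0 through the paired
 * load-acquire is therefore guaranteed to see all updates performed
 * under the global lock.
 */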
 243
 244#define sc_semmsl	sem_ctls[0]
 245#define sc_semmns	sem_ctls[1]
 246#define sc_semopm	sem_ctls[2]
 247#define sc_semmni	sem_ctls[3]
 248
 249void sem_init_ns(struct ipc_namespace *ns)
 250{
 251	ns->sc_semmsl = SEMMSL;
 252	ns->sc_semmns = SEMMNS;
 253	ns->sc_semopm = SEMOPM;
 254	ns->sc_semmni = SEMMNI;
 255	ns->used_sems = 0;
 256	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
 257}
 258
 259#ifdef CONFIG_IPC_NS
 260void sem_exit_ns(struct ipc_namespace *ns)
 261{
 262	free_ipcs(ns, &sem_ids(ns), freeary);
 263	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 264	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
 265}
 266#endif
 267
 268void __init sem_init(void)
 269{
 270	sem_init_ns(&init_ipc_ns);
 271	ipc_init_proc_interface("sysvipc/sem",
 272				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
 273				IPC_SEM_IDS, sysvipc_sem_proc_show);
 274}
 275
 276/**
 277 * unmerge_queues - unmerge queues, if possible.
 278 * @sma: semaphore array
 279 *
 280 * The function unmerges the wait queues if complex_count is 0.
 281 * It must be called prior to dropping the global semaphore array lock.
 282 */
 283static void unmerge_queues(struct sem_array *sma)
 284{
 285	struct sem_queue *q, *tq;
 286
 287	/* complex operations still around? */
 288	if (sma->complex_count)
 289		return;
 290	/*
 291	 * We will switch back to simple mode.
 292	 * Move all pending operations back into the per-semaphore
 293	 * queues.
 294	 */
 295	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
 296		struct sem *curr;
 297		curr = &sma->sems[q->sops[0].sem_num];
 298
 299		list_add_tail(&q->list, &curr->pending_alter);
 300	}
 301	INIT_LIST_HEAD(&sma->pending_alter);
 302}
 303
 304/**
 305 * merge_queues - merge single semop queues into global queue
 306 * @sma: semaphore array
 307 *
 308 * This function merges all per-semaphore queues into the global queue.
 309 * It is necessary to achieve FIFO ordering for the pending single-sop
 310 * operations when a multi-semop operation must sleep.
 311 * Only the alter operations must be moved; the const operations can stay.
 312 */
 313static void merge_queues(struct sem_array *sma)
 314{
 315	int i;
 316	for (i = 0; i < sma->sem_nsems; i++) {
 317		struct sem *sem = &sma->sems[i];
 318
 319		list_splice_init(&sem->pending_alter, &sma->pending_alter);
 320	}
 321}
 322
 323static void sem_rcu_free(struct rcu_head *head)
 324{
 325	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
 326	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
 327
 328	security_sem_free(&sma->sem_perm);
 329	kvfree(sma);
 330}
 331
 332/*
 333 * Enter the mode suitable for non-simple operations:
 334 * Caller must own sem_perm.lock.
 335 */
 336static void complexmode_enter(struct sem_array *sma)
 337{
 338	int i;
 339	struct sem *sem;
 340
 341	if (sma->use_global_lock > 0)  {
 342		/*
 343		 * We are already in global lock mode.
 344		 * Nothing to do, just reset the
 345		 * counter until we return to simple mode.
 346		 */
 347		WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
 348		return;
 349	}
 350	WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
 351
 352	for (i = 0; i < sma->sem_nsems; i++) {
 353		sem = &sma->sems[i];
 354		spin_lock(&sem->lock);
 355		spin_unlock(&sem->lock);
 356	}
 357}
 358
 359/*
 360 * Try to leave the mode that disallows simple operations:
 361 * Caller must own sem_perm.lock.
 362 */
 363static void complexmode_tryleave(struct sem_array *sma)
 364{
 365	if (sma->complex_count)  {
 366		/* Complex ops are sleeping.
 367		 * We must stay in complex mode
 368		 */
 369		return;
 370	}
 371	if (sma->use_global_lock == 1) {
 372
 373		/* See SEM_BARRIER_1 for purpose/pairing */
 374		smp_store_release(&sma->use_global_lock, 0);
 375	} else {
 376		WRITE_ONCE(sma->use_global_lock,
 377				sma->use_global_lock-1);
 378	}
 379}
 380
 381#define SEM_GLOBAL_LOCK	(-1)
 382/*
 383 * If the request contains only one semaphore operation, and there are
 384 * no complex transactions pending, lock only the semaphore involved.
 385 * Otherwise, lock the entire semaphore array, since we either have
 386 * multiple semaphores in our own semops, or we need to look at
 387 * semaphores from other pending complex operations.
 388 */
 389static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 390			      int nsops)
 391{
 392	struct sem *sem;
 393	int idx;
 394
 395	if (nsops != 1) {
 396		/* Complex operation - acquire a full lock */
 397		ipc_lock_object(&sma->sem_perm);
 398
 399		/* Prevent parallel simple ops */
 400		complexmode_enter(sma);
 401		return SEM_GLOBAL_LOCK;
 402	}
 403
 404	/*
 405	 * Only one semaphore affected - try to optimize locking.
 406	 * Optimized locking is possible if no complex operation
 407	 * is either enqueued or processed right now.
 408	 *
 409	 * Both facts are tracked by use_global_lock.
 410	 */
 411	idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
 412	sem = &sma->sems[idx];
 413
 414	/*
 415	 * Initial check for use_global_lock. Just an optimization,
 416	 * no locking, no memory barrier.
 417	 */
 418	if (!READ_ONCE(sma->use_global_lock)) {
 419		/*
 420		 * It appears that no complex operation is around.
 421		 * Acquire the per-semaphore lock.
 422		 */
 423		spin_lock(&sem->lock);
 424
 425		/* see SEM_BARRIER_1 for purpose/pairing */
 426		if (!smp_load_acquire(&sma->use_global_lock)) {
 427			/* fast path successful! */
 428			return sops->sem_num;
 429		}
 430		spin_unlock(&sem->lock);
 431	}
 432
 433	/* slow path: acquire the full lock */
 434	ipc_lock_object(&sma->sem_perm);
 435
 436	if (sma->use_global_lock == 0) {
 437		/*
 438		 * The use_global_lock mode ended while we waited for
 439		 * sma->sem_perm.lock. Thus we must switch to locking
 440		 * with sem->lock.
 441		 * Unlike in the fast path, there is no need to recheck
 442		 * sma->use_global_lock after we have acquired sem->lock:
 443		 * We own sma->sem_perm.lock, thus use_global_lock cannot
 444		 * change.
 445		 */
 446		spin_lock(&sem->lock);
 447
 448		ipc_unlock_object(&sma->sem_perm);
 449		return sops->sem_num;
 450	} else {
 451		/*
 452		 * Not a false alarm, thus continue to use the global lock
 453		 * mode. No need for complexmode_enter(), this was done by
 454		 * the caller that has set use_global_lock to non-zero.
 455		 */
 456		return SEM_GLOBAL_LOCK;
 457	}
 458}
 459
 460static inline void sem_unlock(struct sem_array *sma, int locknum)
 461{
 462	if (locknum == SEM_GLOBAL_LOCK) {
 463		unmerge_queues(sma);
 464		complexmode_tryleave(sma);
 465		ipc_unlock_object(&sma->sem_perm);
 466	} else {
 467		struct sem *sem = &sma->sems[locknum];
 468		spin_unlock(&sem->lock);
 469	}
 470}
 471
 472/*
 473 * sem_lock_(check_) routines are called in the paths where the rwsem
 474 * is not held.
 475 *
 476 * The caller holds the RCU read lock.
 477 */
 478static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
 479{
 480	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
 481
 482	if (IS_ERR(ipcp))
 483		return ERR_CAST(ipcp);
 484
 485	return container_of(ipcp, struct sem_array, sem_perm);
 486}
 487
 488static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
 489							int id)
 490{
 491	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
 492
 493	if (IS_ERR(ipcp))
 494		return ERR_CAST(ipcp);
 495
 496	return container_of(ipcp, struct sem_array, sem_perm);
 497}
 498
 499static inline void sem_lock_and_putref(struct sem_array *sma)
 500{
 501	sem_lock(sma, NULL, -1);
 502	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 503}
 504
 505static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 506{
 507	ipc_rmid(&sem_ids(ns), &s->sem_perm);
 508}
 509
 510static struct sem_array *sem_alloc(size_t nsems)
 511{
 512	struct sem_array *sma;
 513
 514	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
 515		return NULL;
 516
 517	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
 518	if (unlikely(!sma))
 519		return NULL;
 520
 521	return sma;
 522}
 523
 524/**
 525 * newary - Create a new semaphore set
 526 * @ns: namespace
 527 * @params: ptr to the structure that contains key, semflg and nsems
 528 *
 529 * Called with sem_ids.rwsem held (as a writer)
 530 */
 531static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 532{
 533	int retval;
 534	struct sem_array *sma;
 535	key_t key = params->key;
 536	int nsems = params->u.nsems;
 537	int semflg = params->flg;
 538	int i;
 539
 540	if (!nsems)
 541		return -EINVAL;
 542	if (ns->used_sems + nsems > ns->sc_semmns)
 543		return -ENOSPC;
 544
 545	sma = sem_alloc(nsems);
 546	if (!sma)
 547		return -ENOMEM;
 548
 549	sma->sem_perm.mode = (semflg & S_IRWXUGO);
 550	sma->sem_perm.key = key;
 551
 552	sma->sem_perm.security = NULL;
 553	retval = security_sem_alloc(&sma->sem_perm);
 554	if (retval) {
 555		kvfree(sma);
 556		return retval;
 557	}
 558
 559	for (i = 0; i < nsems; i++) {
 560		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
 561		INIT_LIST_HEAD(&sma->sems[i].pending_const);
 562		spin_lock_init(&sma->sems[i].lock);
 563	}
 564
 565	sma->complex_count = 0;
 566	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 567	INIT_LIST_HEAD(&sma->pending_alter);
 568	INIT_LIST_HEAD(&sma->pending_const);
 569	INIT_LIST_HEAD(&sma->list_id);
 570	sma->sem_nsems = nsems;
 571	sma->sem_ctime = ktime_get_real_seconds();
 572
 573	/* ipc_addid() locks sma upon success. */
 574	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
 575	if (retval < 0) {
 576		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 577		return retval;
 578	}
 579	ns->used_sems += nsems;
 580
 581	sem_unlock(sma, -1);
 582	rcu_read_unlock();
 583
 584	return sma->sem_perm.id;
 585}
 586
 587
 588/*
 589 * Called with sem_ids.rwsem and ipcp locked.
 590 */
 591static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
 592{
 593	struct sem_array *sma;
 594
 595	sma = container_of(ipcp, struct sem_array, sem_perm);
 596	if (params->u.nsems > sma->sem_nsems)
 597		return -EINVAL;
 598
 599	return 0;
 600}
 601
 602long ksys_semget(key_t key, int nsems, int semflg)
 603{
 604	struct ipc_namespace *ns;
 605	static const struct ipc_ops sem_ops = {
 606		.getnew = newary,
 607		.associate = security_sem_associate,
 608		.more_checks = sem_more_checks,
 609	};
 610	struct ipc_params sem_params;
 611
 612	ns = current->nsproxy->ipc_ns;
 613
 614	if (nsems < 0 || nsems > ns->sc_semmsl)
 615		return -EINVAL;
 616
 617	sem_params.key = key;
 618	sem_params.flg = semflg;
 619	sem_params.u.nsems = nsems;
 620
 621	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 622}
 623
 624SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 625{
 626	return ksys_semget(key, nsems, semflg);
 627}
 628
 629/**
 630 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 631 *                               operations on a given array.
 632 * @sma: semaphore array
 633 * @q: struct sem_queue that describes the operation
 634 *
 635 * Caller blocking behavior is as follows, based on the value
 636 * indicated by the semaphore operation (sem_op):
 637 *
 638 *  (1) >0 never blocks.
 639 *  (2)  0 (wait-for-zero operation): semval is non-zero.
 640 *  (3) <0 attempting to decrement semval to a value smaller than zero.
 641 *
 642 * Returns 0 if the operation was possible.
 643 * Returns 1 if the operation is impossible, the caller must sleep.
 644 * Returns <0 for error codes.
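 *
 * Editorial illustration (not in the original comment), with semval == 0:
 * { .sem_op = 1 } succeeds and sets semval to 1; { .sem_op = 0 } succeeds
 * (wait-for-zero on a zero semaphore); { .sem_op = -1 } returns 1 (sleep),
 * or -EAGAIN if IPC_NOWAIT is set in sem_flg.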
 645 */
 646static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 647{
 648	int result, sem_op, nsops;
 649	struct pid *pid;
 650	struct sembuf *sop;
 651	struct sem *curr;
 652	struct sembuf *sops;
 653	struct sem_undo *un;
 654
 655	sops = q->sops;
 656	nsops = q->nsops;
 657	un = q->undo;
 658
 659	for (sop = sops; sop < sops + nsops; sop++) {
 660		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
 661		curr = &sma->sems[idx];
 662		sem_op = sop->sem_op;
 663		result = curr->semval;
 664
 665		if (!sem_op && result)
 666			goto would_block;
 667
 668		result += sem_op;
 669		if (result < 0)
 670			goto would_block;
 671		if (result > SEMVMX)
 672			goto out_of_range;
 673
 674		if (sop->sem_flg & SEM_UNDO) {
 675			int undo = un->semadj[sop->sem_num] - sem_op;
 676			/* Exceeding the undo range is an error. */
 677			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 678				goto out_of_range;
 679			un->semadj[sop->sem_num] = undo;
 680		}
 681
 682		curr->semval = result;
 683	}
 684
 685	sop--;
 686	pid = q->pid;
 687	while (sop >= sops) {
 688		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
 689		sop--;
 690	}
 691
 692	return 0;
 693
 694out_of_range:
 695	result = -ERANGE;
 696	goto undo;
 697
 698would_block:
 699	q->blocking = sop;
 700
 701	if (sop->sem_flg & IPC_NOWAIT)
 702		result = -EAGAIN;
 703	else
 704		result = 1;
 705
 706undo:
 707	sop--;
 708	while (sop >= sops) {
 709		sem_op = sop->sem_op;
 710		sma->sems[sop->sem_num].semval -= sem_op;
 711		if (sop->sem_flg & SEM_UNDO)
 712			un->semadj[sop->sem_num] += sem_op;
 713		sop--;
 714	}
 715
 716	return result;
 717}
 718
 719static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 720{
 721	int result, sem_op, nsops;
 722	struct sembuf *sop;
 723	struct sem *curr;
 724	struct sembuf *sops;
 725	struct sem_undo *un;
 726
 727	sops = q->sops;
 728	nsops = q->nsops;
 729	un = q->undo;
 730
 731	if (unlikely(q->dupsop))
 732		return perform_atomic_semop_slow(sma, q);
 733
 734	/*
 735	 * We scan the semaphore set twice, first to ensure that the entire
 736	 * operation can succeed, therefore avoiding any pointless writes
 737	 * to shared memory and having to undo such changes in order to block
 738	 * until the operations can go through.
 739	 */
 740	for (sop = sops; sop < sops + nsops; sop++) {
 741		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
 742
 743		curr = &sma->sems[idx];
 744		sem_op = sop->sem_op;
 745		result = curr->semval;
 746
 747		if (!sem_op && result)
 748			goto would_block; /* wait-for-zero */
 749
 750		result += sem_op;
 751		if (result < 0)
 752			goto would_block;
 753
 754		if (result > SEMVMX)
 755			return -ERANGE;
 756
 757		if (sop->sem_flg & SEM_UNDO) {
 758			int undo = un->semadj[sop->sem_num] - sem_op;
 759
 760			/* Exceeding the undo range is an error. */
 761			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 762				return -ERANGE;
 763		}
 764	}
 765
 766	for (sop = sops; sop < sops + nsops; sop++) {
 767		curr = &sma->sems[sop->sem_num];
 768		sem_op = sop->sem_op;
 769
 770		if (sop->sem_flg & SEM_UNDO) {
 771			int undo = un->semadj[sop->sem_num] - sem_op;
 772
 773			un->semadj[sop->sem_num] = undo;
 774		}
 775		curr->semval += sem_op;
 776		ipc_update_pid(&curr->sempid, q->pid);
 777	}
 778
 779	return 0;
 780
 781would_block:
 782	q->blocking = sop;
 783	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
 784}
 785
 786static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
 787					     struct wake_q_head *wake_q)
 788{
 789	struct task_struct *sleeper;
 790
 791	sleeper = get_task_struct(q->sleeper);
 792
 793	/* see SEM_BARRIER_2 for purpose/pairing */
 794	smp_store_release(&q->status, error);
 795
 796	wake_q_add_safe(wake_q, sleeper);
 797}
 798
 799static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
 800{
 801	list_del(&q->list);
 802	if (q->nsops > 1)
 803		sma->complex_count--;
 804}
 805
 806/** check_restart(sma, q)
 807 * @sma: semaphore array
 808 * @q: the operation that just completed
 809 *
 810 * update_queue is O(N^2) when it restarts scanning the whole queue of
 811 * waiting operations. Therefore this function checks if the restart is
 812 * really necessary. It is called after a previously waiting operation
 813 * modified the array.
 814 * Note that wait-for-zero operations are handled without restart.
 815 */
 816static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
 817{
 818	/* pending complex alter operations are too difficult to analyse */
 819	if (!list_empty(&sma->pending_alter))
 820		return 1;
 821
 822	/* we were a sleeping complex operation. Too difficult */
 823	if (q->nsops > 1)
 824		return 1;
 825
 826	/* It is impossible that someone waits for the new value:
 827	 * - complex operations always restart.
 828	 * - wait-for-zero ops are handled separately.
 829	 * - q is a previously sleeping simple operation that
 830	 *   altered the array. It must be a decrement, because
 831	 *   simple increments never sleep.
 832	 * - If there are older (higher priority) decrements
 833	 *   in the queue, then they have observed the original
 834	 *   semval value and couldn't proceed. The operation
 835	 *   decremented the value - thus they won't proceed either.
 836	 */
 837	return 0;
 838}
 839
 840/**
 841 * wake_const_ops - wake up non-alter tasks
 842 * @sma: semaphore array.
 843 * @semnum: semaphore that was modified.
 844 * @wake_q: lockless wake-queue head.
 845 *
 846 * wake_const_ops must be called after a semaphore in a semaphore array
 847 * was set to 0. If complex const operations are pending, wake_const_ops must
 848 * be called with semnum = -1, as well as with the number of each modified
 849 * semaphore.
 850 * The tasks that must be woken up are added to @wake_q. The return code
 851 * is stored in q->status.
 852 * The function returns 1 if at least one operation was completed successfully.
 853 */
 854static int wake_const_ops(struct sem_array *sma, int semnum,
 855			  struct wake_q_head *wake_q)
 856{
 857	struct sem_queue *q, *tmp;
 858	struct list_head *pending_list;
 859	int semop_completed = 0;
 860
 861	if (semnum == -1)
 862		pending_list = &sma->pending_const;
 863	else
 864		pending_list = &sma->sems[semnum].pending_const;
 865
 866	list_for_each_entry_safe(q, tmp, pending_list, list) {
 867		int error = perform_atomic_semop(sma, q);
 868
 869		if (error > 0)
 870			continue;
 871		/* operation completed, remove from queue & wakeup */
 872		unlink_queue(sma, q);
 873
 874		wake_up_sem_queue_prepare(q, error, wake_q);
 875		if (error == 0)
 876			semop_completed = 1;
 877	}
 878
 879	return semop_completed;
 880}
 881
 882/**
 883 * do_smart_wakeup_zero - wake up all wait-for-zero tasks
 884 * @sma: semaphore array
 885 * @sops: operations that were performed
 886 * @nsops: number of operations
 887 * @wake_q: lockless wake-queue head
 888 *
 889 * Checks all required queues for wait-for-zero operations, based
 890 * on the actual changes that were performed on the semaphore array.
 891 * The function returns 1 if at least one operation was completed successfully.
 892 */
 893static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 894				int nsops, struct wake_q_head *wake_q)
 895{
 896	int i;
 897	int semop_completed = 0;
 898	int got_zero = 0;
 899
 900	/* first: the per-semaphore queues, if known */
 901	if (sops) {
 902		for (i = 0; i < nsops; i++) {
 903			int num = sops[i].sem_num;
 904
 905			if (sma->sems[num].semval == 0) {
 906				got_zero = 1;
 907				semop_completed |= wake_const_ops(sma, num, wake_q);
 908			}
 909		}
 910	} else {
 911		/*
 912		 * No sops means modified semaphores not known.
 913		 * Assume all were changed.
 914		 */
 915		for (i = 0; i < sma->sem_nsems; i++) {
 916			if (sma->sems[i].semval == 0) {
 917				got_zero = 1;
 918				semop_completed |= wake_const_ops(sma, i, wake_q);
 919			}
 920		}
 921	}
 922	/*
 923	 * If one of the modified semaphores got 0,
 924	 * then check the global queue, too.
 925	 */
 926	if (got_zero)
 927		semop_completed |= wake_const_ops(sma, -1, wake_q);
 928
 929	return semop_completed;
 930}
 931
 932
 933/**
 934 * update_queue - look for tasks that can be completed.
 935 * @sma: semaphore array.
 936 * @semnum: semaphore that was modified.
 937 * @wake_q: lockless wake-queue head.
 938 *
 939 * update_queue must be called after a semaphore in a semaphore array
 940 * was modified. If multiple semaphores were modified, update_queue must
 941 * be called with semnum = -1, as well as with the number of each modified
 942 * semaphore.
 943 * The tasks that must be woken up are added to @wake_q. The return code
 944 * is stored in q->status.
 945 * The function internally checks if const operations can now succeed.
 946 *
 947 * The function returns 1 if at least one semop was completed successfully.
 948 */
 949static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
 950{
 951	struct sem_queue *q, *tmp;
 952	struct list_head *pending_list;
 953	int semop_completed = 0;
 954
 955	if (semnum == -1)
 956		pending_list = &sma->pending_alter;
 957	else
 958		pending_list = &sma->sems[semnum].pending_alter;
 959
 960again:
 961	list_for_each_entry_safe(q, tmp, pending_list, list) {
 962		int error, restart;
 963
 964		/* If we are scanning the single-sop, per-semaphore list of
 965		 * one semaphore and that semaphore is 0, then it is not
 966		 * necessary to scan further: simple increments
 967		 * that affect only one entry succeed immediately and cannot
 968		 * be in the per-semaphore pending queue, and decrements
 969		 * cannot be successful if the value is already 0.
 970		 */
 971		if (semnum != -1 && sma->sems[semnum].semval == 0)
 972			break;
 973
 974		error = perform_atomic_semop(sma, q);
 975
 976		/* Does q->sleeper still need to sleep? */
 977		if (error > 0)
 978			continue;
 979
 980		unlink_queue(sma, q);
 981
 982		if (error) {
 983			restart = 0;
 984		} else {
 985			semop_completed = 1;
 986			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
 987			restart = check_restart(sma, q);
 988		}
 989
 990		wake_up_sem_queue_prepare(q, error, wake_q);
 991		if (restart)
 992			goto again;
 993	}
 994	return semop_completed;
 995}
 996
 997/**
 998 * set_semotime - set sem_otime
 999 * @sma: semaphore array
1000 * @sops: operations that modified the array, may be NULL
1001 *
1002 * sem_otime is replicated to avoid cache line thrashing.
1003 * This function sets one instance to the current time.
1004 */
1005static void set_semotime(struct sem_array *sma, struct sembuf *sops)
1006{
1007	if (sops == NULL) {
1008		sma->sems[0].sem_otime = ktime_get_real_seconds();
1009	} else {
1010		sma->sems[sops[0].sem_num].sem_otime =
1011						ktime_get_real_seconds();
1012	}
1013}
1014
1015/**
1016 * do_smart_update - optimized update_queue
1017 * @sma: semaphore array
1018 * @sops: operations that were performed
1019 * @nsops: number of operations
1020 * @otime: force setting otime
1021 * @wake_q: lockless wake-queue head
1022 *
1023 * do_smart_update() does the required calls to update_queue and wakeup_zero,
1024 * based on the actual changes that were performed on the semaphore array.
1025 * Note that the function does not do the actual wake-up: the caller is
1026 * responsible for calling wake_up_q().
1027 * It is safe to perform this call after dropping all locks.
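 *
 * Typical call sequence (editorial illustration, mirroring the semop()
 * and SETVAL paths in this file):
 *
 *	DEFINE_WAKE_Q(wake_q);
 *	sem_lock(sma, sops, nsops);
 *	... modify semaphore values ...
 *	do_smart_update(sma, sops, nsops, 0, &wake_q);
 *	sem_unlock(sma, locknum);
 *	rcu_read_unlock();
 *	wake_up_q(&wake_q);	// wakeups after all locks are dropped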
1028 */
1029static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
1030			    int otime, struct wake_q_head *wake_q)
1031{
1032	int i;
1033
1034	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);
1035
1036	if (!list_empty(&sma->pending_alter)) {
1037		/* semaphore array uses the global queue - just process it. */
1038		otime |= update_queue(sma, -1, wake_q);
1039	} else {
1040		if (!sops) {
1041			/*
1042			 * No sops, thus the modified semaphores are not
1043			 * known. Check all.
1044			 */
1045			for (i = 0; i < sma->sem_nsems; i++)
1046				otime |= update_queue(sma, i, wake_q);
1047		} else {
1048			/*
1049			 * Check the semaphores that were increased:
1050			 * - No complex ops, thus all sleeping ops are
1051			 *   decrements.
1052			 * - if we decreased the value, then any sleeping
1053			 *   semaphore ops won't be able to run: If the
1054			 *   previous value was too small, then the new
1055			 *   value will be too small, too.
1056			 */
1057			for (i = 0; i < nsops; i++) {
1058				if (sops[i].sem_op > 0) {
1059					otime |= update_queue(sma,
1060							      sops[i].sem_num, wake_q);
1061				}
1062			}
1063		}
1064	}
1065	if (otime)
1066		set_semotime(sma, sops);
1067}
1068
1069/*
1070 * check_qop: Test if a queued operation sleeps on the semaphore semnum
1071 */
1072static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
1073			bool count_zero)
1074{
1075	struct sembuf *sop = q->blocking;
1076
1077	/*
1078	 * Linux always (since 0.99.10) reported a task as sleeping on all
1079	 * semaphores. This violates SUS, therefore it was changed to the
1080	 * standard compliant behavior.
1081	 * Give the administrators a chance to notice that an application
1082	 * might misbehave because it relies on the Linux behavior.
1083	 */
1084	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
1085			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
1086			current->comm, task_pid_nr(current));
1087
1088	if (sop->sem_num != semnum)
1089		return 0;
1090
1091	if (count_zero && sop->sem_op == 0)
1092		return 1;
1093	if (!count_zero && sop->sem_op < 0)
1094		return 1;
1095
1096	return 0;
1097}
1098
1099/* The following counts are associated to each semaphore:
1100 *   semncnt        number of tasks waiting on semval being nonzero
1101 *   semzcnt        number of tasks waiting on semval being zero
1102 *
1103 * By definition, a task waits only on the semaphore of the first semop
1104 * that cannot proceed, even if additional operations would block, too.
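 *
 * Editorial example: a task blocked in semop() on { .sem_num = 2,
 * .sem_op = -1 } is counted in semncnt of semaphore 2 and reported by
 * semctl(id, 2, GETNCNT); it is not counted for any later sembuf in the
 * same call.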
1105 */
1106static int count_semcnt(struct sem_array *sma, ushort semnum,
1107			bool count_zero)
1108{
1109	struct list_head *l;
1110	struct sem_queue *q;
1111	int semcnt;
1112
1113	semcnt = 0;
1114	/* First: check the simple operations. They are easy to evaluate */
1115	if (count_zero)
1116		l = &sma->sems[semnum].pending_const;
1117	else
1118		l = &sma->sems[semnum].pending_alter;
1119
1120	list_for_each_entry(q, l, list) {
1121		/* all tasks on a per-semaphore list sleep on exactly
1122		 * that semaphore
1123		 */
1124		semcnt++;
1125	}
1126
1127	/* Then: check the complex operations. */
1128	list_for_each_entry(q, &sma->pending_alter, list) {
1129		semcnt += check_qop(sma, semnum, q, count_zero);
1130	}
1131	if (count_zero) {
1132		list_for_each_entry(q, &sma->pending_const, list) {
1133			semcnt += check_qop(sma, semnum, q, count_zero);
1134		}
1135	}
1136	return semcnt;
1137}
1138
1139/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1140 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
1141 * remains locked on exit.
1142 */
1143static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1144{
1145	struct sem_undo *un, *tu;
1146	struct sem_queue *q, *tq;
1147	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1148	int i;
1149	DEFINE_WAKE_Q(wake_q);
1150
1151	/* Free the existing undo structures for this semaphore set.  */
1152	ipc_assert_locked_object(&sma->sem_perm);
1153	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1154		list_del(&un->list_id);
1155		spin_lock(&un->ulp->lock);
1156		un->semid = -1;
1157		list_del_rcu(&un->list_proc);
1158		spin_unlock(&un->ulp->lock);
1159		kvfree_rcu(un, rcu);
1160	}
1161
1162	/* Wake up all pending processes and let them fail with EIDRM. */
1163	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1164		unlink_queue(sma, q);
1165		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1166	}
1167
1168	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1169		unlink_queue(sma, q);
1170		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1171	}
1172	for (i = 0; i < sma->sem_nsems; i++) {
1173		struct sem *sem = &sma->sems[i];
1174		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
1175			unlink_queue(sma, q);
1176			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1177		}
1178		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
1179			unlink_queue(sma, q);
1180			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1181		}
1182		ipc_update_pid(&sem->sempid, NULL);
1183	}
1184
1185	/* Remove the semaphore set from the IDR */
1186	sem_rmid(ns, sma);
1187	sem_unlock(sma, -1);
1188	rcu_read_unlock();
1189
1190	wake_up_q(&wake_q);
1191	ns->used_sems -= sma->sem_nsems;
1192	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1193}
1194
1195static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
1196{
1197	switch (version) {
1198	case IPC_64:
1199		return copy_to_user(buf, in, sizeof(*in));
1200	case IPC_OLD:
1201	    {
1202		struct semid_ds out;
1203
1204		memset(&out, 0, sizeof(out));
1205
1206		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
1207
1208		out.sem_otime	= in->sem_otime;
1209		out.sem_ctime	= in->sem_ctime;
1210		out.sem_nsems	= in->sem_nsems;
1211
1212		return copy_to_user(buf, &out, sizeof(out));
1213	    }
1214	default:
1215		return -EINVAL;
1216	}
1217}
1218
1219static time64_t get_semotime(struct sem_array *sma)
1220{
1221	int i;
1222	time64_t res;
1223
1224	res = sma->sems[0].sem_otime;
1225	for (i = 1; i < sma->sem_nsems; i++) {
1226		time64_t to = sma->sems[i].sem_otime;
1227
1228		if (to > res)
1229			res = to;
1230	}
1231	return res;
1232}
1233
1234static int semctl_stat(struct ipc_namespace *ns, int semid,
1235			 int cmd, struct semid64_ds *semid64)
1236{
1237	struct sem_array *sma;
1238	time64_t semotime;
1239	int err;
1240
1241	memset(semid64, 0, sizeof(*semid64));
1242
1243	rcu_read_lock();
1244	if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
1245		sma = sem_obtain_object(ns, semid);
1246		if (IS_ERR(sma)) {
1247			err = PTR_ERR(sma);
1248			goto out_unlock;
1249		}
1250	} else { /* IPC_STAT */
1251		sma = sem_obtain_object_check(ns, semid);
1252		if (IS_ERR(sma)) {
1253			err = PTR_ERR(sma);
1254			goto out_unlock;
1255		}
1256	}
1257
1258	/* see comment for SHM_STAT_ANY */
1259	if (cmd == SEM_STAT_ANY)
1260		audit_ipc_obj(&sma->sem_perm);
1261	else {
1262		err = -EACCES;
1263		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1264			goto out_unlock;
1265	}
1266
1267	err = security_sem_semctl(&sma->sem_perm, cmd);
1268	if (err)
1269		goto out_unlock;
1270
1271	ipc_lock_object(&sma->sem_perm);
1272
1273	if (!ipc_valid_object(&sma->sem_perm)) {
1274		ipc_unlock_object(&sma->sem_perm);
1275		err = -EIDRM;
1276		goto out_unlock;
1277	}
1278
1279	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
1280	semotime = get_semotime(sma);
1281	semid64->sem_otime = semotime;
1282	semid64->sem_ctime = sma->sem_ctime;
1283#ifndef CONFIG_64BIT
1284	semid64->sem_otime_high = semotime >> 32;
1285	semid64->sem_ctime_high = sma->sem_ctime >> 32;
1286#endif
1287	semid64->sem_nsems = sma->sem_nsems;
1288
1289	if (cmd == IPC_STAT) {
1290		/*
1291		 * As defined in SUS:
1292		 * Return 0 on success
1293		 */
1294		err = 0;
1295	} else {
1296		/*
1297		 * SEM_STAT and SEM_STAT_ANY (both Linux specific)
1298		 * Return the full id, including the sequence number
1299		 */
1300		err = sma->sem_perm.id;
1301	}
1302	ipc_unlock_object(&sma->sem_perm);
1303out_unlock:
1304	rcu_read_unlock();
1305	return err;
1306}
1307
1308static int semctl_info(struct ipc_namespace *ns, int semid,
1309			 int cmd, void __user *p)
1310{
1311	struct seminfo seminfo;
1312	int max_idx;
1313	int err;
1314
1315	err = security_sem_semctl(NULL, cmd);
1316	if (err)
1317		return err;
1318
1319	memset(&seminfo, 0, sizeof(seminfo));
1320	seminfo.semmni = ns->sc_semmni;
1321	seminfo.semmns = ns->sc_semmns;
1322	seminfo.semmsl = ns->sc_semmsl;
1323	seminfo.semopm = ns->sc_semopm;
1324	seminfo.semvmx = SEMVMX;
1325	seminfo.semmnu = SEMMNU;
1326	seminfo.semmap = SEMMAP;
1327	seminfo.semume = SEMUME;
1328	down_read(&sem_ids(ns).rwsem);
1329	if (cmd == SEM_INFO) {
1330		seminfo.semusz = sem_ids(ns).in_use;
1331		seminfo.semaem = ns->used_sems;
1332	} else {
1333		seminfo.semusz = SEMUSZ;
1334		seminfo.semaem = SEMAEM;
1335	}
1336	max_idx = ipc_get_maxidx(&sem_ids(ns));
1337	up_read(&sem_ids(ns).rwsem);
1338	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1339		return -EFAULT;
1340	return (max_idx < 0) ? 0 : max_idx;
1341}
1342
1343static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1344		int val)
1345{
1346	struct sem_undo *un;
1347	struct sem_array *sma;
1348	struct sem *curr;
1349	int err;
1350	DEFINE_WAKE_Q(wake_q);
1351
1352	if (val > SEMVMX || val < 0)
1353		return -ERANGE;
1354
1355	rcu_read_lock();
1356	sma = sem_obtain_object_check(ns, semid);
1357	if (IS_ERR(sma)) {
1358		rcu_read_unlock();
1359		return PTR_ERR(sma);
1360	}
1361
1362	if (semnum < 0 || semnum >= sma->sem_nsems) {
1363		rcu_read_unlock();
1364		return -EINVAL;
1365	}
1366
1367
1368	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1369		rcu_read_unlock();
1370		return -EACCES;
1371	}
1372
1373	err = security_sem_semctl(&sma->sem_perm, SETVAL);
1374	if (err) {
1375		rcu_read_unlock();
1376		return -EACCES;
1377	}
1378
1379	sem_lock(sma, NULL, -1);
1380
1381	if (!ipc_valid_object(&sma->sem_perm)) {
1382		sem_unlock(sma, -1);
1383		rcu_read_unlock();
1384		return -EIDRM;
1385	}
1386
1387	semnum = array_index_nospec(semnum, sma->sem_nsems);
1388	curr = &sma->sems[semnum];
1389
1390	ipc_assert_locked_object(&sma->sem_perm);
1391	list_for_each_entry(un, &sma->list_id, list_id)
1392		un->semadj[semnum] = 0;
1393
1394	curr->semval = val;
1395	ipc_update_pid(&curr->sempid, task_tgid(current));
1396	sma->sem_ctime = ktime_get_real_seconds();
1397	/* maybe some queued-up processes were waiting for this */
1398	do_smart_update(sma, NULL, 0, 0, &wake_q);
1399	sem_unlock(sma, -1);
1400	rcu_read_unlock();
1401	wake_up_q(&wake_q);
1402	return 0;
1403}
1404
1405static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1406		int cmd, void __user *p)
1407{
1408	struct sem_array *sma;
1409	struct sem *curr;
1410	int err, nsems;
1411	ushort fast_sem_io[SEMMSL_FAST];
1412	ushort *sem_io = fast_sem_io;
1413	DEFINE_WAKE_Q(wake_q);
1414
1415	rcu_read_lock();
1416	sma = sem_obtain_object_check(ns, semid);
1417	if (IS_ERR(sma)) {
1418		rcu_read_unlock();
1419		return PTR_ERR(sma);
1420	}
1421
1422	nsems = sma->sem_nsems;
1423
1424	err = -EACCES;
1425	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1426		goto out_rcu_wakeup;
1427
1428	err = security_sem_semctl(&sma->sem_perm, cmd);
1429	if (err)
1430		goto out_rcu_wakeup;
1431
1432	switch (cmd) {
1433	case GETALL:
1434	{
1435		ushort __user *array = p;
1436		int i;
1437
1438		sem_lock(sma, NULL, -1);
1439		if (!ipc_valid_object(&sma->sem_perm)) {
1440			err = -EIDRM;
1441			goto out_unlock;
1442		}
1443		if (nsems > SEMMSL_FAST) {
1444			if (!ipc_rcu_getref(&sma->sem_perm)) {
1445				err = -EIDRM;
1446				goto out_unlock;
1447			}
1448			sem_unlock(sma, -1);
1449			rcu_read_unlock();
1450			sem_io = kvmalloc_array(nsems, sizeof(ushort),
1451						GFP_KERNEL);
1452			if (sem_io == NULL) {
1453				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1454				return -ENOMEM;
1455			}
1456
1457			rcu_read_lock();
1458			sem_lock_and_putref(sma);
1459			if (!ipc_valid_object(&sma->sem_perm)) {
1460				err = -EIDRM;
1461				goto out_unlock;
1462			}
1463		}
1464		for (i = 0; i < sma->sem_nsems; i++)
1465			sem_io[i] = sma->sems[i].semval;
1466		sem_unlock(sma, -1);
1467		rcu_read_unlock();
1468		err = 0;
1469		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1470			err = -EFAULT;
1471		goto out_free;
1472	}
1473	case SETALL:
1474	{
1475		int i;
1476		struct sem_undo *un;
1477
1478		if (!ipc_rcu_getref(&sma->sem_perm)) {
1479			err = -EIDRM;
1480			goto out_rcu_wakeup;
1481		}
1482		rcu_read_unlock();
1483
1484		if (nsems > SEMMSL_FAST) {
1485			sem_io = kvmalloc_array(nsems, sizeof(ushort),
1486						GFP_KERNEL);
1487			if (sem_io == NULL) {
1488				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1489				return -ENOMEM;
1490			}
1491		}
1492
1493		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
1494			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1495			err = -EFAULT;
1496			goto out_free;
1497		}
1498
1499		for (i = 0; i < nsems; i++) {
1500			if (sem_io[i] > SEMVMX) {
1501				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1502				err = -ERANGE;
1503				goto out_free;
1504			}
1505		}
1506		rcu_read_lock();
1507		sem_lock_and_putref(sma);
1508		if (!ipc_valid_object(&sma->sem_perm)) {
1509			err = -EIDRM;
1510			goto out_unlock;
1511		}
1512
1513		for (i = 0; i < nsems; i++) {
1514			sma->sems[i].semval = sem_io[i];
1515			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
1516		}
1517
1518		ipc_assert_locked_object(&sma->sem_perm);
1519		list_for_each_entry(un, &sma->list_id, list_id) {
1520			for (i = 0; i < nsems; i++)
1521				un->semadj[i] = 0;
1522		}
1523		sma->sem_ctime = ktime_get_real_seconds();
1524		/* maybe some queued-up processes were waiting for this */
1525		do_smart_update(sma, NULL, 0, 0, &wake_q);
1526		err = 0;
1527		goto out_unlock;
1528	}
1529	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1530	}
1531	err = -EINVAL;
1532	if (semnum < 0 || semnum >= nsems)
1533		goto out_rcu_wakeup;
1534
1535	sem_lock(sma, NULL, -1);
1536	if (!ipc_valid_object(&sma->sem_perm)) {
1537		err = -EIDRM;
1538		goto out_unlock;
1539	}
1540
1541	semnum = array_index_nospec(semnum, nsems);
1542	curr = &sma->sems[semnum];
1543
1544	switch (cmd) {
1545	case GETVAL:
1546		err = curr->semval;
1547		goto out_unlock;
1548	case GETPID:
1549		err = pid_vnr(curr->sempid);
1550		goto out_unlock;
1551	case GETNCNT:
1552		err = count_semcnt(sma, semnum, 0);
1553		goto out_unlock;
1554	case GETZCNT:
1555		err = count_semcnt(sma, semnum, 1);
1556		goto out_unlock;
1557	}
1558
1559out_unlock:
1560	sem_unlock(sma, -1);
1561out_rcu_wakeup:
1562	rcu_read_unlock();
1563	wake_up_q(&wake_q);
1564out_free:
1565	if (sem_io != fast_sem_io)
1566		kvfree(sem_io);
1567	return err;
1568}
1569
1570static inline unsigned long
1571copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1572{
1573	switch (version) {
1574	case IPC_64:
1575		if (copy_from_user(out, buf, sizeof(*out)))
1576			return -EFAULT;
1577		return 0;
1578	case IPC_OLD:
1579	    {
1580		struct semid_ds tbuf_old;
1581
1582		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1583			return -EFAULT;
1584
1585		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
1586		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
1587		out->sem_perm.mode	= tbuf_old.sem_perm.mode;
1588
1589		return 0;
1590	    }
1591	default:
1592		return -EINVAL;
1593	}
1594}
1595
1596/*
1597 * This function handles some semctl commands which require the rwsem
1598 * to be held in write mode.
1599 * NOTE: no locks must be held, the rwsem is taken inside this function.
1600 */
1601static int semctl_down(struct ipc_namespace *ns, int semid,
1602		       int cmd, struct semid64_ds *semid64)
1603{
1604	struct sem_array *sma;
1605	int err;
1606	struct kern_ipc_perm *ipcp;
1607
1608	down_write(&sem_ids(ns).rwsem);
1609	rcu_read_lock();
1610
1611	ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
1612				      &semid64->sem_perm, 0);
1613	if (IS_ERR(ipcp)) {
1614		err = PTR_ERR(ipcp);
1615		goto out_unlock1;
1616	}
1617
1618	sma = container_of(ipcp, struct sem_array, sem_perm);
1619
1620	err = security_sem_semctl(&sma->sem_perm, cmd);
1621	if (err)
1622		goto out_unlock1;
1623
1624	switch (cmd) {
1625	case IPC_RMID:
1626		sem_lock(sma, NULL, -1);
1627		/* freeary unlocks the ipc object and rcu */
1628		freeary(ns, ipcp);
1629		goto out_up;
1630	case IPC_SET:
1631		sem_lock(sma, NULL, -1);
1632		err = ipc_update_perm(&semid64->sem_perm, ipcp);
1633		if (err)
1634			goto out_unlock0;
1635		sma->sem_ctime = ktime_get_real_seconds();
1636		break;
1637	default:
1638		err = -EINVAL;
1639		goto out_unlock1;
1640	}
1641
1642out_unlock0:
1643	sem_unlock(sma, -1);
1644out_unlock1:
1645	rcu_read_unlock();
1646out_up:
1647	up_write(&sem_ids(ns).rwsem);
1648	return err;
1649}
1650
1651static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
1652{
1653	struct ipc_namespace *ns;
1654	void __user *p = (void __user *)arg;
1655	struct semid64_ds semid64;
1656	int err;
1657
1658	if (semid < 0)
1659		return -EINVAL;
1660
1661	ns = current->nsproxy->ipc_ns;
1662
1663	switch (cmd) {
1664	case IPC_INFO:
1665	case SEM_INFO:
1666		return semctl_info(ns, semid, cmd, p);
1667	case IPC_STAT:
1668	case SEM_STAT:
1669	case SEM_STAT_ANY:
1670		err = semctl_stat(ns, semid, cmd, &semid64);
1671		if (err < 0)
1672			return err;
1673		if (copy_semid_to_user(p, &semid64, version))
1674			err = -EFAULT;
1675		return err;
1676	case GETALL:
1677	case GETVAL:
1678	case GETPID:
1679	case GETNCNT:
1680	case GETZCNT:
1681	case SETALL:
1682		return semctl_main(ns, semid, semnum, cmd, p);
1683	case SETVAL: {
1684		int val;
1685#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1686		/* big-endian 64bit */
1687		val = arg >> 32;
1688#else
1689		/* 32bit or little-endian 64bit */
1690		val = arg;
1691#endif
1692		return semctl_setval(ns, semid, semnum, val);
1693	}
1694	case IPC_SET:
1695		if (copy_semid_from_user(&semid64, p, version))
1696			return -EFAULT;
1697		fallthrough;
1698	case IPC_RMID:
1699		return semctl_down(ns, semid, cmd, &semid64);
1700	default:
1701		return -EINVAL;
1702	}
1703}
1704
1705SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1706{
1707	return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
1708}
1709
1710#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
1711long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
1712{
1713	int version = ipc_parse_version(&cmd);
1714
1715	return ksys_semctl(semid, semnum, cmd, arg, version);
1716}
1717
1718SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1719{
1720	return ksys_old_semctl(semid, semnum, cmd, arg);
1721}
1722#endif
1723
1724#ifdef CONFIG_COMPAT
1725
1726struct compat_semid_ds {
1727	struct compat_ipc_perm sem_perm;
1728	old_time32_t sem_otime;
1729	old_time32_t sem_ctime;
1730	compat_uptr_t sem_base;
1731	compat_uptr_t sem_pending;
1732	compat_uptr_t sem_pending_last;
1733	compat_uptr_t undo;
1734	unsigned short sem_nsems;
1735};
1736
1737static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
1738					int version)
1739{
1740	memset(out, 0, sizeof(*out));
1741	if (version == IPC_64) {
1742		struct compat_semid64_ds __user *p = buf;
1743		return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
1744	} else {
1745		struct compat_semid_ds __user *p = buf;
1746		return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
1747	}
1748}
1749
1750static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
1751					int version)
1752{
1753	if (version == IPC_64) {
1754		struct compat_semid64_ds v;
1755		memset(&v, 0, sizeof(v));
1756		to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
1757		v.sem_otime	 = lower_32_bits(in->sem_otime);
1758		v.sem_otime_high = upper_32_bits(in->sem_otime);
1759		v.sem_ctime	 = lower_32_bits(in->sem_ctime);
1760		v.sem_ctime_high = upper_32_bits(in->sem_ctime);
1761		v.sem_nsems = in->sem_nsems;
1762		return copy_to_user(buf, &v, sizeof(v));
1763	} else {
1764		struct compat_semid_ds v;
1765		memset(&v, 0, sizeof(v));
1766		to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
1767		v.sem_otime = in->sem_otime;
1768		v.sem_ctime = in->sem_ctime;
1769		v.sem_nsems = in->sem_nsems;
1770		return copy_to_user(buf, &v, sizeof(v));
1771	}
1772}
1773
1774static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
1775{
1776	void __user *p = compat_ptr(arg);
1777	struct ipc_namespace *ns;
1778	struct semid64_ds semid64;
1779	int err;
1780
1781	ns = current->nsproxy->ipc_ns;
1782
1783	if (semid < 0)
1784		return -EINVAL;
1785
1786	switch (cmd & (~IPC_64)) {
1787	case IPC_INFO:
1788	case SEM_INFO:
1789		return semctl_info(ns, semid, cmd, p);
1790	case IPC_STAT:
1791	case SEM_STAT:
1792	case SEM_STAT_ANY:
1793		err = semctl_stat(ns, semid, cmd, &semid64);
1794		if (err < 0)
1795			return err;
1796		if (copy_compat_semid_to_user(p, &semid64, version))
1797			err = -EFAULT;
1798		return err;
1799	case GETVAL:
1800	case GETPID:
1801	case GETNCNT:
1802	case GETZCNT:
1803	case GETALL:
1804	case SETALL:
1805		return semctl_main(ns, semid, semnum, cmd, p);
1806	case SETVAL:
1807		return semctl_setval(ns, semid, semnum, arg);
1808	case IPC_SET:
1809		if (copy_compat_semid_from_user(&semid64, p, version))
1810			return -EFAULT;
1811		fallthrough;
1812	case IPC_RMID:
1813		return semctl_down(ns, semid, cmd, &semid64);
1814	default:
1815		return -EINVAL;
1816	}
1817}
1818
1819COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
1820{
1821	return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
1822}
1823
1824#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
1825long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
1826{
1827	int version = compat_ipc_parse_version(&cmd);
1828
1829	return compat_ksys_semctl(semid, semnum, cmd, arg, version);
1830}
1831
1832COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
1833{
1834	return compat_ksys_old_semctl(semid, semnum, cmd, arg);
1835}
1836#endif
1837#endif
1838
1839/* If the task doesn't already have an undo_list, then allocate one
1840 * here.  We guarantee there is only one thread using this undo list,
1841 * and current is THE ONE
1842 *
1843 * If this allocation and assignment succeeds, but later
1844 * portions of this code fail, there is no need to free the sem_undo_list.
1845 * Just let it stay associated with the task, and it'll be freed later
1846 * at exit time.
1847 *
1848 * This can block, so callers must hold no locks.
1849 */
1850static inline int get_undo_list(struct sem_undo_list **undo_listp)
1851{
1852	struct sem_undo_list *undo_list;
1853
1854	undo_list = current->sysvsem.undo_list;
1855	if (!undo_list) {
1856		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
1857		if (undo_list == NULL)
1858			return -ENOMEM;
1859		spin_lock_init(&undo_list->lock);
1860		refcount_set(&undo_list->refcnt, 1);
1861		INIT_LIST_HEAD(&undo_list->list_proc);
1862
1863		current->sysvsem.undo_list = undo_list;
1864	}
1865	*undo_listp = undo_list;
1866	return 0;
1867}
1868
1869static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1870{
1871	struct sem_undo *un;
1872
1873	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
1874				spin_is_locked(&ulp->lock)) {
1875		if (un->semid == semid)
1876			return un;
1877	}
1878	return NULL;
1879}
1880
1881static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1882{
1883	struct sem_undo *un;
1884
1885	assert_spin_locked(&ulp->lock);
1886
1887	un = __lookup_undo(ulp, semid);
1888	if (un) {
1889		list_del_rcu(&un->list_proc);
1890		list_add_rcu(&un->list_proc, &ulp->list_proc);
1891	}
1892	return un;
1893}
1894
1895/**
1896 * find_alloc_undo - lookup (and if not present create) undo array
1897 * @ns: namespace
1898 * @semid: semaphore array id
1899 *
1900 * The function looks up (and if not present creates) the undo structure.
1901 * The size of the undo structure depends on the size of the semaphore
1902 * array, thus the alloc path is not that straightforward.
1903 * Lifetime-rules: sem_undo is rcu-protected; on success, the function
1904 * performs an rcu_read_lock().
1905 */
1906static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1907{
1908	struct sem_array *sma;
1909	struct sem_undo_list *ulp;
1910	struct sem_undo *un, *new;
1911	int nsems, error;
1912
1913	error = get_undo_list(&ulp);
1914	if (error)
1915		return ERR_PTR(error);
1916
1917	rcu_read_lock();
1918	spin_lock(&ulp->lock);
1919	un = lookup_undo(ulp, semid);
1920	spin_unlock(&ulp->lock);
1921	if (likely(un != NULL))
1922		goto out;
1923
1924	/* no undo structure around - allocate one. */
1925	/* step 1: figure out the size of the semaphore array */
1926	sma = sem_obtain_object_check(ns, semid);
1927	if (IS_ERR(sma)) {
1928		rcu_read_unlock();
1929		return ERR_CAST(sma);
1930	}
1931
1932	nsems = sma->sem_nsems;
1933	if (!ipc_rcu_getref(&sma->sem_perm)) {
1934		rcu_read_unlock();
1935		un = ERR_PTR(-EIDRM);
1936		goto out;
1937	}
1938	rcu_read_unlock();
1939
1940	/* step 2: allocate new undo structure */
1941	new = kvzalloc(struct_size(new, semadj, nsems), GFP_KERNEL_ACCOUNT);
1942	if (!new) {
1943		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1944		return ERR_PTR(-ENOMEM);
1945	}
1946
1947	/* step 3: Acquire the lock on semaphore array */
1948	rcu_read_lock();
1949	sem_lock_and_putref(sma);
1950	if (!ipc_valid_object(&sma->sem_perm)) {
1951		sem_unlock(sma, -1);
1952		rcu_read_unlock();
1953		kvfree(new);
1954		un = ERR_PTR(-EIDRM);
1955		goto out;
1956	}
1957	spin_lock(&ulp->lock);
1958
1959	/*
1960	 * step 4: check for races: did someone else allocate the undo struct?
1961	 */
1962	un = lookup_undo(ulp, semid);
1963	if (un) {
1964		spin_unlock(&ulp->lock);
1965		kvfree(new);
1966		goto success;
1967	}
1968	/* step 5: initialize & link new undo structure */
1969	new->ulp = ulp;
1970	new->semid = semid;
1971	assert_spin_locked(&ulp->lock);
1972	list_add_rcu(&new->list_proc, &ulp->list_proc);
1973	ipc_assert_locked_object(&sma->sem_perm);
1974	list_add(&new->list_id, &sma->list_id);
1975	un = new;
1976	spin_unlock(&ulp->lock);
1977success:
1978	sem_unlock(sma, -1);
1979out:
1980	return un;
1981}
1982
1983long __do_semtimedop(int semid, struct sembuf *sops,
1984		unsigned nsops, const struct timespec64 *timeout,
1985		struct ipc_namespace *ns)
1986{
1987	int error = -EINVAL;
1988	struct sem_array *sma;
1989	struct sembuf *sop;
1990	struct sem_undo *un;
1991	int max, locknum;
1992	bool undos = false, alter = false, dupsop = false;
1993	struct sem_queue queue;
1994	unsigned long dup = 0;
1995	ktime_t expires, *exp = NULL;
1996	bool timed_out = false;
1997
1998	if (nsops < 1 || semid < 0)
1999		return -EINVAL;
2000	if (nsops > ns->sc_semopm)
2001		return -E2BIG;
2002
2003	if (timeout) {
2004		if (!timespec64_valid(timeout))
2005			return -EINVAL;
2006		expires = ktime_add_safe(ktime_get(),
2007				timespec64_to_ktime(*timeout));
2008		exp = &expires;
2009	}
2010
2011
2012	max = 0;
2013	for (sop = sops; sop < sops + nsops; sop++) {
2014		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
2015
2016		if (sop->sem_num >= max)
2017			max = sop->sem_num;
2018		if (sop->sem_flg & SEM_UNDO)
2019			undos = true;
2020		if (dup & mask) {
2021			/*
2022			 * There was a previous alter access that appears
2023			 * to have accessed the same semaphore, thus use
2024			 * the dupsop logic. "appears", because the detection
2025			 * can only check % BITS_PER_LONG.
2026			 */
2027			dupsop = true;
2028		}
2029		if (sop->sem_op != 0) {
2030			alter = true;
2031			dup |= mask;
2032		}
2033	}
2034
2035	if (undos) {
2036		/* On success, find_alloc_undo takes the rcu_read_lock */
2037		un = find_alloc_undo(ns, semid);
2038		if (IS_ERR(un)) {
2039			error = PTR_ERR(un);
2040			goto out;
2041		}
2042	} else {
2043		un = NULL;
2044		rcu_read_lock();
2045	}
2046
2047	sma = sem_obtain_object_check(ns, semid);
2048	if (IS_ERR(sma)) {
2049		rcu_read_unlock();
2050		error = PTR_ERR(sma);
2051		goto out;
2052	}
2053
2054	error = -EFBIG;
2055	if (max >= sma->sem_nsems) {
2056		rcu_read_unlock();
2057		goto out;
2058	}
2059
2060	error = -EACCES;
2061	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
2062		rcu_read_unlock();
2063		goto out;
2064	}
2065
2066	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
2067	if (error) {
2068		rcu_read_unlock();
2069		goto out;
2070	}
2071
2072	error = -EIDRM;
2073	locknum = sem_lock(sma, sops, nsops);
2074	/*
2075	 * We eventually might perform the following check in a lockless
2076	 * fashion, considering ipc_valid_object() locking constraints.
2077	 * If nsops == 1 and there is no contention for sem_perm.lock, then
2078	 * only a per-semaphore lock is held and it's OK to proceed with the
2079	 * check below. More details on the fine-grained locking scheme
2080	 * entangled here, and why it is RMID race safe, are in the comments at sem_lock().
2081	 */
2082	if (!ipc_valid_object(&sma->sem_perm))
2083		goto out_unlock;
2084	/*
2085	 * semid identifiers are not unique - find_alloc_undo may have
2086	 * allocated an undo structure, it was invalidated by an RMID,
2087	 * and now a new array has received the same id. Check and fail.
2088	 * This case can be detected by checking un->semid. The existence of
2089	 * "un" itself is guaranteed by rcu.
2090	 */
2091	if (un && un->semid == -1)
2092		goto out_unlock;
2093
2094	queue.sops = sops;
2095	queue.nsops = nsops;
2096	queue.undo = un;
2097	queue.pid = task_tgid(current);
2098	queue.alter = alter;
2099	queue.dupsop = dupsop;
2100
2101	error = perform_atomic_semop(sma, &queue);
2102	if (error == 0) { /* non-blocking successful path */
2103		DEFINE_WAKE_Q(wake_q);
2104
2105		/*
2106		 * If the operation was successful, then do
2107		 * the required updates.
2108		 */
2109		if (alter)
2110			do_smart_update(sma, sops, nsops, 1, &wake_q);
2111		else
2112			set_semotime(sma, sops);
2113
2114		sem_unlock(sma, locknum);
2115		rcu_read_unlock();
2116		wake_up_q(&wake_q);
2117
2118		goto out;
2119	}
2120	if (error < 0) /* non-blocking error path */
2121		goto out_unlock;
2122
2123	/*
2124	 * We need to sleep on this operation, so we put the current
2125	 * task into the pending queue and go to sleep.
2126	 */
2127	if (nsops == 1) {
2128		struct sem *curr;
2129		int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
2130		curr = &sma->sems[idx];
2131
2132		if (alter) {
2133			if (sma->complex_count) {
2134				list_add_tail(&queue.list,
2135						&sma->pending_alter);
2136			} else {
2137
2138				list_add_tail(&queue.list,
2139						&curr->pending_alter);
2140			}
2141		} else {
2142			list_add_tail(&queue.list, &curr->pending_const);
2143		}
2144	} else {
2145		if (!sma->complex_count)
2146			merge_queues(sma);
2147
2148		if (alter)
2149			list_add_tail(&queue.list, &sma->pending_alter);
2150		else
2151			list_add_tail(&queue.list, &sma->pending_const);
2152
2153		sma->complex_count++;
2154	}
2155
2156	do {
2157		/* memory ordering ensured by the lock in sem_lock() */
2158		WRITE_ONCE(queue.status, -EINTR);
2159		queue.sleeper = current;
2160
2161		/* memory ordering is ensured by the lock in sem_lock() */
2162		__set_current_state(TASK_INTERRUPTIBLE);
2163		sem_unlock(sma, locknum);
2164		rcu_read_unlock();
2165
2166		timed_out = !schedule_hrtimeout_range(exp,
2167				current->timer_slack_ns, HRTIMER_MODE_ABS);
2168
2169		/*
2170		 * fastpath: the semop has completed; whether it succeeded or
2171		 * not is, from the syscall pov, quite irrelevant to us at this
2172		 * point; we're done.
2173		 *
2174		 * We _do_ care, nonetheless, about being awoken by a signal or
2175		 * spuriously.  The queue.status is checked again in the
2176		 * slowpath (aka after taking sem_lock), such that we can detect
2177		 * scenarios where we were awakened externally, during the
2178		 * window between wake_q_add() and wake_up_q().
2179		 */
2180		rcu_read_lock();
2181		error = READ_ONCE(queue.status);
2182		if (error != -EINTR) {
2183			/* see SEM_BARRIER_2 for purpose/pairing */
2184			smp_acquire__after_ctrl_dep();
2185			rcu_read_unlock();
2186			goto out;
2187		}
2188
2189		locknum = sem_lock(sma, sops, nsops);
2190
2191		if (!ipc_valid_object(&sma->sem_perm))
2192			goto out_unlock;
2193
2194		/*
2195		 * No necessity for any barrier: we are protected by sem_lock().
2196		 */
2197		error = READ_ONCE(queue.status);
2198
2199		/*
2200		 * If queue.status != -EINTR, we were woken up by another process.
2201		 * Leave without unlink_queue(), but with sem_unlock().
2202		 */
2203		if (error != -EINTR)
2204			goto out_unlock;
2205
2206		/*
2207		 * If an interrupt occurred, we have to clean up the queue.
2208		 */
2209		if (timed_out)
2210			error = -EAGAIN;
2211	} while (error == -EINTR && !signal_pending(current)); /* spurious */
2212
2213	unlink_queue(sma, &queue);
2214
2215out_unlock:
2216	sem_unlock(sma, locknum);
2217	rcu_read_unlock();
2218out:
2219	return error;
2220}
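
/*
 * Illustrative user-space sketch (not part of sem.c): a blocking
 * decrement with a one-second timeout. When the hrtimeout above
 * expires, the syscall fails with EAGAIN.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <sys/ipc.h>
#include <sys/sem.h>

static int demo_timed_wait(int semid)
{
	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	if (semtimedop(semid, &op, 1, &ts) == -1) {
		if (errno == EAGAIN)
			fprintf(stderr, "timed out\n");
		return -1;
	}
	return 0;	/* semaphore 0 was decremented */
}
/* --- end of sketch --- */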
2221
2222static long do_semtimedop(int semid, struct sembuf __user *tsops,
2223		unsigned nsops, const struct timespec64 *timeout)
2224{
2225	struct sembuf fast_sops[SEMOPM_FAST];
2226	struct sembuf *sops = fast_sops;
2227	struct ipc_namespace *ns;
2228	int ret;
2229
2230	ns = current->nsproxy->ipc_ns;
2231	if (nsops > ns->sc_semopm)
2232		return -E2BIG;
2233	if (nsops < 1)
2234		return -EINVAL;
2235
2236	if (nsops > SEMOPM_FAST) {
2237		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
2238		if (sops == NULL)
2239			return -ENOMEM;
2240	}
2241
2242	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
2243		ret = -EFAULT;
2244		goto out_free;
2245	}
2246
2247	ret = __do_semtimedop(semid, sops, nsops, timeout, ns);
2248
2249out_free:
2250	if (sops != fast_sops)
2251		kvfree(sops);
2252
2253	return ret;
2254}
2255
2256long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2257		     unsigned int nsops, const struct __kernel_timespec __user *timeout)
2258{
2259	if (timeout) {
2260		struct timespec64 ts;
2261		if (get_timespec64(&ts, timeout))
2262			return -EFAULT;
2263		return do_semtimedop(semid, tsops, nsops, &ts);
2264	}
2265	return do_semtimedop(semid, tsops, nsops, NULL);
2266}
2267
2268SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
2269		unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
2270{
2271	return ksys_semtimedop(semid, tsops, nsops, timeout);
2272}
2273
2274#ifdef CONFIG_COMPAT_32BIT_TIME
2275long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2276			    unsigned int nsops,
2277			    const struct old_timespec32 __user *timeout)
2278{
2279	if (timeout) {
2280		struct timespec64 ts;
2281		if (get_old_timespec32(&ts, timeout))
2282			return -EFAULT;
2283		return do_semtimedop(semid, tsems, nsops, &ts);
2284	}
2285	return do_semtimedop(semid, tsems, nsops, NULL);
2286}
2287
2288SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
2289		       unsigned int, nsops,
2290		       const struct old_timespec32 __user *, timeout)
2291{
2292	return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
2293}
2294#endif
2295
2296SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2297		unsigned, nsops)
2298{
2299	return do_semtimedop(semid, tsops, nsops, NULL);
2300}
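
/*
 * Illustrative user-space sketch (not part of sem.c): the classic
 * P()/V() pair on semaphore 0 of a set, built on the semop() syscall
 * defined above. sem_op = -1 blocks until the decrement is possible;
 * sem_op = +1 never blocks.
 */
#include <sys/ipc.h>
#include <sys/sem.h>

static int demo_P(int semid)	/* acquire */
{
	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
	return semop(semid, &op, 1);
}

static int demo_V(int semid)	/* release */
{
	struct sembuf op = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };
	return semop(semid, &op, 1);
}
/* --- end of sketch --- */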
2301
2302/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2303 * parent and child tasks.
2304 */
2305
2306int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2307{
2308	struct sem_undo_list *undo_list;
2309	int error;
2310
2311	if (clone_flags & CLONE_SYSVSEM) {
2312		error = get_undo_list(&undo_list);
2313		if (error)
2314			return error;
2315		refcount_inc(&undo_list->refcnt);
2316		tsk->sysvsem.undo_list = undo_list;
2317	} else
2318		tsk->sysvsem.undo_list = NULL;
2319
2320	return 0;
2321}
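
/*
 * Illustrative user-space sketch (not part of sem.c), assuming Linux
 * with glibc: with CLONE_SYSVSEM the child shares the parent's undo
 * list, so the child's exit alone does not apply its SEM_UNDO
 * adjustment - exit_sem() applies it only when the last task sharing
 * the list exits. With plain fork() the child gets its own list and
 * its exit reverts the operation immediately.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/sem.h>
#include <sys/wait.h>

static int demo_child(void *arg)
{
	struct sembuf op = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };

	return semop(*(int *)arg, &op, 1) == -1;
}

static void demo_clone_sysvsem(int semid)
{
	char *stack = malloc(64 * 1024);

	/* the stack grows down on the usual architectures */
	clone(demo_child, stack + 64 * 1024, CLONE_SYSVSEM | SIGCHLD, &semid);
	wait(NULL);
	/* semval is still 1 here: the shared undo list outlives the child */
	free(stack);
}
/* --- end of sketch --- */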
2322
2323/*
2324 * add semadj values to semaphores, free undo structures.
2325 * undo structures are not freed when semaphore arrays are destroyed
2326 * so some of them may be out of date.
2327 * IMPLEMENTATION NOTE: There is some confusion over whether the
2328 * set of adjustments that needs to be done should be done in an atomic
2329 * manner or not. That is, if we are attempting to decrement the semval,
2330 * should we queue up and wait until we can do so legally?
2331 * The original implementation attempted to do this (queue and wait).
2332 * The current implementation does not do so. The POSIX standard
2333 * and SVID should be consulted to determine what behavior is mandated.
2334 */
2335void exit_sem(struct task_struct *tsk)
2336{
2337	struct sem_undo_list *ulp;
2338
2339	ulp = tsk->sysvsem.undo_list;
2340	if (!ulp)
2341		return;
2342	tsk->sysvsem.undo_list = NULL;
2343
2344	if (!refcount_dec_and_test(&ulp->refcnt))
2345		return;
2346
2347	for (;;) {
2348		struct sem_array *sma;
2349		struct sem_undo *un;
2350		int semid, i;
2351		DEFINE_WAKE_Q(wake_q);
2352
2353		cond_resched();
2354
2355		rcu_read_lock();
2356		un = list_entry_rcu(ulp->list_proc.next,
2357				    struct sem_undo, list_proc);
2358		if (&un->list_proc == &ulp->list_proc) {
2359			/*
2360			 * We must wait for freeary() before freeing this ulp,
2361			 * in case we raced with the last sem_undo. There is a small
2362			 * window where we could exit while freeary() hasn't
2363			 * finished unlocking sem_undo_list.
2364			 */
2365			spin_lock(&ulp->lock);
2366			spin_unlock(&ulp->lock);
2367			rcu_read_unlock();
2368			break;
2369		}
2370		spin_lock(&ulp->lock);
2371		semid = un->semid;
2372		spin_unlock(&ulp->lock);
2373
2374		/* exit_sem raced with IPC_RMID, nothing to do */
2375		if (semid == -1) {
2376			rcu_read_unlock();
2377			continue;
2378		}
2379
2380		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2381		/* exit_sem raced with IPC_RMID, nothing to do */
2382		if (IS_ERR(sma)) {
2383			rcu_read_unlock();
2384			continue;
2385		}
2386
2387		sem_lock(sma, NULL, -1);
2388		/* exit_sem raced with IPC_RMID, nothing to do */
2389		if (!ipc_valid_object(&sma->sem_perm)) {
2390			sem_unlock(sma, -1);
2391			rcu_read_unlock();
2392			continue;
2393		}
2394		un = __lookup_undo(ulp, semid);
2395		if (un == NULL) {
2396			/* exit_sem raced with IPC_RMID+semget() that created
2397			 * exactly the same semid. Nothing to do.
2398			 */
2399			sem_unlock(sma, -1);
2400			rcu_read_unlock();
2401			continue;
2402		}
2403
2404		/* remove un from the linked lists */
2405		ipc_assert_locked_object(&sma->sem_perm);
2406		list_del(&un->list_id);
2407
2408		spin_lock(&ulp->lock);
2409		list_del_rcu(&un->list_proc);
2410		spin_unlock(&ulp->lock);
2411
2412		/* perform adjustments registered in un */
2413		for (i = 0; i < sma->sem_nsems; i++) {
2414			struct sem *semaphore = &sma->sems[i];
2415			if (un->semadj[i]) {
2416				semaphore->semval += un->semadj[i];
2417				/*
2418				 * Range checks of the new semaphore value,
2419				 * not defined by SUS:
2420				 * - Some unices ignore the undo entirely
2421				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2422				 * - some cap the value (e.g. FreeBSD caps
2423				 *   at 0, but doesn't enforce SEMVMX)
2424				 *
2425				 * Linux caps the semaphore value, both at 0
2426				 * and at SEMVMX.
2427				 *
2428				 *	Manfred <manfred@colorfullife.com>
2429				 */
2430				if (semaphore->semval < 0)
2431					semaphore->semval = 0;
2432				if (semaphore->semval > SEMVMX)
2433					semaphore->semval = SEMVMX;
2434				ipc_update_pid(&semaphore->sempid, task_tgid(current));
2435			}
2436		}
2437		/* maybe some queued-up processes were waiting for this */
2438		do_smart_update(sma, NULL, 0, 1, &wake_q);
2439		sem_unlock(sma, -1);
2440		rcu_read_unlock();
2441		wake_up_q(&wake_q);
2442
2443		kvfree_rcu(un, rcu);
2444	}
2445	kfree(ulp);
2446}
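
/*
 * Illustrative user-space sketch (not part of sem.c): observing
 * exit_sem() from the outside. The child raises semaphore 0 with
 * SEM_UNDO and exits; the kernel applies the recorded adjustment at
 * exit, so the parent reads 0 again. (Linux initializes a new set's
 * semval to 0.)
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, 0600);
	struct sembuf up = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };

	if (fork() == 0) {
		semop(semid, &up, 1);	/* semval 0 -> 1, semadj[0] = -1 */
		_exit(0);		/* exit_sem() applies semadj: 1 -> 0 */
	}
	wait(NULL);
	printf("semval after child exit: %d\n", semctl(semid, 0, GETVAL));
	semctl(semid, 0, IPC_RMID);
	return 0;
}
/* --- end of sketch --- */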
2447
2448#ifdef CONFIG_PROC_FS
2449static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2450{
2451	struct user_namespace *user_ns = seq_user_ns(s);
2452	struct kern_ipc_perm *ipcp = it;
2453	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
2454	time64_t sem_otime;
2455
2456	/*
2457	 * The proc interface isn't aware of sem_lock(), it calls
2458	 * ipc_lock_object(), i.e. spin_lock(&sma->sem_perm.lock).
2459	 * (in sysvipc_find_ipc)
2460	 * In order to stay compatible with sem_lock(), we must
2461	 * enter / leave complex_mode.
2462	 */
2463	complexmode_enter(sma);
2464
2465	sem_otime = get_semotime(sma);
2466
2467	seq_printf(s,
2468		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
2469		   sma->sem_perm.key,
2470		   sma->sem_perm.id,
2471		   sma->sem_perm.mode,
2472		   sma->sem_nsems,
2473		   from_kuid_munged(user_ns, sma->sem_perm.uid),
2474		   from_kgid_munged(user_ns, sma->sem_perm.gid),
2475		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
2476		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
2477		   sem_otime,
2478		   sma->sem_ctime);
2479
2480	complexmode_tryleave(sma);
2481
2482	return 0;
2483}
2484#endif
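
/*
 * Illustrative user-space sketch (not part of sem.c): dumping the
 * table that sysvipc_sem_proc_show() above renders, one line per
 * semaphore set visible in the current IPC namespace.
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/sysvipc/sem", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
/* --- end of sketch --- */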
v4.6
 
   1/*
   2 * linux/ipc/sem.c
   3 * Copyright (C) 1992 Krishna Balasubramanian
   4 * Copyright (C) 1995 Eric Schenk, Bruno Haible
   5 *
   6 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
   7 *
   8 * SMP-threaded, sysctl's added
   9 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
  10 * Enforced range limit on SEM_UNDO
  11 * (c) 2001 Red Hat Inc
  12 * Lockless wakeup
  13 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
  14 * Further wakeup optimizations, documentation
  15 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
  16 *
  17 * support for audit of ipc object properties and permission changes
  18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  19 *
  20 * namespaces support
  21 * OpenVZ, SWsoft Inc.
  22 * Pavel Emelianov <xemul@openvz.org>
  23 *
  24 * Implementation notes: (May 2010)
  25 * This file implements System V semaphores.
  26 *
  27 * User space visible behavior:
  28 * - FIFO ordering for semop() operations (just FIFO, not starvation
  29 *   protection)
  30 * - multiple semaphore operations that alter the same semaphore in
  31 *   one semop() are handled.
  32 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
  33 *   SETALL calls.
  34 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
  35 * - undo adjustments at process exit are limited to 0..SEMVMX.
  36 * - namespace are supported.
  37 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
  38 *   to /proc/sys/kernel/sem.
  39 * - statistics about the usage are reported in /proc/sysvipc/sem.
  40 *
  41 * Internals:
  42 * - scalability:
  43 *   - all global variables are read-mostly.
  44 *   - semop() calls and semctl(RMID) are synchronized by RCU.
  45 *   - most operations do write operations (actually: spin_lock calls) to
  46 *     the per-semaphore array structure.
  47 *   Thus: Perfect SMP scaling between independent semaphore arrays.
  48 *         If multiple semaphores in one array are used, then cache line
  49 *         trashing on the semaphore array spinlock will limit the scaling.
  50 * - semncnt and semzcnt are calculated on demand in count_semcnt()
  51 * - the task that performs a successful semop() scans the list of all
  52 *   sleeping tasks and completes any pending operations that can be fulfilled.
  53 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
  54 *   (see update_queue())
  55 * - To improve the scalability, the actual wake-up calls are performed after
  56 *   dropping all locks. (see wake_up_sem_queue_prepare(),
  57 *   wake_up_sem_queue_do())
  58 * - All work is done by the waker, the woken up task does not have to do
  59 *   anything - not even acquiring a lock or dropping a refcount.
  60 * - A woken up task may not even touch the semaphore array anymore, it may
  61 *   have been destroyed already by a semctl(RMID).
  62 * - The synchronizations between wake-ups due to a timeout/signal and a
  63 *   wake-up due to a completed semaphore operation is achieved by using an
  64 *   intermediate state (IN_WAKEUP).
  65 * - UNDO values are stored in an array (one per process and per
  66 *   semaphore array, lazily allocated). For backwards compatibility, multiple
  67 *   modes for the UNDO variables are supported (per process, per thread)
  68 *   (see copy_semundo, CLONE_SYSVSEM)
  69 * - There are two lists of the pending operations: a per-array list
  70 *   and per-semaphore list (stored in the array). This allows to achieve FIFO
  71 *   ordering without always scanning all pending operations.
  72 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
  73 */
  74
  75#include <linux/slab.h>
  76#include <linux/spinlock.h>
  77#include <linux/init.h>
  78#include <linux/proc_fs.h>
  79#include <linux/time.h>
  80#include <linux/security.h>
  81#include <linux/syscalls.h>
  82#include <linux/audit.h>
  83#include <linux/capability.h>
  84#include <linux/seq_file.h>
  85#include <linux/rwsem.h>
  86#include <linux/nsproxy.h>
  87#include <linux/ipc_namespace.h>
  88
  89#include <linux/uaccess.h>
  90#include "util.h"
  91
  92/* One semaphore structure for each semaphore in the system. */
  93struct sem {
  94	int	semval;		/* current value */
  95	/*
  96	 * PID of the process that last modified the semaphore. For
  97	 * Linux, specifically these are:
  98	 *  - semop
  99	 *  - semctl, via SETVAL and SETALL.
 100	 *  - at task exit when performing undo adjustments (see exit_sem).
 101	 */
 102	int	sempid;
 103	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
 104	struct list_head pending_alter; /* pending single-sop operations */
 105					/* that alter the semaphore */
 106	struct list_head pending_const; /* pending single-sop operations */
 107					/* that do not alter the semaphore*/
 108	time_t	sem_otime;	/* candidate for sem_otime */
 109} ____cacheline_aligned_in_smp;
 110
 111/* One queue for each sleeping process in the system. */
 112struct sem_queue {
 113	struct list_head	list;	 /* queue of pending operations */
 114	struct task_struct	*sleeper; /* this process */
 115	struct sem_undo		*undo;	 /* undo structure */
 116	int			pid;	 /* process id of requesting process */
 117	int			status;	 /* completion status of operation */
 118	struct sembuf		*sops;	 /* array of pending operations */
 119	struct sembuf		*blocking; /* the operation that blocked */
 120	int			nsops;	 /* number of operations */
 121	int			alter;	 /* does *sops alter the array? */
 122};
 123
 124/* Each task has a list of undo requests. They are executed automatically
 125 * when the process exits.
 126 */
 127struct sem_undo {
 128	struct list_head	list_proc;	/* per-process list: *
 129						 * all undos from one process
 130						 * rcu protected */
 131	struct rcu_head		rcu;		/* rcu struct for sem_undo */
 132	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
 133	struct list_head	list_id;	/* per semaphore array list:
 134						 * all undos for one array */
 135	int			semid;		/* semaphore set identifier */
 136	short			*semadj;	/* array of adjustments */
 137						/* one per semaphore */
 138};
 139
 140/* sem_undo_list controls shared access to the list of sem_undo structures
 141 * that may be shared by all tasks in a CLONE_SYSVSEM task group.
 142 */
 143struct sem_undo_list {
 144	atomic_t		refcnt;
 145	spinlock_t		lock;
 146	struct list_head	list_proc;
 147};
 148
 149
 150#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])
 151
 152#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)
 153
 154static int newary(struct ipc_namespace *, struct ipc_params *);
 155static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
 156#ifdef CONFIG_PROC_FS
 157static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 158#endif
 159
 160#define SEMMSL_FAST	256 /* 512 bytes on stack */
 161#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
 162
 163/*
 164 * Locking:
 165 *	sem_undo.id_next,
 166 *	sem_array.complex_count,
 167 *	sem_array.pending{_alter,_const},
 168 *	sem_array.sem_undo: global sem_lock() for read/write
 169 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 170 *
 171 *	sem_array.sem_base[i].pending_{const,alter}:
 172 *		global or semaphore sem_lock() for read/write
 173 */
 174
 175#define sc_semmsl	sem_ctls[0]
 176#define sc_semmns	sem_ctls[1]
 177#define sc_semopm	sem_ctls[2]
 178#define sc_semmni	sem_ctls[3]
 179
 180void sem_init_ns(struct ipc_namespace *ns)
 181{
 182	ns->sc_semmsl = SEMMSL;
 183	ns->sc_semmns = SEMMNS;
 184	ns->sc_semopm = SEMOPM;
 185	ns->sc_semmni = SEMMNI;
 186	ns->used_sems = 0;
 187	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
 188}
 189
 190#ifdef CONFIG_IPC_NS
 191void sem_exit_ns(struct ipc_namespace *ns)
 192{
 193	free_ipcs(ns, &sem_ids(ns), freeary);
 194	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 195}
 196#endif
 197
 198void __init sem_init(void)
 199{
 200	sem_init_ns(&init_ipc_ns);
 201	ipc_init_proc_interface("sysvipc/sem",
 202				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
 203				IPC_SEM_IDS, sysvipc_sem_proc_show);
 204}
 205
 206/**
 207 * unmerge_queues - unmerge queues, if possible.
 208 * @sma: semaphore array
 209 *
 210 * The function unmerges the wait queues if complex_count is 0.
 211 * It must be called prior to dropping the global semaphore array lock.
 212 */
 213static void unmerge_queues(struct sem_array *sma)
 214{
 215	struct sem_queue *q, *tq;
 216
 217	/* complex operations still around? */
 218	if (sma->complex_count)
 219		return;
 220	/*
 221	 * We will switch back to simple mode.
 222	 * Move all pending operation back into the per-semaphore
 223	 * queues.
 224	 */
 225	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
 226		struct sem *curr;
 227		curr = &sma->sem_base[q->sops[0].sem_num];
 228
 229		list_add_tail(&q->list, &curr->pending_alter);
 230	}
 231	INIT_LIST_HEAD(&sma->pending_alter);
 232}
 233
 234/**
 235 * merge_queues - merge single semop queues into global queue
 236 * @sma: semaphore array
 237 *
 238 * This function merges all per-semaphore queues into the global queue.
 239 * It is necessary to achieve FIFO ordering for the pending single-sop
 240 * operations when a multi-semop operation must sleep.
 241 * Only the alter operations must be moved, the const operations can stay.
 242 */
 243static void merge_queues(struct sem_array *sma)
 244{
 245	int i;
 246	for (i = 0; i < sma->sem_nsems; i++) {
 247		struct sem *sem = sma->sem_base + i;
 248
 249		list_splice_init(&sem->pending_alter, &sma->pending_alter);
 250	}
 251}
 252
 253static void sem_rcu_free(struct rcu_head *head)
 254{
 255	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
 256	struct sem_array *sma = ipc_rcu_to_struct(p);
 257
 258	security_sem_free(sma);
 259	ipc_rcu_free(head);
 260}
 261
 262/*
 263 * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
 264 * are only control barriers.
 265 * The code must pair with spin_unlock(&sem->lock) or
 266 * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
 267 *
 268 * smp_rmb() is sufficient, as writes cannot pass the control barrier.
 269 */
 270#define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
 271
 272/*
 273 * Wait until all currently ongoing simple ops have completed.
 274 * Caller must own sem_perm.lock.
 275 * New simple ops cannot start, because simple ops first check
 276 * that a) sem_perm.lock is free and b) complex_count is 0.
 278 */
 279static void sem_wait_array(struct sem_array *sma)
 280{
 281	int i;
 282	struct sem *sem;
 283
 284	if (sma->complex_count)  {
 285		/* The thread that increased sma->complex_count waited on
 286		 * all sem->lock locks. Thus we don't need to wait again.
 287		 */
 288		return;
 289	}
 290
 291	for (i = 0; i < sma->sem_nsems; i++) {
 292		sem = sma->sem_base + i;
 293		spin_unlock_wait(&sem->lock);
 294	}
 295	ipc_smp_acquire__after_spin_is_unlocked();
 296}
 297
 298/*
 299 * If the request contains only one semaphore operation, and there are
 300 * no complex transactions pending, lock only the semaphore involved.
 301 * Otherwise, lock the entire semaphore array, since we either have
 302 * multiple semaphores in our own semops, or we need to look at
 303 * semaphores from other pending complex operations.
 304 */
 305static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 306			      int nsops)
 307{
 308	struct sem *sem;
 309
 310	if (nsops != 1) {
 311		/* Complex operation - acquire a full lock */
 312		ipc_lock_object(&sma->sem_perm);
 313
 314		/* And wait until all simple ops that are processed
 315		 * right now have dropped their locks.
 316		 */
 317		sem_wait_array(sma);
 318		return -1;
 319	}
 320
 321	/*
 322	 * Only one semaphore affected - try to optimize locking.
 323	 * The rules are:
 324	 * - optimized locking is possible if no complex operation
 325	 *   is either enqueued or processed right now.
 326	 * - The test for enqueued complex ops is simple:
 327	 *      sma->complex_count != 0
 328	 * - Testing for complex ops that are processed right now is
 329	 *   a bit more difficult. Complex ops acquire the full lock
 330 *   and first wait until the running simple ops have completed.
 331	 *   (see above)
 332	 *   Thus: If we own a simple lock and the global lock is free
 333	 *	and complex_count is now 0, then it will stay 0 and
 334	 *	thus just locking sem->lock is sufficient.
 335	 */
 336	sem = sma->sem_base + sops->sem_num;
 337
 338	if (sma->complex_count == 0) {
 339		/*
 340		 * It appears that no complex operation is around.
 341		 * Acquire the per-semaphore lock.
 342		 */
 343		spin_lock(&sem->lock);
 344
 345		/* Then check that the global lock is free */
 346		if (!spin_is_locked(&sma->sem_perm.lock)) {
 347			/*
 348			 * We need a memory barrier with acquire semantics,
 349			 * otherwise we can race with another thread that does:
 350			 *	complex_count++;
 351			 *	spin_unlock(sem_perm.lock);
 352			 */
 353			ipc_smp_acquire__after_spin_is_unlocked();
 354
 355			/*
 356			 * Now repeat the test of complex_count:
 357			 * It can't change anymore until we drop sem->lock.
 358			 * Thus: if it is now 0, then it will stay 0.
 359			 */
 360			if (sma->complex_count == 0) {
 361				/* fast path successful! */
 362				return sops->sem_num;
 363			}
 364		}
 365		spin_unlock(&sem->lock);
 366	}
 367
 368	/* slow path: acquire the full lock */
 369	ipc_lock_object(&sma->sem_perm);
 370
 371	if (sma->complex_count == 0) {
 372		/* False alarm:
 373		 * There is no complex operation, thus we can switch
 374		 * back to the fast path.
 375		 */
 376		spin_lock(&sem->lock);
 377		ipc_unlock_object(&sma->sem_perm);
 378		return sops->sem_num;
 379	} else {
 380		/* Not a false alarm, thus complete the sequence for a
 381		 * full lock.
 382		 */
 383		sem_wait_array(sma);
 384		return -1;
 385	}
 386}
 387
 388static inline void sem_unlock(struct sem_array *sma, int locknum)
 389{
 390	if (locknum == -1) {
 391		unmerge_queues(sma);
 392		ipc_unlock_object(&sma->sem_perm);
 393	} else {
 394		struct sem *sem = sma->sem_base + locknum;
 395		spin_unlock(&sem->lock);
 396	}
 397}
 398
 399/*
 400 * sem_lock_(check_) routines are called in the paths where the rwsem
 401 * is not held.
 402 *
 403 * The caller holds the RCU read lock.
 404 */
 405static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
 406			int id, struct sembuf *sops, int nsops, int *locknum)
 407{
 408	struct kern_ipc_perm *ipcp;
 409	struct sem_array *sma;
 410
 411	ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
 412	if (IS_ERR(ipcp))
 413		return ERR_CAST(ipcp);
 414
 415	sma = container_of(ipcp, struct sem_array, sem_perm);
 416	*locknum = sem_lock(sma, sops, nsops);
 417
 418	/* ipc_rmid() may have already freed the ID while sem_lock
 419	 * was spinning: verify that the structure is still valid
 420	 */
 421	if (ipc_valid_object(ipcp))
 422		return container_of(ipcp, struct sem_array, sem_perm);
 423
 424	sem_unlock(sma, *locknum);
 425	return ERR_PTR(-EINVAL);
 426}
 427
 428static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
 429{
 430	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
 431
 432	if (IS_ERR(ipcp))
 433		return ERR_CAST(ipcp);
 434
 435	return container_of(ipcp, struct sem_array, sem_perm);
 436}
 437
 438static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
 439							int id)
 440{
 441	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
 442
 443	if (IS_ERR(ipcp))
 444		return ERR_CAST(ipcp);
 445
 446	return container_of(ipcp, struct sem_array, sem_perm);
 447}
 448
 449static inline void sem_lock_and_putref(struct sem_array *sma)
 450{
 451	sem_lock(sma, NULL, -1);
 452	ipc_rcu_putref(sma, ipc_rcu_free);
 453}
 454
 455static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 456{
 457	ipc_rmid(&sem_ids(ns), &s->sem_perm);
 458}
 459
 460/*
 461 * Lockless wakeup algorithm:
 462 * Without the check/retry algorithm a lockless wakeup is possible:
 463 * - queue.status is initialized to -EINTR before blocking.
 464 * - wakeup is performed by
 465 *	* unlinking the queue entry from the pending list
 466 *	* setting queue.status to IN_WAKEUP
 467 *	  This is the notification for the blocked thread that a
 468 *	  result value is imminent.
 469 *	* call wake_up_process
 470 *	* set queue.status to the final value.
 471 * - the previously blocked thread checks queue.status:
 472 *	* if it's IN_WAKEUP, then it must wait until the value changes
 473 *	* if it's not -EINTR, then the operation was completed by
 474 *	  update_queue. semtimedop can return queue.status without
 475 *	  performing any operation on the sem array.
 476 *	* otherwise it must acquire the spinlock and check what's up.
 477 *
 478 * The two-stage algorithm is necessary to protect against the following
 479 * races:
 480 * - if queue.status is set after wake_up_process, then the woken up idle
 481 *   thread could race forward and try (and fail) to acquire sma->lock
 482 *   before update_queue had a chance to set queue.status
 483 * - if queue.status is written before wake_up_process and if the
 484 *   blocked process is woken up by a signal between writing
 485 *   queue.status and the wake_up_process, then the woken up
 486 *   process could return from semtimedop and die by calling
 487 *   sys_exit before wake_up_process is called. Then wake_up_process
 488 *   will oops, because the task structure is already invalid.
 489 *   (yes, this happened on s390 with sysv msg).
 490 *
 491 */
 492#define IN_WAKEUP	1
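
/*
 * Illustrative user-space analogue (not part of sem.c) of the
 * two-stage protocol above, using C11 atomics and pthreads; the
 * signal/timeout path and the "acquire the spinlock and check" case
 * are omitted. While status reads IN_WAKEUP the sleeper only spins:
 * the waker has committed to the wake-up but has not yet published
 * the final result.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

#define DEMO_EINTR	(-4)
#define DEMO_IN_WAKEUP	1

struct demo_queue {
	atomic_int status;		/* starts at DEMO_EINTR */
	pthread_mutex_t lock;		/* PTHREAD_MUTEX_INITIALIZER */
	pthread_cond_t cond;		/* PTHREAD_COND_INITIALIZER */
};

static void demo_waker(struct demo_queue *q, int result)
{
	atomic_store(&q->status, DEMO_IN_WAKEUP);	/* result is imminent */
	pthread_mutex_lock(&q->lock);
	pthread_cond_signal(&q->cond);			/* the wake_up_process() step */
	pthread_mutex_unlock(&q->lock);
	atomic_store(&q->status, result);		/* publish the final value */
}

static int demo_sleeper(struct demo_queue *q)
{
	pthread_mutex_lock(&q->lock);
	while (atomic_load(&q->status) == DEMO_EINTR)
		pthread_cond_wait(&q->cond, &q->lock);
	pthread_mutex_unlock(&q->lock);

	while (atomic_load(&q->status) == DEMO_IN_WAKEUP)
		sched_yield();				/* wait out stage one */
	return atomic_load(&q->status);
}
/* --- end of sketch --- */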
 493
 494/**
 495 * newary - Create a new semaphore set
 496 * @ns: namespace
 497 * @params: ptr to the structure that contains key, semflg and nsems
 498 *
 499 * Called with sem_ids.rwsem held (as a writer)
 500 */
 501static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 502{
 503	int id;
 504	int retval;
 505	struct sem_array *sma;
 506	int size;
 507	key_t key = params->key;
 508	int nsems = params->u.nsems;
 509	int semflg = params->flg;
 510	int i;
 511
 512	if (!nsems)
 513		return -EINVAL;
 514	if (ns->used_sems + nsems > ns->sc_semmns)
 515		return -ENOSPC;
 516
 517	size = sizeof(*sma) + nsems * sizeof(struct sem);
 518	sma = ipc_rcu_alloc(size);
 519	if (!sma)
 520		return -ENOMEM;
 521
 522	memset(sma, 0, size);
 523
 524	sma->sem_perm.mode = (semflg & S_IRWXUGO);
 525	sma->sem_perm.key = key;
 526
 527	sma->sem_perm.security = NULL;
 528	retval = security_sem_alloc(sma);
 529	if (retval) {
 530		ipc_rcu_putref(sma, ipc_rcu_free);
 531		return retval;
 532	}
 533
 534	sma->sem_base = (struct sem *) &sma[1];
 535
 536	for (i = 0; i < nsems; i++) {
 537		INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
 538		INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
 539		spin_lock_init(&sma->sem_base[i].lock);
 540	}
 541
 542	sma->complex_count = 0;
 543	INIT_LIST_HEAD(&sma->pending_alter);
 544	INIT_LIST_HEAD(&sma->pending_const);
 545	INIT_LIST_HEAD(&sma->list_id);
 546	sma->sem_nsems = nsems;
 547	sma->sem_ctime = get_seconds();
 548
 549	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
 550	if (id < 0) {
 551		ipc_rcu_putref(sma, sem_rcu_free);
 552		return id;
 553	}
 554	ns->used_sems += nsems;
 555
 556	sem_unlock(sma, -1);
 557	rcu_read_unlock();
 558
 559	return sma->sem_perm.id;
 560}
 561
 562
 563/*
 564 * Called with sem_ids.rwsem and ipcp locked.
 565 */
 566static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
 567{
 568	struct sem_array *sma;
 569
 570	sma = container_of(ipcp, struct sem_array, sem_perm);
 571	return security_sem_associate(sma, semflg);
 572}
 573
 574/*
 575 * Called with sem_ids.rwsem and ipcp locked.
 576 */
 577static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
 578				struct ipc_params *params)
 579{
 580	struct sem_array *sma;
 581
 582	sma = container_of(ipcp, struct sem_array, sem_perm);
 583	if (params->u.nsems > sma->sem_nsems)
 584		return -EINVAL;
 585
 586	return 0;
 587}
 588
 589SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 590{
 591	struct ipc_namespace *ns;
 592	static const struct ipc_ops sem_ops = {
 593		.getnew = newary,
 594		.associate = sem_security,
 595		.more_checks = sem_more_checks,
 596	};
 597	struct ipc_params sem_params;
 598
 599	ns = current->nsproxy->ipc_ns;
 600
 601	if (nsems < 0 || nsems > ns->sc_semmsl)
 602		return -EINVAL;
 603
 604	sem_params.key = key;
 605	sem_params.flg = semflg;
 606	sem_params.u.nsems = nsems;
 607
 608	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 609}
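
/*
 * Illustrative user-space sketch (not part of sem.c): creating (or
 * attaching to) a three-semaphore set through the syscall above.
 * The ftok() path is arbitrary and must exist.
 */
#include <sys/ipc.h>
#include <sys/sem.h>

static int demo_get_set(void)
{
	key_t key = ftok("/tmp", 'S');

	if (key == (key_t)-1)
		return -1;
	return semget(key, 3, IPC_CREAT | 0600);
}
/* --- end of sketch --- */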
 610
 611/**
 612 * perform_atomic_semop - Perform (if possible) a semaphore operation
 613 * @sma: semaphore array
 614 * @q: struct sem_queue that describes the operation
 615 *
 616 * Returns 0 if the operation was possible.
 617 * Returns 1 if the operation is impossible, the caller must sleep.
 618 * Negative values are error codes.
 619 */
 620static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 621{
 622	int result, sem_op, nsops, pid;
 623	struct sembuf *sop;
 624	struct sem *curr;
 625	struct sembuf *sops;
 626	struct sem_undo *un;
 627
 628	sops = q->sops;
 629	nsops = q->nsops;
 630	un = q->undo;
 631
 632	for (sop = sops; sop < sops + nsops; sop++) {
 633		curr = sma->sem_base + sop->sem_num;
 634		sem_op = sop->sem_op;
 635		result = curr->semval;
 636
 637		if (!sem_op && result)
 638			goto would_block;
 639
 640		result += sem_op;
 641		if (result < 0)
 642			goto would_block;
 643		if (result > SEMVMX)
 644			goto out_of_range;
 645
 646		if (sop->sem_flg & SEM_UNDO) {
 647			int undo = un->semadj[sop->sem_num] - sem_op;
 648			/* Exceeding the undo range is an error. */
 649			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 650				goto out_of_range;
 651			un->semadj[sop->sem_num] = undo;
 652		}
 653
 654		curr->semval = result;
 655	}
 656
 657	sop--;
 658	pid = q->pid;
 659	while (sop >= sops) {
 660		sma->sem_base[sop->sem_num].sempid = pid;
 661		sop--;
 662	}
 663
 664	return 0;
 665
 666out_of_range:
 667	result = -ERANGE;
 668	goto undo;
 669
 670would_block:
 671	q->blocking = sop;
 672
 673	if (sop->sem_flg & IPC_NOWAIT)
 674		result = -EAGAIN;
 675	else
 676		result = 1;
 677
 678undo:
 679	sop--;
 680	while (sop >= sops) {
 681		sem_op = sop->sem_op;
 682		sma->sem_base[sop->sem_num].semval -= sem_op;
 683		if (sop->sem_flg & SEM_UNDO)
 684			un->semadj[sop->sem_num] += sem_op;
 685		sop--;
 686	}
 687
 688	return result;
 689}
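
/*
 * Illustrative user-space model (not part of sem.c) of the
 * all-or-nothing contract implemented by perform_atomic_semop():
 * apply the operations in order and, if any would block or go out of
 * range, roll back everything already applied. The SEM_UNDO and
 * sempid bookkeeping of the real function is omitted.
 */
#include <sys/sem.h>

#define DEMO_SEMVMX 32767

static int demo_try_semops(int *semval, const struct sembuf *sops, int nsops)
{
	int i, j;

	for (i = 0; i < nsops; i++) {
		int result = semval[sops[i].sem_num] + sops[i].sem_op;

		if ((sops[i].sem_op == 0 && semval[sops[i].sem_num] != 0) ||
		    result < 0 || result > DEMO_SEMVMX)
			goto rollback;
		semval[sops[i].sem_num] = result;
	}
	return 0;		/* all operations applied */

rollback:
	for (j = 0; j < i; j++)
		semval[sops[j].sem_num] -= sops[j].sem_op;
	return 1;		/* the caller would sleep or fail */
}
/* --- end of sketch --- */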
 690
 691/** wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
 692 * @q: queue entry that must be signaled
 693 * @error: Error value for the signal
 694 *
 695 * Prepare the wake-up of the queue entry q.
 696 */
 697static void wake_up_sem_queue_prepare(struct list_head *pt,
 698				struct sem_queue *q, int error)
 699{
 700	if (list_empty(pt)) {
 701		/*
 702		 * Hold preempt off so that we don't get preempted and have the
 703		 * wakee busy-wait until we're scheduled back on.
 704		 */
 705		preempt_disable();
 706	}
 707	q->status = IN_WAKEUP;
 708	q->pid = error;
 709
 710	list_add_tail(&q->list, pt);
 711}
 712
 713/**
 714 * wake_up_sem_queue_do - do the actual wake-up
 715 * @pt: list of tasks to be woken up
 716 *
 717 * Do the actual wake-up.
 718 * The function is called without any locks held, thus the semaphore array
 719 * could be destroyed already and the tasks can disappear as soon as the
 720 * status is set to the actual return code.
 721 */
 722static void wake_up_sem_queue_do(struct list_head *pt)
 723{
 724	struct sem_queue *q, *t;
 725	int did_something;
 726
 727	did_something = !list_empty(pt);
 728	list_for_each_entry_safe(q, t, pt, list) {
 729		wake_up_process(q->sleeper);
 730		/* q can disappear immediately after writing q->status. */
 731		smp_wmb();
 732		q->status = q->pid;
 733	}
 734	if (did_something)
 735		preempt_enable();
 736}
 737
 738static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
 739{
 740	list_del(&q->list);
 741	if (q->nsops > 1)
 742		sma->complex_count--;
 743}
 744
 745/** check_restart(sma, q)
 746 * @sma: semaphore array
 747 * @q: the operation that just completed
 748 *
 749 * update_queue is O(N^2) when it restarts scanning the whole queue of
 750 * waiting operations. Therefore this function checks if the restart is
 751 * really necessary. It is called after a previously waiting operation
 752 * modified the array.
 753 * Note that wait-for-zero operations are handled without restart.
 754 */
 755static int check_restart(struct sem_array *sma, struct sem_queue *q)
 756{
 757	/* pending complex alter operations are too difficult to analyse */
 758	if (!list_empty(&sma->pending_alter))
 759		return 1;
 760
 761	/* we were a sleeping complex operation. Too difficult */
 762	if (q->nsops > 1)
 763		return 1;
 764
 765	/* It is impossible that someone waits for the new value:
 766	 * - complex operations always restart.
 767 * - wait-for-zero are handled separately.
 768	 * - q is a previously sleeping simple operation that
 769	 *   altered the array. It must be a decrement, because
 770	 *   simple increments never sleep.
 771	 * - If there are older (higher priority) decrements
 772	 *   in the queue, then they have observed the original
 773	 *   semval value and couldn't proceed. The operation
 774 *   decremented the value - thus they won't proceed either.
 775	 */
 776	return 0;
 777}
 778
 779/**
 780 * wake_const_ops - wake up non-alter tasks
 781 * @sma: semaphore array.
 782 * @semnum: semaphore that was modified.
 783 * @pt: list head for the tasks that must be woken up.
 784 *
 785 * wake_const_ops must be called after a semaphore in a semaphore array
 786 * was set to 0. If complex const operations are pending, wake_const_ops must
 787 * be called with semnum = -1, as well as with the number of each modified
 788 * semaphore.
 789 * The tasks that must be woken up are added to @pt. The return code
 790 * is stored in q->pid.
 791 * The function returns 1 if at least one operation was completed successfully.
 792 */
 793static int wake_const_ops(struct sem_array *sma, int semnum,
 794				struct list_head *pt)
 795{
 796	struct sem_queue *q;
 797	struct list_head *walk;
 798	struct list_head *pending_list;
 799	int semop_completed = 0;
 800
 801	if (semnum == -1)
 802		pending_list = &sma->pending_const;
 803	else
 804		pending_list = &sma->sem_base[semnum].pending_const;
 805
 806	walk = pending_list->next;
 807	while (walk != pending_list) {
 808		int error;
 809
 810		q = container_of(walk, struct sem_queue, list);
 811		walk = walk->next;
 812
 813		error = perform_atomic_semop(sma, q);
 814
 815		if (error <= 0) {
 816			/* operation completed, remove from queue & wakeup */
 817
 818			unlink_queue(sma, q);
 819
 820			wake_up_sem_queue_prepare(pt, q, error);
 821			if (error == 0)
 822				semop_completed = 1;
 823		}
 824	}
 825	return semop_completed;
 826}
 827
 828/**
 829 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 830 * @sma: semaphore array
 831 * @sops: operations that were performed
 832 * @nsops: number of operations
 833 * @pt: list head of the tasks that must be woken up.
 834 *
 835 * Checks all required queues for wait-for-zero operations, based
 836 * on the actual changes that were performed on the semaphore array.
 837 * The function returns 1 if at least one operation was completed successfully.
 838 */
 839static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 840					int nsops, struct list_head *pt)
 841{
 842	int i;
 843	int semop_completed = 0;
 844	int got_zero = 0;
 845
 846	/* first: the per-semaphore queues, if known */
 847	if (sops) {
 848		for (i = 0; i < nsops; i++) {
 849			int num = sops[i].sem_num;
 850
 851			if (sma->sem_base[num].semval == 0) {
 852				got_zero = 1;
 853				semop_completed |= wake_const_ops(sma, num, pt);
 854			}
 855		}
 856	} else {
 857		/*
 858		 * No sops means modified semaphores not known.
 859		 * Assume all were changed.
 860		 */
 861		for (i = 0; i < sma->sem_nsems; i++) {
 862			if (sma->sem_base[i].semval == 0) {
 863				got_zero = 1;
 864				semop_completed |= wake_const_ops(sma, i, pt);
 865			}
 866		}
 867	}
 868	/*
 869	 * If one of the modified semaphores got 0,
 870	 * then check the global queue, too.
 871	 */
 872	if (got_zero)
 873		semop_completed |= wake_const_ops(sma, -1, pt);
 874
 875	return semop_completed;
 876}
 877
 878
 879/**
 880 * update_queue - look for tasks that can be completed.
 881 * @sma: semaphore array.
 882 * @semnum: semaphore that was modified.
 883 * @pt: list head for the tasks that must be woken up.
 884 *
 885 * update_queue must be called after a semaphore in a semaphore array
 886 * was modified. If multiple semaphores were modified, update_queue must
 887 * be called with semnum = -1, as well as with the number of each modified
 888 * semaphore.
 889 * The tasks that must be woken up are added to @pt. The return code
 890 * is stored in q->pid.
 891 * The function internally checks if const operations can now succeed.
 892 *
 893 * The function returns 1 if at least one semop was completed successfully.
 894 */
 895static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
 896{
 897	struct sem_queue *q;
 898	struct list_head *walk;
 899	struct list_head *pending_list;
 900	int semop_completed = 0;
 901
 902	if (semnum == -1)
 903		pending_list = &sma->pending_alter;
 904	else
 905		pending_list = &sma->sem_base[semnum].pending_alter;
 906
 907again:
 908	walk = pending_list->next;
 909	while (walk != pending_list) {
 910		int error, restart;
 911
 912		q = container_of(walk, struct sem_queue, list);
 913		walk = walk->next;
 914
 915		/* If we are scanning the single-sop, per-semaphore list of
 916		 * one semaphore and that semaphore is 0, then it is not
 917		 * necessary to scan further: simple increments
 918		 * that affect only one entry succeed immediately and cannot
 919		 * be in the per-semaphore pending queue, and decrements
 920		 * cannot be successful if the value is already 0.
 921		 */
 922		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
 923			break;
 924
 925		error = perform_atomic_semop(sma, q);
 926
 927		/* Does q->sleeper still need to sleep? */
 928		if (error > 0)
 929			continue;
 930
 931		unlink_queue(sma, q);
 932
 933		if (error) {
 934			restart = 0;
 935		} else {
 936			semop_completed = 1;
 937			do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
 938			restart = check_restart(sma, q);
 939		}
 940
 941		wake_up_sem_queue_prepare(pt, q, error);
 942		if (restart)
 943			goto again;
 944	}
 945	return semop_completed;
 946}
 947
 948/**
 949 * set_semotime - set sem_otime
 950 * @sma: semaphore array
 951 * @sops: operations that modified the array, may be NULL
 952 *
 953 * sem_otime is replicated to avoid cache line trashing.
 954 * This function sets one instance to the current time.
 955 */
 956static void set_semotime(struct sem_array *sma, struct sembuf *sops)
 957{
 958	if (sops == NULL) {
 959		sma->sem_base[0].sem_otime = get_seconds();
 960	} else {
 961		sma->sem_base[sops[0].sem_num].sem_otime =
 962							get_seconds();
 963	}
 964}
 965
 966/**
 967 * do_smart_update - optimized update_queue
 968 * @sma: semaphore array
 969 * @sops: operations that were performed
 970 * @nsops: number of operations
 971 * @otime: force setting otime
 972 * @pt: list head of the tasks that must be woken up.
 973 *
 974 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 975 * based on the actual changes that were performed on the semaphore array.
 976 * Note that the function does not do the actual wake-up: the caller is
 977 * responsible for calling wake_up_sem_queue_do(@pt).
 978 * It is safe to perform this call after dropping all locks.
 979 */
 980static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
 981			int otime, struct list_head *pt)
 982{
 983	int i;
 984
 985	otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);
 986
 987	if (!list_empty(&sma->pending_alter)) {
 988		/* semaphore array uses the global queue - just process it. */
 989		otime |= update_queue(sma, -1, pt);
 990	} else {
 991		if (!sops) {
 992			/*
 993			 * No sops, thus the modified semaphores are not
 994			 * known. Check all.
 995			 */
 996			for (i = 0; i < sma->sem_nsems; i++)
 997				otime |= update_queue(sma, i, pt);
 998		} else {
 999			/*
1000			 * Check the semaphores that were increased:
1001			 * - No complex ops, thus all sleeping ops are
1002			 *   decrements.
1003			 * - if we decreased the value, then any sleeping
1004			 *   semaphore ops won't be able to run: If the
1005			 *   previous value was too small, then the new
1006			 *   value will be too small, too.
1007			 */
1008			for (i = 0; i < nsops; i++) {
1009				if (sops[i].sem_op > 0) {
1010					otime |= update_queue(sma,
1011							sops[i].sem_num, pt);
1012				}
1013			}
1014		}
1015	}
1016	if (otime)
1017		set_semotime(sma, sops);
1018}
1019
1020/*
1021 * check_qop: Test if a queued operation sleeps on the semaphore semnum
1022 */
1023static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
1024			bool count_zero)
1025{
1026	struct sembuf *sop = q->blocking;
1027
1028	/*
1029	 * Linux always (since 0.99.10) reported a task as sleeping on all
1030	 * semaphores. This violates SUS, therefore it was changed to the
1031	 * standard compliant behavior.
1032	 * Give the administrators a chance to notice that an application
1033	 * might misbehave because it relies on the Linux behavior.
1034	 */
1035	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
1036			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
1037			current->comm, task_pid_nr(current));
1038
1039	if (sop->sem_num != semnum)
1040		return 0;
1041
1042	if (count_zero && sop->sem_op == 0)
1043		return 1;
1044	if (!count_zero && sop->sem_op < 0)
1045		return 1;
1046
1047	return 0;
1048}
1049
1050/* The following counts are associated with each semaphore:
1051 *   semncnt        number of tasks waiting on semval being nonzero
1052 *   semzcnt        number of tasks waiting on semval being zero
1053 *
1054 * By definition, a task waits only on the semaphore of the first semop
1055 * that cannot proceed, even if additional operations would block, too.
1056 */
1057static int count_semcnt(struct sem_array *sma, ushort semnum,
1058			bool count_zero)
1059{
1060	struct list_head *l;
1061	struct sem_queue *q;
1062	int semcnt;
1063
1064	semcnt = 0;
1065	/* First: check the simple operations. They are easy to evaluate */
1066	if (count_zero)
1067		l = &sma->sem_base[semnum].pending_const;
1068	else
1069		l = &sma->sem_base[semnum].pending_alter;
1070
1071	list_for_each_entry(q, l, list) {
1072		/* all tasks on a per-semaphore list sleep on exactly
1073		 * that semaphore
1074		 */
1075		semcnt++;
1076	}
1077
1078	/* Then: check the complex operations. */
1079	list_for_each_entry(q, &sma->pending_alter, list) {
1080		semcnt += check_qop(sma, semnum, q, count_zero);
1081	}
1082	if (count_zero) {
1083		list_for_each_entry(q, &sma->pending_const, list) {
1084			semcnt += check_qop(sma, semnum, q, count_zero);
1085		}
1086	}
1087	return semcnt;
1088}
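
/*
 * Illustrative user-space sketch (not part of sem.c): querying the
 * counts that count_semcnt() computes. GETNCNT reports tasks waiting
 * for the value to increase, GETZCNT tasks waiting for it to reach
 * zero.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

static void demo_print_counts(int semid, int semnum)
{
	printf("semncnt=%d semzcnt=%d\n",
	       semctl(semid, semnum, GETNCNT),
	       semctl(semid, semnum, GETZCNT));
}
/* --- end of sketch --- */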
1089
1090/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1091 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
1092 * remains locked on exit.
1093 */
1094static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1095{
1096	struct sem_undo *un, *tu;
1097	struct sem_queue *q, *tq;
1098	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1099	struct list_head tasks;
1100	int i;
1101
1102	/* Free the existing undo structures for this semaphore set.  */
1103	ipc_assert_locked_object(&sma->sem_perm);
1104	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1105		list_del(&un->list_id);
1106		spin_lock(&un->ulp->lock);
1107		un->semid = -1;
1108		list_del_rcu(&un->list_proc);
1109		spin_unlock(&un->ulp->lock);
1110		kfree_rcu(un, rcu);
1111	}
1112
1113	/* Wake up all pending processes and let them fail with EIDRM. */
1114	INIT_LIST_HEAD(&tasks);
1115	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1116		unlink_queue(sma, q);
1117		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1118	}
1119
1120	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1121		unlink_queue(sma, q);
1122		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1123	}
1124	for (i = 0; i < sma->sem_nsems; i++) {
1125		struct sem *sem = sma->sem_base + i;
1126		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
1127			unlink_queue(sma, q);
1128			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1129		}
1130		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
1131			unlink_queue(sma, q);
1132			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1133		}
1134	}
1135
1136	/* Remove the semaphore set from the IDR */
1137	sem_rmid(ns, sma);
1138	sem_unlock(sma, -1);
1139	rcu_read_unlock();
1140
1141	wake_up_sem_queue_do(&tasks);
1142	ns->used_sems -= sma->sem_nsems;
1143	ipc_rcu_putref(sma, sem_rcu_free);
1144}
1145
1146static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
1147{
1148	switch (version) {
1149	case IPC_64:
1150		return copy_to_user(buf, in, sizeof(*in));
1151	case IPC_OLD:
1152	    {
1153		struct semid_ds out;
1154
1155		memset(&out, 0, sizeof(out));
1156
1157		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
1158
1159		out.sem_otime	= in->sem_otime;
1160		out.sem_ctime	= in->sem_ctime;
1161		out.sem_nsems	= in->sem_nsems;
1162
1163		return copy_to_user(buf, &out, sizeof(out));
1164	    }
1165	default:
1166		return -EINVAL;
1167	}
1168}
1169
1170static time_t get_semotime(struct sem_array *sma)
1171{
1172	int i;
1173	time_t res;
1174
1175	res = sma->sem_base[0].sem_otime;
1176	for (i = 1; i < sma->sem_nsems; i++) {
1177		time_t to = sma->sem_base[i].sem_otime;
1178
1179		if (to > res)
1180			res = to;
1181	}
1182	return res;
1183}
1184
1185static int semctl_nolock(struct ipc_namespace *ns, int semid,
1186			 int cmd, int version, void __user *p)
1187{
1188	int err;
1189	struct sem_array *sma;
1190
1191	switch (cmd) {
1192	case IPC_INFO:
1193	case SEM_INFO:
1194	{
1195		struct seminfo seminfo;
1196		int max_id;
1197
1198		err = security_sem_semctl(NULL, cmd);
1199		if (err)
1200			return err;
1201
1202		memset(&seminfo, 0, sizeof(seminfo));
1203		seminfo.semmni = ns->sc_semmni;
1204		seminfo.semmns = ns->sc_semmns;
1205		seminfo.semmsl = ns->sc_semmsl;
1206		seminfo.semopm = ns->sc_semopm;
1207		seminfo.semvmx = SEMVMX;
1208		seminfo.semmnu = SEMMNU;
1209		seminfo.semmap = SEMMAP;
1210		seminfo.semume = SEMUME;
1211		down_read(&sem_ids(ns).rwsem);
1212		if (cmd == SEM_INFO) {
1213			seminfo.semusz = sem_ids(ns).in_use;
1214			seminfo.semaem = ns->used_sems;
1215		} else {
1216			seminfo.semusz = SEMUSZ;
1217			seminfo.semaem = SEMAEM;
1218		}
1219		max_id = ipc_get_maxid(&sem_ids(ns));
1220		up_read(&sem_ids(ns).rwsem);
1221		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1222			return -EFAULT;
1223		return (max_id < 0) ? 0 : max_id;
1224	}
1225	case IPC_STAT:
1226	case SEM_STAT:
1227	{
1228		struct semid64_ds tbuf;
1229		int id = 0;
1230
1231		memset(&tbuf, 0, sizeof(tbuf));
1232
1233		rcu_read_lock();
1234		if (cmd == SEM_STAT) {
1235			sma = sem_obtain_object(ns, semid);
1236			if (IS_ERR(sma)) {
1237				err = PTR_ERR(sma);
1238				goto out_unlock;
1239			}
1240			id = sma->sem_perm.id;
1241		} else {
1242			sma = sem_obtain_object_check(ns, semid);
1243			if (IS_ERR(sma)) {
1244				err = PTR_ERR(sma);
1245				goto out_unlock;
1246			}
1247		}
1248
1249		err = -EACCES;
1250		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1251			goto out_unlock;
1252
1253		err = security_sem_semctl(sma, cmd);
1254		if (err)
1255			goto out_unlock;
1256
1257		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
1258		tbuf.sem_otime = get_semotime(sma);
1259		tbuf.sem_ctime = sma->sem_ctime;
1260		tbuf.sem_nsems = sma->sem_nsems;
1261		rcu_read_unlock();
1262		if (copy_semid_to_user(p, &tbuf, version))
1263			return -EFAULT;
1264		return id;
1265	}
1266	default:
1267		return -EINVAL;
1268	}
1269out_unlock:
1270	rcu_read_unlock();
1271	return err;
1272}
1273
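/*
 * Illustrative user-space sketch (not part of this file): note the
 * asymmetric return values implemented above - IPC_STAT takes a semaphore
 * id and returns 0, while SEM_STAT takes a kernel table index and returns
 * the id. Assuming the caller defines a Linux-style "union semun":
 *
 *	struct semid_ds ds;
 *	union semun arg = { .buf = &ds };
 *	if (semctl(semid, 0, IPC_STAT, arg) == 0)
 *		printf("nsems=%lu\n", (unsigned long)ds.sem_nsems);
 */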
1274static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1275		unsigned long arg)
1276{
1277	struct sem_undo *un;
1278	struct sem_array *sma;
1279	struct sem *curr;
1280	int err;
1281	struct list_head tasks;
1282	int val;
1283#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1284	/* big-endian 64bit */
1285	val = arg >> 32;
1286#else
1287	/* 32bit or little-endian 64bit */
1288	val = arg;
1289#endif
1290
1291	if (val > SEMVMX || val < 0)
1292		return -ERANGE;
1293
1294	INIT_LIST_HEAD(&tasks);
1295
1296	rcu_read_lock();
1297	sma = sem_obtain_object_check(ns, semid);
1298	if (IS_ERR(sma)) {
1299		rcu_read_unlock();
1300		return PTR_ERR(sma);
1301	}
1302
1303	if (semnum < 0 || semnum >= sma->sem_nsems) {
1304		rcu_read_unlock();
1305		return -EINVAL;
1306	}
1307
1308
1309	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1310		rcu_read_unlock();
1311		return -EACCES;
1312	}
1313
1314	err = security_sem_semctl(sma, SETVAL);
1315	if (err) {
1316		rcu_read_unlock();
1317		return -EACCES;
1318	}
1319
1320	sem_lock(sma, NULL, -1);
1321
1322	if (!ipc_valid_object(&sma->sem_perm)) {
1323		sem_unlock(sma, -1);
1324		rcu_read_unlock();
1325		return -EIDRM;
1326	}
1327
1328	curr = &sma->sem_base[semnum];
1329
1330	ipc_assert_locked_object(&sma->sem_perm);
1331	list_for_each_entry(un, &sma->list_id, list_id)
1332		un->semadj[semnum] = 0;
1333
1334	curr->semval = val;
1335	curr->sempid = task_tgid_vnr(current);
1336	sma->sem_ctime = get_seconds();
1337	/* maybe some queued-up processes were waiting for this */
1338	do_smart_update(sma, NULL, 0, 0, &tasks);
1339	sem_unlock(sma, -1);
1340	rcu_read_unlock();
1341	wake_up_sem_queue_do(&tasks);
1342	return 0;
1343}
1344
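/*
 * Illustrative user-space sketch (not part of this file): as implemented
 * above, SETVAL also clears every process's undo adjustment for that
 * semaphore, and values outside 0..SEMVMX fail with ERANGE. Assuming a
 * caller-defined "union semun":
 *
 *	union semun arg = { .val = 1 };
 *	if (semctl(semid, 0, SETVAL, arg) < 0)
 *		perror("semctl(SETVAL)");
 */
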
1345static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1346		int cmd, void __user *p)
1347{
1348	struct sem_array *sma;
1349	struct sem *curr;
1350	int err, nsems;
1351	ushort fast_sem_io[SEMMSL_FAST];
1352	ushort *sem_io = fast_sem_io;
1353	struct list_head tasks;
1354
1355	INIT_LIST_HEAD(&tasks);
1356
1357	rcu_read_lock();
1358	sma = sem_obtain_object_check(ns, semid);
1359	if (IS_ERR(sma)) {
1360		rcu_read_unlock();
1361		return PTR_ERR(sma);
1362	}
1363
1364	nsems = sma->sem_nsems;
1365
1366	err = -EACCES;
1367	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1368		goto out_rcu_wakeup;
1369
1370	err = security_sem_semctl(sma, cmd);
1371	if (err)
1372		goto out_rcu_wakeup;
1373
1374	err = -EACCES;
1375	switch (cmd) {
1376	case GETALL:
1377	{
1378		ushort __user *array = p;
1379		int i;
1380
1381		sem_lock(sma, NULL, -1);
1382		if (!ipc_valid_object(&sma->sem_perm)) {
1383			err = -EIDRM;
1384			goto out_unlock;
1385		}
1386		if (nsems > SEMMSL_FAST) {
1387			if (!ipc_rcu_getref(sma)) {
1388				err = -EIDRM;
1389				goto out_unlock;
1390			}
1391			sem_unlock(sma, -1);
1392			rcu_read_unlock();
1393			sem_io = ipc_alloc(sizeof(ushort)*nsems);
1394			if (sem_io == NULL) {
1395				ipc_rcu_putref(sma, ipc_rcu_free);
1396				return -ENOMEM;
1397			}
1398
1399			rcu_read_lock();
1400			sem_lock_and_putref(sma);
1401			if (!ipc_valid_object(&sma->sem_perm)) {
1402				err = -EIDRM;
1403				goto out_unlock;
1404			}
1405		}
1406		for (i = 0; i < sma->sem_nsems; i++)
1407			sem_io[i] = sma->sem_base[i].semval;
1408		sem_unlock(sma, -1);
1409		rcu_read_unlock();
1410		err = 0;
1411		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1412			err = -EFAULT;
1413		goto out_free;
1414	}
1415	case SETALL:
1416	{
1417		int i;
1418		struct sem_undo *un;
1419
1420		if (!ipc_rcu_getref(sma)) {
1421			err = -EIDRM;
1422			goto out_rcu_wakeup;
1423		}
1424		rcu_read_unlock();
1425
1426		if (nsems > SEMMSL_FAST) {
1427			sem_io = ipc_alloc(sizeof(ushort)*nsems);
1428			if (sem_io == NULL) {
1429				ipc_rcu_putref(sma, ipc_rcu_free);
1430				return -ENOMEM;
1431			}
1432		}
1433
1434		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
1435			ipc_rcu_putref(sma, ipc_rcu_free);
1436			err = -EFAULT;
1437			goto out_free;
1438		}
1439
1440		for (i = 0; i < nsems; i++) {
1441			if (sem_io[i] > SEMVMX) {
1442				ipc_rcu_putref(sma, ipc_rcu_free);
1443				err = -ERANGE;
1444				goto out_free;
1445			}
1446		}
1447		rcu_read_lock();
1448		sem_lock_and_putref(sma);
1449		if (!ipc_valid_object(&sma->sem_perm)) {
1450			err = -EIDRM;
1451			goto out_unlock;
1452		}
1453
1454		for (i = 0; i < nsems; i++) {
1455			sma->sem_base[i].semval = sem_io[i];
1456			sma->sem_base[i].sempid = task_tgid_vnr(current);
1457		}
1458
1459		ipc_assert_locked_object(&sma->sem_perm);
1460		list_for_each_entry(un, &sma->list_id, list_id) {
1461			for (i = 0; i < nsems; i++)
1462				un->semadj[i] = 0;
1463		}
1464		sma->sem_ctime = get_seconds();
1465		/* maybe some queued-up processes were waiting for this */
1466		do_smart_update(sma, NULL, 0, 0, &tasks);
1467		err = 0;
1468		goto out_unlock;
1469	}
1470	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1471	}
1472	err = -EINVAL;
1473	if (semnum < 0 || semnum >= nsems)
1474		goto out_rcu_wakeup;
1475
1476	sem_lock(sma, NULL, -1);
1477	if (!ipc_valid_object(&sma->sem_perm)) {
1478		err = -EIDRM;
1479		goto out_unlock;
1480	}
1481	curr = &sma->sem_base[semnum];
1482
1483	switch (cmd) {
1484	case GETVAL:
1485		err = curr->semval;
1486		goto out_unlock;
1487	case GETPID:
1488		err = curr->sempid;
1489		goto out_unlock;
1490	case GETNCNT:
1491		err = count_semcnt(sma, semnum, 0);
1492		goto out_unlock;
1493	case GETZCNT:
1494		err = count_semcnt(sma, semnum, 1);
1495		goto out_unlock;
1496	}
1497
1498out_unlock:
1499	sem_unlock(sma, -1);
1500out_rcu_wakeup:
1501	rcu_read_unlock();
1502	wake_up_sem_queue_do(&tasks);
1503out_free:
1504	if (sem_io != fast_sem_io)
1505		ipc_free(sem_io);
1506	return err;
1507}
1508
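/*
 * Illustrative user-space sketch (not part of this file): GETALL snapshots
 * all semaphore values atomically under the array lock, which the copy
 * loop above relies on. Assuming a set of "nsems" semaphores and a
 * caller-defined "union semun":
 *
 *	unsigned short vals[nsems];
 *	union semun arg = { .array = vals };
 *	if (semctl(semid, 0, GETALL, arg) < 0)
 *		perror("semctl(GETALL)");
 */
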
1509static inline unsigned long
1510copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1511{
1512	switch (version) {
1513	case IPC_64:
1514		if (copy_from_user(out, buf, sizeof(*out)))
1515			return -EFAULT;
1516		return 0;
1517	case IPC_OLD:
1518	    {
1519		struct semid_ds tbuf_old;
1520
1521		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1522			return -EFAULT;
1523
1524		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
1525		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
1526		out->sem_perm.mode	= tbuf_old.sem_perm.mode;
1527
1528		return 0;
1529	    }
1530	default:
1531		return -EINVAL;
1532	}
1533}
1534
1535/*
1536 * This function handles some semctl commands which require the rwsem
1537 * to be held in write mode.
1538 * NOTE: no locks must be held, the rwsem is taken inside this function.
1539 */
1540static int semctl_down(struct ipc_namespace *ns, int semid,
1541		       int cmd, int version, void __user *p)
1542{
1543	struct sem_array *sma;
1544	int err;
1545	struct semid64_ds semid64;
1546	struct kern_ipc_perm *ipcp;
1547
1548	if (cmd == IPC_SET) {
1549		if (copy_semid_from_user(&semid64, p, version))
1550			return -EFAULT;
1551	}
1552
1553	down_write(&sem_ids(ns).rwsem);
1554	rcu_read_lock();
1555
1556	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
1557				      &semid64.sem_perm, 0);
1558	if (IS_ERR(ipcp)) {
1559		err = PTR_ERR(ipcp);
1560		goto out_unlock1;
1561	}
1562
1563	sma = container_of(ipcp, struct sem_array, sem_perm);
1564
1565	err = security_sem_semctl(sma, cmd);
1566	if (err)
1567		goto out_unlock1;
1568
1569	switch (cmd) {
1570	case IPC_RMID:
1571		sem_lock(sma, NULL, -1);
1572		/* freeary unlocks the ipc object and rcu */
1573		freeary(ns, ipcp);
1574		goto out_up;
1575	case IPC_SET:
1576		sem_lock(sma, NULL, -1);
1577		err = ipc_update_perm(&semid64.sem_perm, ipcp);
1578		if (err)
1579			goto out_unlock0;
1580		sma->sem_ctime = get_seconds();
1581		break;
1582	default:
1583		err = -EINVAL;
1584		goto out_unlock1;
1585	}
1586
1587out_unlock0:
1588	sem_unlock(sma, -1);
1589out_unlock1:
1590	rcu_read_unlock();
1591out_up:
1592	up_write(&sem_ids(ns).rwsem);
1593	return err;
1594}
1595
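/*
 * Illustrative user-space sketch (not part of this file): IPC_SET, handled
 * above under the writer rwsem, updates the owner/permission fields and
 * sem_ctime. Assuming a caller-defined "union semun":
 *
 *	struct semid_ds ds;
 *	union semun arg = { .buf = &ds };
 *	if (semctl(semid, 0, IPC_STAT, arg) == 0) {
 *		ds.sem_perm.mode = 0600;
 *		semctl(semid, 0, IPC_SET, arg);
 *	}
 */
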
1596SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1597{
1598	int version;
1599	struct ipc_namespace *ns;
1600	void __user *p = (void __user *)arg;
1601
1602	if (semid < 0)
1603		return -EINVAL;
1604
1605	version = ipc_parse_version(&cmd);
1606	ns = current->nsproxy->ipc_ns;
1607
1608	switch (cmd) {
1609	case IPC_INFO:
1610	case SEM_INFO:
1611	case IPC_STAT:
1612	case SEM_STAT:
1613		return semctl_nolock(ns, semid, cmd, version, p);
1614	case GETALL:
1615	case GETVAL:
1616	case GETPID:
1617	case GETNCNT:
1618	case GETZCNT:
1619	case SETALL:
1620		return semctl_main(ns, semid, semnum, cmd, p);
1621	case SETVAL:
1622		return semctl_setval(ns, semid, semnum, arg);
1623	case IPC_RMID:
1624	case IPC_SET:
1625		return semctl_down(ns, semid, cmd, version, p);
1626	default:
1627		return -EINVAL;
1628	}
1629}
1630
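/*
 * Illustrative note (not part of this file): the fourth argument reaches
 * this syscall as a plain "unsigned long"; the C library typically passes
 * the caller's "union semun" by value. On Linux the caller must define
 * the union itself, e.g.:
 *
 *	union semun {
 *		int val;
 *		struct semid_ds *buf;
 *		unsigned short *array;
 *		struct seminfo *__buf;
 *	};
 */
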
1631/* If the task doesn't already have an undo_list, then allocate one
1632 * here.  We guarantee there is only one thread using this undo list,
1633 * and current is THE ONE.
1634 *
1635 * If this allocation and assignment succeeds, but later
1636 * portions of this code fail, there is no need to free the sem_undo_list.
1637 * Just let it stay associated with the task, and it'll be freed later
1638 * at exit time.
1639 *
1640 * This can block, so callers must hold no locks.
1641 */
1642static inline int get_undo_list(struct sem_undo_list **undo_listp)
1643{
1644	struct sem_undo_list *undo_list;
1645
1646	undo_list = current->sysvsem.undo_list;
1647	if (!undo_list) {
1648		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1649		if (undo_list == NULL)
1650			return -ENOMEM;
1651		spin_lock_init(&undo_list->lock);
1652		atomic_set(&undo_list->refcnt, 1);
1653		INIT_LIST_HEAD(&undo_list->list_proc);
1654
1655		current->sysvsem.undo_list = undo_list;
1656	}
1657	*undo_listp = undo_list;
1658	return 0;
1659}
1660
1661static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1662{
1663	struct sem_undo *un;
1664
1665	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1666		if (un->semid == semid)
1667			return un;
1668	}
1669	return NULL;
1670}
1671
1672static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1673{
1674	struct sem_undo *un;
1675
1676	assert_spin_locked(&ulp->lock);
1677
1678	un = __lookup_undo(ulp, semid);
1679	if (un) {
1680		list_del_rcu(&un->list_proc);
1681		list_add_rcu(&un->list_proc, &ulp->list_proc);
1682	}
1683	return un;
1684}
1685
1686/**
1687 * find_alloc_undo - lookup (and if not present create) undo array
1688 * @ns: namespace
1689 * @semid: semaphore array id
1690 *
1691 * The function looks up (and if not present creates) the undo structure.
1692 * The size of the undo structure depends on the size of the semaphore
1693 * array, thus the alloc path is not that straightforward.
1694 * Lifetime rules: sem_undo is rcu-protected; on success, the function
1695 * performs an rcu_read_lock().
1696 */
1697static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1698{
1699	struct sem_array *sma;
1700	struct sem_undo_list *ulp;
1701	struct sem_undo *un, *new;
1702	int nsems, error;
1703
1704	error = get_undo_list(&ulp);
1705	if (error)
1706		return ERR_PTR(error);
1707
1708	rcu_read_lock();
1709	spin_lock(&ulp->lock);
1710	un = lookup_undo(ulp, semid);
1711	spin_unlock(&ulp->lock);
1712	if (likely(un != NULL))
1713		goto out;
1714
1715	/* no undo structure around - allocate one. */
1716	/* step 1: figure out the size of the semaphore array */
1717	sma = sem_obtain_object_check(ns, semid);
1718	if (IS_ERR(sma)) {
1719		rcu_read_unlock();
1720		return ERR_CAST(sma);
1721	}
1722
1723	nsems = sma->sem_nsems;
1724	if (!ipc_rcu_getref(sma)) {
1725		rcu_read_unlock();
1726		un = ERR_PTR(-EIDRM);
1727		goto out;
1728	}
1729	rcu_read_unlock();
1730
1731	/* step 2: allocate new undo structure */
1732	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1733	if (!new) {
1734		ipc_rcu_putref(sma, ipc_rcu_free);
1735		return ERR_PTR(-ENOMEM);
1736	}
1737
1738	/* step 3: Acquire the lock on semaphore array */
1739	rcu_read_lock();
1740	sem_lock_and_putref(sma);
1741	if (!ipc_valid_object(&sma->sem_perm)) {
1742		sem_unlock(sma, -1);
1743		rcu_read_unlock();
1744		kfree(new);
1745		un = ERR_PTR(-EIDRM);
1746		goto out;
1747	}
1748	spin_lock(&ulp->lock);
1749
1750	/*
1751	 * step 4: check for races: did someone else allocate the undo struct?
1752	 */
1753	un = lookup_undo(ulp, semid);
1754	if (un) {
1755		kfree(new);
1756		goto success;
1757	}
1758	/* step 5: initialize & link new undo structure */
1759	new->semadj = (short *) &new[1];
1760	new->ulp = ulp;
1761	new->semid = semid;
1762	assert_spin_locked(&ulp->lock);
1763	list_add_rcu(&new->list_proc, &ulp->list_proc);
1764	ipc_assert_locked_object(&sma->sem_perm);
1765	list_add(&new->list_id, &sma->list_id);
1766	un = new;
1767
1768success:
1769	spin_unlock(&ulp->lock);
1770	sem_unlock(sma, -1);
1771out:
1772	return un;
1773}
1774
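/*
 * Illustrative user-space sketch (not part of this file): find_alloc_undo()
 * runs on the first semop() that carries SEM_UNDO for a given set, e.g.:
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *	semop(semid, &op, 1);
 *
 * The kernel then tracks a per-process adjustment that exit_sem() applies
 * if the task dies without releasing the semaphore.
 */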
1775
1776/**
1777 * get_queue_result - retrieve the result code from sem_queue
1778 * @q: Pointer to queue structure
1779 *
1780 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
1781 * q->status, then we must loop until the value is replaced with the final
1782 * value: This may happen if a task is woken up by an unrelated event (e.g.
1783 * signal) and in parallel the task is woken up by another task because it got
1784 * the requested semaphores.
1785 *
1786 * The function can be called with or without holding the semaphore spinlock.
1787 */
1788static int get_queue_result(struct sem_queue *q)
1789{
1790	int error;
1791
1792	error = q->status;
1793	while (unlikely(error == IN_WAKEUP)) {
1794		cpu_relax();
1795		error = q->status;
1796	}
1797
1798	return error;
1799}
1800
1801SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1802		unsigned, nsops, const struct timespec __user *, timeout)
1803{
1804	int error = -EINVAL;
1805	struct sem_array *sma;
1806	struct sembuf fast_sops[SEMOPM_FAST];
1807	struct sembuf *sops = fast_sops, *sop;
1808	struct sem_undo *un;
1809	int undos = 0, alter = 0, max, locknum;
1810	struct sem_queue queue;
1811	unsigned long jiffies_left = 0;
1812	struct ipc_namespace *ns;
1813	struct list_head tasks;
1814
1815	ns = current->nsproxy->ipc_ns;
1816
1817	if (nsops < 1 || semid < 0)
1818		return -EINVAL;
1819	if (nsops > ns->sc_semopm)
1820		return -E2BIG;
1821	if (nsops > SEMOPM_FAST) {
1822		sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
1823		if (sops == NULL)
1824			return -ENOMEM;
1825	}
1826	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
1827		error =  -EFAULT;
1828		goto out_free;
1829	}
1830	if (timeout) {
1831		struct timespec _timeout;
1832		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
1833			error = -EFAULT;
1834			goto out_free;
1835		}
1836		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
1837			_timeout.tv_nsec >= 1000000000L) {
1838			error = -EINVAL;
1839			goto out_free;
1840		}
1841		jiffies_left = timespec_to_jiffies(&_timeout);
1842	}
1843	max = 0;
1844	for (sop = sops; sop < sops + nsops; sop++) {
1845		if (sop->sem_num >= max)
1846			max = sop->sem_num;
1847		if (sop->sem_flg & SEM_UNDO)
1848			undos = 1;
1849		if (sop->sem_op != 0)
1850			alter = 1;
1851	}
1852
1853	INIT_LIST_HEAD(&tasks);
1854
1855	if (undos) {
1856		/* On success, find_alloc_undo takes the rcu_read_lock */
1857		un = find_alloc_undo(ns, semid);
1858		if (IS_ERR(un)) {
1859			error = PTR_ERR(un);
1860			goto out_free;
1861		}
1862	} else {
1863		un = NULL;
1864		rcu_read_lock();
1865	}
1866
1867	sma = sem_obtain_object_check(ns, semid);
1868	if (IS_ERR(sma)) {
1869		rcu_read_unlock();
1870		error = PTR_ERR(sma);
1871		goto out_free;
1872	}
1873
1874	error = -EFBIG;
1875	if (max >= sma->sem_nsems)
1876		goto out_rcu_wakeup;
1877
1878	error = -EACCES;
1879	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
1880		goto out_rcu_wakeup;
1881
1882	error = security_sem_semop(sma, sops, nsops, alter);
1883	if (error)
1884		goto out_rcu_wakeup;
1885
1886	error = -EIDRM;
1887	locknum = sem_lock(sma, sops, nsops);
1888	/*
1889	 * We might eventually perform the following check in a lockless
1890	 * fashion, considering ipc_valid_object() locking constraints.
1891	 * If nsops == 1 and there is no contention for sem_perm.lock, then
1892	 * only a per-semaphore lock is held and it's OK to proceed with the
1893	 * check below. More details on the fine-grained locking scheme
1894	 * entangled here, and why it's RMID race safe, are in the comments at sem_lock().
1895	 */
1896	if (!ipc_valid_object(&sma->sem_perm))
1897		goto out_unlock_free;
1898	/*
1899	 * semid identifiers are not unique - find_alloc_undo may have
1900	 * allocated an undo structure, it was invalidated by an RMID,
1901	 * and now a new array has received the same id. Check and fail.
1902	 * This case can be detected by checking un->semid. The existence
1903	 * of "un" itself is guaranteed by rcu.
1904	 */
1905	if (un && un->semid == -1)
1906		goto out_unlock_free;
1907
1908	queue.sops = sops;
1909	queue.nsops = nsops;
1910	queue.undo = un;
1911	queue.pid = task_tgid_vnr(current);
1912	queue.alter = alter;
1913
1914	error = perform_atomic_semop(sma, &queue);
1915	if (error == 0) {
1916		/* If the operation was successful, then do
1917		 * the required updates.
1918		 */
1919		if (alter)
1920			do_smart_update(sma, sops, nsops, 1, &tasks);
1921		else
1922			set_semotime(sma, sops);
1923	}
1924	if (error <= 0)
1925		goto out_unlock_free;
1926
1927	/* We need to sleep on this operation, so we put the current
1928	 * task into the pending queue and go to sleep.
1929	 */
1930
1931	if (nsops == 1) {
1932		struct sem *curr;
1933		curr = &sma->sem_base[sops->sem_num];
1934
1935		if (alter) {
1936			if (sma->complex_count) {
1937				list_add_tail(&queue.list,
1938						&sma->pending_alter);
1939			} else {
1940
1941				list_add_tail(&queue.list,
1942						&curr->pending_alter);
1943			}
1944		} else {
1945			list_add_tail(&queue.list, &curr->pending_const);
1946		}
1947	} else {
1948		if (!sma->complex_count)
1949			merge_queues(sma);
1950
1951		if (alter)
1952			list_add_tail(&queue.list, &sma->pending_alter);
1953		else
1954			list_add_tail(&queue.list, &sma->pending_const);
1955
1956		sma->complex_count++;
1957	}
1958
1959	queue.status = -EINTR;
1960	queue.sleeper = current;
1961
1962sleep_again:
1963	__set_current_state(TASK_INTERRUPTIBLE);
1964	sem_unlock(sma, locknum);
1965	rcu_read_unlock();
1966
1967	if (timeout)
1968		jiffies_left = schedule_timeout(jiffies_left);
1969	else
1970		schedule();
1971
1972	error = get_queue_result(&queue);
1973
1974	if (error != -EINTR) {
1975		/* fast path: update_queue already obtained all requested
1976		 * resources.
1977		 * Perform a smp_mb(): User space could assume that semop()
1978		 * is a memory barrier: without the mb(), the cpu could
1979		 * speculatively read stale user-space data that was
1980		 * overwritten by the previous owner of the semaphore.
1981		 */
1982		smp_mb();
1983
1984		goto out_free;
1985	}
1986
1987	rcu_read_lock();
1988	sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
1989
1990	/*
1991	 * Wait until it's guaranteed that no wake_up_sem_queue_do() is ongoing.
1992	 */
1993	error = get_queue_result(&queue);
1994
1995	/*
1996	 * Array removed? If yes, leave without sem_unlock().
1997	 */
1998	if (IS_ERR(sma)) {
1999		rcu_read_unlock();
2000		goto out_free;
2001	}
2002
2003
2004	/*
2005	 * If queue.status != -EINTR, we were woken up by another process.
2006	 * Leave without unlink_queue(), but with sem_unlock().
2007	 */
2008	if (error != -EINTR)
2009		goto out_unlock_free;
2010
2011	/*
2012	 * If an interrupt occurred, we have to clean up the queue.
2013	 */
2014	if (timeout && jiffies_left == 0)
2015		error = -EAGAIN;
2016
2017	/*
2018	 * If the wakeup was spurious, just retry
2019	 */
2020	if (error == -EINTR && !signal_pending(current))
2021		goto sleep_again;
2022
2023	unlink_queue(sma, &queue);
2024
2025out_unlock_free:
2026	sem_unlock(sma, locknum);
2027out_rcu_wakeup:
2028	rcu_read_unlock();
2029	wake_up_sem_queue_do(&tasks);
2030out_free:
2031	if (sops != fast_sops)
2032		kfree(sops);
2033	return error;
2034}
2035
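/*
 * Illustrative user-space sketch (not part of this file): a bounded wait
 * maps onto the jiffies_left handling above and fails with EAGAIN when
 * the timeout expires:
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *	if (semtimedop(semid, &op, 1, &ts) < 0 && errno == EAGAIN)
 *		fprintf(stderr, "timed out\n");
 */
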
2036SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2037		unsigned, nsops)
2038{
2039	return sys_semtimedop(semid, tsops, nsops, NULL);
2040}
2041
2042/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2043 * parent and child tasks.
2044 */
2045
2046int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2047{
2048	struct sem_undo_list *undo_list;
2049	int error;
2050
2051	if (clone_flags & CLONE_SYSVSEM) {
2052		error = get_undo_list(&undo_list);
2053		if (error)
2054			return error;
2055		atomic_inc(&undo_list->refcnt);
2056		tsk->sysvsem.undo_list = undo_list;
2057	} else
2058		tsk->sysvsem.undo_list = NULL;
2059
2060	return 0;
2061}
2062
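/*
 * Illustrative sketch (not part of this file): CLONE_SYSVSEM makes parent
 * and child share one undo list, so SEM_UNDO adjustments are applied only
 * once, when the last sharer exits. A caller could request this via
 * clone(2), assuming child_fn and stack_top are set up by the caller:
 *
 *	pid = clone(child_fn, stack_top, CLONE_SYSVSEM | SIGCHLD, NULL);
 */
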
2063/*
2064 * add semadj values to semaphores, free undo structures.
2065 * undo structures are not freed when semaphore arrays are destroyed
2066 * so some of them may be out of date.
2067 * IMPLEMENTATION NOTE: There is some confusion over whether the
2068 * set of adjustments that needs to be done should be done in an atomic
2069 * manner or not. That is, if we are attempting to decrement the semval
2070 * should we queue up and wait until we can do so legally?
2071 * The original implementation attempted to do this (queue and wait).
2072 * The current implementation does not do so. The POSIX standard
2073 * and SVID should be consulted to determine what behavior is mandated.
2074 */
2075void exit_sem(struct task_struct *tsk)
2076{
2077	struct sem_undo_list *ulp;
2078
2079	ulp = tsk->sysvsem.undo_list;
2080	if (!ulp)
2081		return;
2082	tsk->sysvsem.undo_list = NULL;
2083
2084	if (!atomic_dec_and_test(&ulp->refcnt))
2085		return;
2086
2087	for (;;) {
2088		struct sem_array *sma;
2089		struct sem_undo *un;
2090		struct list_head tasks;
2091		int semid, i;
2092
2093		rcu_read_lock();
2094		un = list_entry_rcu(ulp->list_proc.next,
2095				    struct sem_undo, list_proc);
2096		if (&un->list_proc == &ulp->list_proc) {
2097			/*
2098			 * We must wait for freeary() before freeing this ulp,
2099			 * in case we raced with the last sem_undo. There is a
2100			 * small window where we exit while freeary() hasn't yet
2101			 * finished unlocking sem_undo_list.
2102			 */
2103			spin_unlock_wait(&ulp->lock);
2104			rcu_read_unlock();
2105			break;
2106		}
2107		spin_lock(&ulp->lock);
2108		semid = un->semid;
2109		spin_unlock(&ulp->lock);
2110
2111		/* exit_sem raced with IPC_RMID, nothing to do */
2112		if (semid == -1) {
2113			rcu_read_unlock();
2114			continue;
2115		}
2116
2117		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2118		/* exit_sem raced with IPC_RMID, nothing to do */
2119		if (IS_ERR(sma)) {
2120			rcu_read_unlock();
2121			continue;
2122		}
2123
2124		sem_lock(sma, NULL, -1);
2125		/* exit_sem raced with IPC_RMID, nothing to do */
2126		if (!ipc_valid_object(&sma->sem_perm)) {
2127			sem_unlock(sma, -1);
2128			rcu_read_unlock();
2129			continue;
2130		}
2131		un = __lookup_undo(ulp, semid);
2132		if (un == NULL) {
2133			/* exit_sem raced with IPC_RMID+semget() that created
2134			 * exactly the same semid. Nothing to do.
2135			 */
2136			sem_unlock(sma, -1);
2137			rcu_read_unlock();
2138			continue;
2139		}
2140
2141		/* remove un from the linked lists */
2142		ipc_assert_locked_object(&sma->sem_perm);
2143		list_del(&un->list_id);
2144
2145		/* we are the last process using this ulp, so acquiring ulp->lock
2146		 * isn't required. Besides that, we are also protected against
2147		 * IPC_RMID as we hold the sma->sem_perm lock now.
2148		 */
2149		list_del_rcu(&un->list_proc);
2150
2151		/* perform adjustments registered in un */
2152		for (i = 0; i < sma->sem_nsems; i++) {
2153			struct sem *semaphore = &sma->sem_base[i];
2154			if (un->semadj[i]) {
2155				semaphore->semval += un->semadj[i];
2156				/*
2157				 * Range checks of the new semaphore value,
2158				 * not defined by sus:
2159				 * - Some unices ignore the undo entirely
2160				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2161				 * - some cap the value (e.g. FreeBSD caps
2162				 *   at 0, but doesn't enforce SEMVMX)
2163				 *
2164				 * Linux caps the semaphore value, both at 0
2165				 * and at SEMVMX.
2166				 *
2167				 *	Manfred <manfred@colorfullife.com>
2168				 */
2169				if (semaphore->semval < 0)
2170					semaphore->semval = 0;
2171				if (semaphore->semval > SEMVMX)
2172					semaphore->semval = SEMVMX;
2173				semaphore->sempid = task_tgid_vnr(current);
2174			}
2175		}
2176		/* maybe some queued-up processes were waiting for this */
2177		INIT_LIST_HEAD(&tasks);
2178		do_smart_update(sma, NULL, 0, 1, &tasks);
2179		sem_unlock(sma, -1);
2180		rcu_read_unlock();
2181		wake_up_sem_queue_do(&tasks);
2182
2183		kfree_rcu(un, rcu);
2184	}
2185	kfree(ulp);
2186}
2187
2188#ifdef CONFIG_PROC_FS
2189static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2190{
2191	struct user_namespace *user_ns = seq_user_ns(s);
2192	struct sem_array *sma = it;
2193	time_t sem_otime;
2194
2195	/*
2196	 * The proc interface isn't aware of sem_lock(); it calls
2197	 * ipc_lock_object() directly (in sysvipc_find_ipc).
2198	 * In order to stay compatible with sem_lock(), we must wait until
2199	 * all simple semop() calls have left their critical regions.
2200	 */
2201	sem_wait_array(sma);
2202
2203	sem_otime = get_semotime(sma);
2204
2205	seq_printf(s,
2206		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
2207		   sma->sem_perm.key,
2208		   sma->sem_perm.id,
2209		   sma->sem_perm.mode,
2210		   sma->sem_nsems,
2211		   from_kuid_munged(user_ns, sma->sem_perm.uid),
2212		   from_kgid_munged(user_ns, sma->sem_perm.gid),
2213		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
2214		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
2215		   sem_otime,
2216		   sma->sem_ctime);
2217
2218	return 0;
2219}
2220#endif
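
/*
 * Illustrative output sketch (not part of this file): each line written by
 * sysvipc_sem_proc_show() above follows the column layout of the format
 * string, so `cat /proc/sysvipc/sem` might show (values made up):
 *
 *	key      semid perms nsems   uid   gid  cuid  cgid otime ctime
 *	0        65536   600     1  1000  1000  1000  1000     0 1402345678
 */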