ipc/shm.c (v4.10.11)
 
   1/*
   2 * linux/ipc/shm.c
   3 * Copyright (C) 1992, 1993 Krishna Balasubramanian
   4 *	 Many improvements/fixes by Bruno Haible.
   5 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
   6 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
   7 *
   8 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
   9 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
  10 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
  11 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
  12 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
  13 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
  14 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
  15 *
  16 * support for audit of ipc object properties and permission changes
  17 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  18 *
  19 * namespaces support
  20 * OpenVZ, SWsoft Inc.
  21 * Pavel Emelianov <xemul@openvz.org>
  22 *
  23 * Better ipc lock (kern_ipc_perm.lock) handling
  24 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
  25 */
  26
  27#include <linux/slab.h>
  28#include <linux/mm.h>
  29#include <linux/hugetlb.h>
  30#include <linux/shm.h>
  31#include <linux/init.h>
  32#include <linux/file.h>
  33#include <linux/mman.h>
  34#include <linux/shmem_fs.h>
  35#include <linux/security.h>
  36#include <linux/syscalls.h>
  37#include <linux/audit.h>
  38#include <linux/capability.h>
  39#include <linux/ptrace.h>
  40#include <linux/seq_file.h>
  41#include <linux/rwsem.h>
  42#include <linux/nsproxy.h>
  43#include <linux/mount.h>
  44#include <linux/ipc_namespace.h>
  45
  46#include <linux/uaccess.h>
  47
  48#include "util.h"
  49
  50struct shm_file_data {
  51	int id;
  52	struct ipc_namespace *ns;
  53	struct file *file;
  54	const struct vm_operations_struct *vm_ops;
  55};
  56
  57#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
  58
  59static const struct file_operations shm_file_operations;
  60static const struct vm_operations_struct shm_vm_ops;
  61
  62#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])
  63
  64#define shm_unlock(shp)			\
  65	ipc_unlock(&(shp)->shm_perm)
  66
  67static int newseg(struct ipc_namespace *, struct ipc_params *);
  68static void shm_open(struct vm_area_struct *vma);
  69static void shm_close(struct vm_area_struct *vma);
  70static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
  71#ifdef CONFIG_PROC_FS
  72static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
  73#endif
  74
  75void shm_init_ns(struct ipc_namespace *ns)
  76{
  77	ns->shm_ctlmax = SHMMAX;
  78	ns->shm_ctlall = SHMALL;
  79	ns->shm_ctlmni = SHMMNI;
  80	ns->shm_rmid_forced = 0;
  81	ns->shm_tot = 0;
  82	ipc_init_ids(&shm_ids(ns));
  83}
  84
  85/*
  86 * Called with shm_ids.rwsem (writer) and the shp structure locked.
  87 * Only shm_ids.rwsem remains locked on exit.
  88 */
  89static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
  90{
  91	struct shmid_kernel *shp;
  92
  93	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
  94
  95	if (shp->shm_nattch) {
  96		shp->shm_perm.mode |= SHM_DEST;
  97		/* Do not find it any more */
  98		shp->shm_perm.key = IPC_PRIVATE;
  99		shm_unlock(shp);
 100	} else
 101		shm_destroy(ns, shp);
 102}
 103
 104#ifdef CONFIG_IPC_NS
 105void shm_exit_ns(struct ipc_namespace *ns)
 106{
 107	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
 108	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
 109}
 110#endif
 111
 112static int __init ipc_ns_init(void)
 113{
 114	shm_init_ns(&init_ipc_ns);
 115	return 0;
 116}
 117
 118pure_initcall(ipc_ns_init);
 119
 120void __init shm_init(void)
 121{
 122	ipc_init_proc_interface("sysvipc/shm",
 123#if BITS_PER_LONG <= 32
 124				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
 125#else
 126				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
 127#endif
 128				IPC_SHM_IDS, sysvipc_shm_proc_show);
 129}
 130
 131static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
 132{
 133	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
 134
 135	if (IS_ERR(ipcp))
 136		return ERR_CAST(ipcp);
 137
 138	return container_of(ipcp, struct shmid_kernel, shm_perm);
 139}
 140
 141static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
 142{
 143	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
 144
 145	if (IS_ERR(ipcp))
 146		return ERR_CAST(ipcp);
 147
 148	return container_of(ipcp, struct shmid_kernel, shm_perm);
 149}
 150
 151/*
 152 * shm_lock_(check_) routines are called in the paths where the rwsem
 153 * is not necessarily held.
 154 */
 155static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 156{
 157	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
 158
 159	/*
 160	 * Callers of shm_lock() must validate the status of the returned ipc
 161	 * object pointer (as returned by ipc_lock()), and error out as
 162	 * appropriate.
 163	 */
 164	if (IS_ERR(ipcp))
 165		return (void *)ipcp;
 166	return container_of(ipcp, struct shmid_kernel, shm_perm);
 167}
 168
 169static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
 170{
 171	rcu_read_lock();
 172	ipc_lock_object(&ipcp->shm_perm);
 173}
 174
 175static void shm_rcu_free(struct rcu_head *head)
 176{
 177	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
 178	struct shmid_kernel *shp = ipc_rcu_to_struct(p);
 179
 180	security_shm_free(shp);
 181	ipc_rcu_free(head);
 182}
 183
 184static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 185{
 186	list_del(&s->shm_clist);
 187	ipc_rmid(&shm_ids(ns), &s->shm_perm);
 188}
 189
 190
 191static int __shm_open(struct vm_area_struct *vma)
 192{
 193	struct file *file = vma->vm_file;
 194	struct shm_file_data *sfd = shm_file_data(file);
 195	struct shmid_kernel *shp;
 196
 197	shp = shm_lock(sfd->ns, sfd->id);
 198
 199	if (IS_ERR(shp))
 200		return PTR_ERR(shp);
 201
 202	shp->shm_atim = get_seconds();
 203	shp->shm_lprid = task_tgid_vnr(current);
 204	shp->shm_nattch++;
 205	shm_unlock(shp);
 206	return 0;
 207}
 208
 209/* This is called by fork, once for every shm attach. */
 210static void shm_open(struct vm_area_struct *vma)
 211{
 212	int err = __shm_open(vma);
 213	/*
 214	 * We raced in the idr lookup or with shm_destroy().
 215	 * Either way, the ID is busted.
 216	 */
 217	WARN_ON_ONCE(err);
 218}
 219
 220/*
 221 * shm_destroy - free the struct shmid_kernel
 222 *
 223 * @ns: namespace
 224 * @shp: struct to free
 225 *
 226 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 227 * but returns with shp unlocked and freed.
 228 */
 229static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 230{
 231	struct file *shm_file;
 232
 233	shm_file = shp->shm_file;
 234	shp->shm_file = NULL;
 235	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
 236	shm_rmid(ns, shp);
 237	shm_unlock(shp);
 238	if (!is_file_hugepages(shm_file))
 239		shmem_lock(shm_file, 0, shp->mlock_user);
 240	else if (shp->mlock_user)
 241		user_shm_unlock(i_size_read(file_inode(shm_file)),
 242				shp->mlock_user);
 243	fput(shm_file);
 244	ipc_rcu_putref(shp, shm_rcu_free);
 245}
 246
 247/*
 248 * shm_may_destroy - identifies whether shm segment should be destroyed now
 249 *
 250 * Returns true if and only if there are no active users of the segment and
 251 * one of the following is true:
 252 *
 253 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 254 *
 255 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 256 */
 257static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 258{
 259	return (shp->shm_nattch == 0) &&
 260	       (ns->shm_rmid_forced ||
 261		(shp->shm_perm.mode & SHM_DEST));
 262}
 263
 264/*
 265 * remove the attach descriptor vma.
 266 * free memory for segment if it is marked destroyed.
 267 * The descriptor has already been removed from the current->mm->mmap list
 268 * and will later be kfree()d.
 269 */
 270static void shm_close(struct vm_area_struct *vma)
 271{
 272	struct file *file = vma->vm_file;
 273	struct shm_file_data *sfd = shm_file_data(file);
 274	struct shmid_kernel *shp;
 275	struct ipc_namespace *ns = sfd->ns;
 276
 277	down_write(&shm_ids(ns).rwsem);
 278	/* remove from the list of attaches of the shm segment */
 279	shp = shm_lock(ns, sfd->id);
 280
 281	/*
 282	 * We raced in the idr lookup or with shm_destroy().
 283	 * Either way, the ID is busted.
 284	 */
 285	if (WARN_ON_ONCE(IS_ERR(shp)))
 286		goto done; /* no-op */
 287
 288	shp->shm_lprid = task_tgid_vnr(current);
 289	shp->shm_dtim = get_seconds();
 290	shp->shm_nattch--;
 291	if (shm_may_destroy(ns, shp))
 292		shm_destroy(ns, shp);
 293	else
 294		shm_unlock(shp);
 295done:
 296	up_write(&shm_ids(ns).rwsem);
 297}
 298
 299/* Called with ns->shm_ids(ns).rwsem locked */
 300static int shm_try_destroy_orphaned(int id, void *p, void *data)
 301{
 302	struct ipc_namespace *ns = data;
 303	struct kern_ipc_perm *ipcp = p;
 304	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 305
 306	/*
 307	 * We want to destroy segments without users and with already
 308	 * exit'ed originating process.
 309	 *
 310	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
 311	 */
 312	if (shp->shm_creator != NULL)
 313		return 0;
 314
 315	if (shm_may_destroy(ns, shp)) {
 316		shm_lock_by_ptr(shp);
 317		shm_destroy(ns, shp);
 318	}
 319	return 0;
 320}
 321
 322void shm_destroy_orphaned(struct ipc_namespace *ns)
 323{
 324	down_write(&shm_ids(ns).rwsem);
 325	if (shm_ids(ns).in_use)
 326		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
 327	up_write(&shm_ids(ns).rwsem);
 328}
 329
 330/* Locking assumes this will only be called with task == current */
 331void exit_shm(struct task_struct *task)
 332{
 333	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
 334	struct shmid_kernel *shp, *n;
 335
 336	if (list_empty(&task->sysvshm.shm_clist))
 337		return;
 338
 339	/*
 340	 * If kernel.shm_rmid_forced is not set then only keep track of
 341	 * which shmids are orphaned, so that a later set of the sysctl
 342	 * can clean them up.
 343	 */
 344	if (!ns->shm_rmid_forced) {
 345		down_read(&shm_ids(ns).rwsem);
 346		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
 347			shp->shm_creator = NULL;
 348		/*
 349		 * Only under read lock but we are only called on current
 350		 * so no entry on the list will be shared.
 351		 */
 352		list_del(&task->sysvshm.shm_clist);
 353		up_read(&shm_ids(ns).rwsem);
 354		return;
 355	}
 356
 357	/*
 358	 * Destroy all already created segments, that were not yet mapped,
 359	 * and mark any mapped as orphan to cover the sysctl toggling.
 360	 * Destroy is skipped if shm_may_destroy() returns false.
 361	 */
 362	down_write(&shm_ids(ns).rwsem);
 363	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
 364		shp->shm_creator = NULL;
 365
 366		if (shm_may_destroy(ns, shp)) {
 367			shm_lock_by_ptr(shp);
 368			shm_destroy(ns, shp);
 369		}
 370	}
 371
 372	/* Remove the list head from any segments still attached. */
 373	list_del(&task->sysvshm.shm_clist);
 374	up_write(&shm_ids(ns).rwsem);
 375}
 376
 377static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 378{
 379	struct file *file = vma->vm_file;
 380	struct shm_file_data *sfd = shm_file_data(file);
 381
 382	return sfd->vm_ops->fault(vma, vmf);
 383}
 384
 385#ifdef CONFIG_NUMA
 386static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 387{
 388	struct file *file = vma->vm_file;
 389	struct shm_file_data *sfd = shm_file_data(file);
 390	int err = 0;
 391
 392	if (sfd->vm_ops->set_policy)
 393		err = sfd->vm_ops->set_policy(vma, new);
 394	return err;
 395}
 396
 397static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
 398					unsigned long addr)
 399{
 400	struct file *file = vma->vm_file;
 401	struct shm_file_data *sfd = shm_file_data(file);
 402	struct mempolicy *pol = NULL;
 403
 404	if (sfd->vm_ops->get_policy)
 405		pol = sfd->vm_ops->get_policy(vma, addr);
 406	else if (vma->vm_policy)
 407		pol = vma->vm_policy;
 408
 409	return pol;
 410}
 411#endif
 412
 413static int shm_mmap(struct file *file, struct vm_area_struct *vma)
 414{
 415	struct shm_file_data *sfd = shm_file_data(file);
 416	int ret;
 417
 418	/*
 419	 * In case of remap_file_pages() emulation, the file can represent
  420	 * removed IPC ID: propagate shm_lock() error to caller.
  421	 */
 422	ret = __shm_open(vma);
 423	if (ret)
 424		return ret;
 425
 426	ret = sfd->file->f_op->mmap(sfd->file, vma);
 427	if (ret) {
 428		shm_close(vma);
 429		return ret;
 430	}
 431	sfd->vm_ops = vma->vm_ops;
 432#ifdef CONFIG_MMU
 433	WARN_ON(!sfd->vm_ops->fault);
 434#endif
 435	vma->vm_ops = &shm_vm_ops;
 436	return 0;
 437}
 438
 439static int shm_release(struct inode *ino, struct file *file)
 440{
 441	struct shm_file_data *sfd = shm_file_data(file);
 442
 443	put_ipc_ns(sfd->ns);
 444	shm_file_data(file) = NULL;
 445	kfree(sfd);
 446	return 0;
 447}
 448
 449static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 450{
 451	struct shm_file_data *sfd = shm_file_data(file);
 452
 453	if (!sfd->file->f_op->fsync)
 454		return -EINVAL;
 455	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
 456}
 457
 458static long shm_fallocate(struct file *file, int mode, loff_t offset,
 459			  loff_t len)
 460{
 461	struct shm_file_data *sfd = shm_file_data(file);
 462
 463	if (!sfd->file->f_op->fallocate)
 464		return -EOPNOTSUPP;
 465	return sfd->file->f_op->fallocate(file, mode, offset, len);
 466}
 467
 468static unsigned long shm_get_unmapped_area(struct file *file,
 469	unsigned long addr, unsigned long len, unsigned long pgoff,
 470	unsigned long flags)
 471{
 472	struct shm_file_data *sfd = shm_file_data(file);
 473
 474	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
 475						pgoff, flags);
 476}
 477
 478static const struct file_operations shm_file_operations = {
 479	.mmap		= shm_mmap,
 480	.fsync		= shm_fsync,
 481	.release	= shm_release,
 482	.get_unmapped_area	= shm_get_unmapped_area,
 483	.llseek		= noop_llseek,
 484	.fallocate	= shm_fallocate,
 485};
 486
 487/*
 488 * shm_file_operations_huge is now identical to shm_file_operations,
 489 * but we keep it distinct for the sake of is_file_shm_hugepages().
 490 */
 491static const struct file_operations shm_file_operations_huge = {
 492	.mmap		= shm_mmap,
 493	.fsync		= shm_fsync,
 494	.release	= shm_release,
 495	.get_unmapped_area	= shm_get_unmapped_area,
 496	.llseek		= noop_llseek,
 497	.fallocate	= shm_fallocate,
 498};
 499
 500bool is_file_shm_hugepages(struct file *file)
 501{
 502	return file->f_op == &shm_file_operations_huge;
 503}
 504
 505static const struct vm_operations_struct shm_vm_ops = {
 506	.open	= shm_open,	/* callback for a new vm-area open */
 507	.close	= shm_close,	/* callback for when the vm-area is released */
 508	.fault	= shm_fault,
 509#if defined(CONFIG_NUMA)
 510	.set_policy = shm_set_policy,
 511	.get_policy = shm_get_policy,
 512#endif
 513};
 514
 515/**
 516 * newseg - Create a new shared memory segment
 517 * @ns: namespace
 518 * @params: ptr to the structure that contains key, size and shmflg
 519 *
 520 * Called with shm_ids.rwsem held as a writer.
 521 */
 522static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 523{
 524	key_t key = params->key;
 525	int shmflg = params->flg;
 526	size_t size = params->u.size;
 527	int error;
 528	struct shmid_kernel *shp;
 529	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 530	struct file *file;
 531	char name[13];
 532	int id;
 533	vm_flags_t acctflag = 0;
 534
 535	if (size < SHMMIN || size > ns->shm_ctlmax)
 536		return -EINVAL;
 537
 538	if (numpages << PAGE_SHIFT < size)
 539		return -ENOSPC;
 540
 541	if (ns->shm_tot + numpages < ns->shm_tot ||
 542			ns->shm_tot + numpages > ns->shm_ctlall)
 543		return -ENOSPC;
 544
 545	shp = ipc_rcu_alloc(sizeof(*shp));
 546	if (!shp)
 547		return -ENOMEM;
 548
 549	shp->shm_perm.key = key;
 550	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
 551	shp->mlock_user = NULL;
 552
 553	shp->shm_perm.security = NULL;
 554	error = security_shm_alloc(shp);
 555	if (error) {
 556		ipc_rcu_putref(shp, ipc_rcu_free);
 557		return error;
 558	}
 559
 560	sprintf(name, "SYSV%08x", key);
 561	if (shmflg & SHM_HUGETLB) {
 562		struct hstate *hs;
 563		size_t hugesize;
 564
 565		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 566		if (!hs) {
 567			error = -EINVAL;
 568			goto no_file;
 569		}
 570		hugesize = ALIGN(size, huge_page_size(hs));
 571
 572		/* hugetlb_file_setup applies strict accounting */
 573		if (shmflg & SHM_NORESERVE)
 574			acctflag = VM_NORESERVE;
 575		file = hugetlb_file_setup(name, hugesize, acctflag,
 576				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
 577				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 578	} else {
 579		/*
 580		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
 581		 * if it's asked for.
 582		 */
 583		if  ((shmflg & SHM_NORESERVE) &&
 584				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
 585			acctflag = VM_NORESERVE;
 586		file = shmem_kernel_file_setup(name, size, acctflag);
 587	}
 588	error = PTR_ERR(file);
 589	if (IS_ERR(file))
 590		goto no_file;
 591
 592	shp->shm_cprid = task_tgid_vnr(current);
 593	shp->shm_lprid = 0;
 594	shp->shm_atim = shp->shm_dtim = 0;
 595	shp->shm_ctim = get_seconds();
 596	shp->shm_segsz = size;
 597	shp->shm_nattch = 0;
 598	shp->shm_file = file;
 599	shp->shm_creator = current;
 600
 601	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
 602	if (id < 0) {
 603		error = id;
 604		goto no_id;
 605	}
 606
 607	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
 608
 609	/*
 610	 * shmid gets reported as "inode#" in /proc/pid/maps.
 611	 * proc-ps tools use this. Changing this will break them.
 612	 */
 613	file_inode(file)->i_ino = shp->shm_perm.id;
 614
 615	ns->shm_tot += numpages;
 616	error = shp->shm_perm.id;
 617
 618	ipc_unlock_object(&shp->shm_perm);
 619	rcu_read_unlock();
 620	return error;
 621
 622no_id:
 623	if (is_file_hugepages(file) && shp->mlock_user)
 624		user_shm_unlock(size, shp->mlock_user);
 625	fput(file);
 626no_file:
 627	ipc_rcu_putref(shp, shm_rcu_free);
 628	return error;
 629}
 630
 631/*
 632 * Called with shm_ids.rwsem and ipcp locked.
 633 */
 634static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
 635{
 636	struct shmid_kernel *shp;
 637
 638	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 639	return security_shm_associate(shp, shmflg);
 640}
 641
 642/*
 643 * Called with shm_ids.rwsem and ipcp locked.
 644 */
 645static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
 646				struct ipc_params *params)
 647{
 648	struct shmid_kernel *shp;
 649
 650	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 651	if (shp->shm_segsz < params->u.size)
 652		return -EINVAL;
 653
 654	return 0;
 655}
 656
 657SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
 658{
 659	struct ipc_namespace *ns;
 660	static const struct ipc_ops shm_ops = {
 661		.getnew = newseg,
 662		.associate = shm_security,
 663		.more_checks = shm_more_checks,
 664	};
 665	struct ipc_params shm_params;
 666
 667	ns = current->nsproxy->ipc_ns;
 668
 669	shm_params.key = key;
 670	shm_params.flg = shmflg;
 671	shm_params.u.size = size;
 672
 673	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
 674}
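
A minimal userspace sketch (not part of ipc/shm.c; the 4 KiB size and 0600 mode are illustrative only) showing a caller reaching the shmget() entry point defined above:

/* Hypothetical userspace caller of the shmget() syscall above. */
#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>

int main(void)
{
	/* IPC_PRIVATE always takes the newseg() path inside ipcget(). */
	int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

	if (shmid < 0) {
		perror("shmget");
		return 1;
	}
	printf("created segment %d\n", shmid);
	/* IPC_RMID: destroyed immediately here, since nothing is attached. */
	shmctl(shmid, IPC_RMID, NULL);
	return 0;
}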
 675
 676static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
 677{
 678	switch (version) {
 679	case IPC_64:
 680		return copy_to_user(buf, in, sizeof(*in));
 681	case IPC_OLD:
 682	    {
 683		struct shmid_ds out;
 684
 685		memset(&out, 0, sizeof(out));
 686		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
 687		out.shm_segsz	= in->shm_segsz;
 688		out.shm_atime	= in->shm_atime;
 689		out.shm_dtime	= in->shm_dtime;
 690		out.shm_ctime	= in->shm_ctime;
 691		out.shm_cpid	= in->shm_cpid;
 692		out.shm_lpid	= in->shm_lpid;
 693		out.shm_nattch	= in->shm_nattch;
 694
 695		return copy_to_user(buf, &out, sizeof(out));
 696	    }
 697	default:
 698		return -EINVAL;
 699	}
 700}
 701
 702static inline unsigned long
 703copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
 704{
 705	switch (version) {
 706	case IPC_64:
 707		if (copy_from_user(out, buf, sizeof(*out)))
 708			return -EFAULT;
 709		return 0;
 710	case IPC_OLD:
 711	    {
 712		struct shmid_ds tbuf_old;
 713
 714		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 715			return -EFAULT;
 716
 717		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
 718		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
 719		out->shm_perm.mode	= tbuf_old.shm_perm.mode;
 720
 721		return 0;
 722	    }
 723	default:
 724		return -EINVAL;
 725	}
 726}
 727
 728static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
 729{
 730	switch (version) {
 731	case IPC_64:
 732		return copy_to_user(buf, in, sizeof(*in));
 733	case IPC_OLD:
 734	    {
 735		struct shminfo out;
 736
 737		if (in->shmmax > INT_MAX)
 738			out.shmmax = INT_MAX;
 739		else
 740			out.shmmax = (int)in->shmmax;
 741
 742		out.shmmin	= in->shmmin;
 743		out.shmmni	= in->shmmni;
 744		out.shmseg	= in->shmseg;
 745		out.shmall	= in->shmall;
 746
 747		return copy_to_user(buf, &out, sizeof(out));
 748	    }
 749	default:
 750		return -EINVAL;
 751	}
 752}
 753
 754/*
 755 * Calculate and add used RSS and swap pages of a shm.
 756 * Called with shm_ids.rwsem held as a reader
 757 */
 758static void shm_add_rss_swap(struct shmid_kernel *shp,
 759	unsigned long *rss_add, unsigned long *swp_add)
 760{
 761	struct inode *inode;
 762
 763	inode = file_inode(shp->shm_file);
 764
 765	if (is_file_hugepages(shp->shm_file)) {
 766		struct address_space *mapping = inode->i_mapping;
 767		struct hstate *h = hstate_file(shp->shm_file);
 768		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
 769	} else {
 770#ifdef CONFIG_SHMEM
 771		struct shmem_inode_info *info = SHMEM_I(inode);
 772
 773		spin_lock_irq(&info->lock);
 774		*rss_add += inode->i_mapping->nrpages;
 775		*swp_add += info->swapped;
 776		spin_unlock_irq(&info->lock);
 777#else
 778		*rss_add += inode->i_mapping->nrpages;
 779#endif
 780	}
 781}
 782
 783/*
 784 * Called with shm_ids.rwsem held as a reader
 785 */
 786static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
 787		unsigned long *swp)
 788{
 789	int next_id;
 790	int total, in_use;
 791
 792	*rss = 0;
 793	*swp = 0;
 794
 795	in_use = shm_ids(ns).in_use;
 796
 797	for (total = 0, next_id = 0; total < in_use; next_id++) {
 798		struct kern_ipc_perm *ipc;
 799		struct shmid_kernel *shp;
 800
 801		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
 802		if (ipc == NULL)
 803			continue;
 804		shp = container_of(ipc, struct shmid_kernel, shm_perm);
 805
 806		shm_add_rss_swap(shp, rss, swp);
 807
 808		total++;
 809	}
 810}
 811
 812/*
 813 * This function handles some shmctl commands which require the rwsem
 814 * to be held in write mode.
 815 * NOTE: no locks must be held, the rwsem is taken inside this function.
 816 */
 817static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
 818		       struct shmid_ds __user *buf, int version)
 819{
 820	struct kern_ipc_perm *ipcp;
 821	struct shmid64_ds shmid64;
 822	struct shmid_kernel *shp;
 823	int err;
 824
 825	if (cmd == IPC_SET) {
 826		if (copy_shmid_from_user(&shmid64, buf, version))
 827			return -EFAULT;
 828	}
 829
 830	down_write(&shm_ids(ns).rwsem);
 831	rcu_read_lock();
 832
 833	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
 834				      &shmid64.shm_perm, 0);
 835	if (IS_ERR(ipcp)) {
 836		err = PTR_ERR(ipcp);
 837		goto out_unlock1;
 838	}
 839
 840	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 841
 842	err = security_shm_shmctl(shp, cmd);
 843	if (err)
 844		goto out_unlock1;
 845
 846	switch (cmd) {
 847	case IPC_RMID:
 848		ipc_lock_object(&shp->shm_perm);
 849		/* do_shm_rmid unlocks the ipc object and rcu */
 850		do_shm_rmid(ns, ipcp);
 851		goto out_up;
 852	case IPC_SET:
 853		ipc_lock_object(&shp->shm_perm);
 854		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
 855		if (err)
 856			goto out_unlock0;
 857		shp->shm_ctim = get_seconds();
 858		break;
 859	default:
 860		err = -EINVAL;
 861		goto out_unlock1;
 862	}
 863
 864out_unlock0:
 865	ipc_unlock_object(&shp->shm_perm);
 866out_unlock1:
 867	rcu_read_unlock();
 868out_up:
 869	up_write(&shm_ids(ns).rwsem);
 870	return err;
 871}
 872
 873static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
 874			 int cmd, int version, void __user *buf)
 875{
 876	int err;
 877	struct shmid_kernel *shp;
 878
 879	/* preliminary security checks for *_INFO */
 880	if (cmd == IPC_INFO || cmd == SHM_INFO) {
 881		err = security_shm_shmctl(NULL, cmd);
 882		if (err)
 883			return err;
 884	}
 885
 886	switch (cmd) {
 887	case IPC_INFO:
 888	{
 889		struct shminfo64 shminfo;
 890
 891		memset(&shminfo, 0, sizeof(shminfo));
 892		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
 893		shminfo.shmmax = ns->shm_ctlmax;
 894		shminfo.shmall = ns->shm_ctlall;
 895
 896		shminfo.shmmin = SHMMIN;
 897		if (copy_shminfo_to_user(buf, &shminfo, version))
 898			return -EFAULT;
 899
 900		down_read(&shm_ids(ns).rwsem);
 901		err = ipc_get_maxid(&shm_ids(ns));
 902		up_read(&shm_ids(ns).rwsem);
 903
 904		if (err < 0)
 905			err = 0;
 906		goto out;
 907	}
 908	case SHM_INFO:
 909	{
 910		struct shm_info shm_info;
 911
 912		memset(&shm_info, 0, sizeof(shm_info));
 913		down_read(&shm_ids(ns).rwsem);
 914		shm_info.used_ids = shm_ids(ns).in_use;
 915		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
 916		shm_info.shm_tot = ns->shm_tot;
 917		shm_info.swap_attempts = 0;
 918		shm_info.swap_successes = 0;
 919		err = ipc_get_maxid(&shm_ids(ns));
 920		up_read(&shm_ids(ns).rwsem);
 921		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
 922			err = -EFAULT;
 923			goto out;
 924		}
 925
 926		err = err < 0 ? 0 : err;
 927		goto out;
 928	}
 929	case SHM_STAT:
 930	case IPC_STAT:
 931	{
 932		struct shmid64_ds tbuf;
 933		int result;
 934
 935		rcu_read_lock();
 936		if (cmd == SHM_STAT) {
 937			shp = shm_obtain_object(ns, shmid);
 938			if (IS_ERR(shp)) {
 939				err = PTR_ERR(shp);
 940				goto out_unlock;
 941			}
 942			result = shp->shm_perm.id;
 943		} else {
 944			shp = shm_obtain_object_check(ns, shmid);
 945			if (IS_ERR(shp)) {
 946				err = PTR_ERR(shp);
 947				goto out_unlock;
 948			}
 949			result = 0;
 950		}
 951
 952		err = -EACCES;
 953		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
 954			goto out_unlock;
 955
 956		err = security_shm_shmctl(shp, cmd);
 957		if (err)
 958			goto out_unlock;
 959
 960		memset(&tbuf, 0, sizeof(tbuf));
 961		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
 962		tbuf.shm_segsz	= shp->shm_segsz;
 963		tbuf.shm_atime	= shp->shm_atim;
 964		tbuf.shm_dtime	= shp->shm_dtim;
 965		tbuf.shm_ctime	= shp->shm_ctim;
 966		tbuf.shm_cpid	= shp->shm_cprid;
 967		tbuf.shm_lpid	= shp->shm_lprid;
 968		tbuf.shm_nattch	= shp->shm_nattch;
 969		rcu_read_unlock();
 970
 971		if (copy_shmid_to_user(buf, &tbuf, version))
 972			err = -EFAULT;
 973		else
 974			err = result;
 975		goto out;
 976	}
 977	default:
 978		return -EINVAL;
 979	}
 980
 981out_unlock:
 982	rcu_read_unlock();
 983out:
 984	return err;
 985}
 986
 987SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 988{
 989	struct shmid_kernel *shp;
 990	int err, version;
 991	struct ipc_namespace *ns;
 992
 993	if (cmd < 0 || shmid < 0)
 994		return -EINVAL;
 995
 996	version = ipc_parse_version(&cmd);
 997	ns = current->nsproxy->ipc_ns;
 998
 999	switch (cmd) {
1000	case IPC_INFO:
1001	case SHM_INFO:
1002	case SHM_STAT:
1003	case IPC_STAT:
1004		return shmctl_nolock(ns, shmid, cmd, version, buf);
1005	case IPC_RMID:
1006	case IPC_SET:
1007		return shmctl_down(ns, shmid, cmd, buf, version);
1008	case SHM_LOCK:
1009	case SHM_UNLOCK:
1010	{
1011		struct file *shm_file;
1012
1013		rcu_read_lock();
1014		shp = shm_obtain_object_check(ns, shmid);
1015		if (IS_ERR(shp)) {
1016			err = PTR_ERR(shp);
1017			goto out_unlock1;
1018		}
1019
1020		audit_ipc_obj(&(shp->shm_perm));
1021		err = security_shm_shmctl(shp, cmd);
1022		if (err)
1023			goto out_unlock1;
1024
1025		ipc_lock_object(&shp->shm_perm);
1026
1027		/* check if shm_destroy() is tearing down shp */
1028		if (!ipc_valid_object(&shp->shm_perm)) {
1029			err = -EIDRM;
1030			goto out_unlock0;
1031		}
1032
1033		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1034			kuid_t euid = current_euid();
1035
1036			if (!uid_eq(euid, shp->shm_perm.uid) &&
1037			    !uid_eq(euid, shp->shm_perm.cuid)) {
1038				err = -EPERM;
1039				goto out_unlock0;
1040			}
1041			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1042				err = -EPERM;
1043				goto out_unlock0;
1044			}
1045		}
1046
1047		shm_file = shp->shm_file;
1048		if (is_file_hugepages(shm_file))
1049			goto out_unlock0;
1050
1051		if (cmd == SHM_LOCK) {
1052			struct user_struct *user = current_user();
1053
1054			err = shmem_lock(shm_file, 1, user);
1055			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1056				shp->shm_perm.mode |= SHM_LOCKED;
1057				shp->mlock_user = user;
1058			}
1059			goto out_unlock0;
1060		}
1061
1062		/* SHM_UNLOCK */
1063		if (!(shp->shm_perm.mode & SHM_LOCKED))
1064			goto out_unlock0;
1065		shmem_lock(shm_file, 0, shp->mlock_user);
1066		shp->shm_perm.mode &= ~SHM_LOCKED;
1067		shp->mlock_user = NULL;
1068		get_file(shm_file);
1069		ipc_unlock_object(&shp->shm_perm);
1070		rcu_read_unlock();
1071		shmem_unlock_mapping(shm_file->f_mapping);
1072
1073		fput(shm_file);
1074		return err;
1075	}
1076	default:
1077		return -EINVAL;
1078	}
1079
1080out_unlock0:
1081	ipc_unlock_object(&shp->shm_perm);
1082out_unlock1:
1083	rcu_read_unlock();
1084	return err;
1085}
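
As a hedged illustration (not part of this file) of the IPC_STAT branch served by shmctl_nolock() above, a userspace reader might look like this:

/* Hypothetical userspace sketch: IPC_STAT is handled by shmctl_nolock()
 * and copied back out through copy_shmid_to_user(). */
#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>

static void show_segment(int shmid)
{
	struct shmid_ds ds;

	if (shmctl(shmid, IPC_STAT, &ds) != 0) {
		perror("shmctl(IPC_STAT)");
		return;
	}
	printf("size=%zu nattch=%lu mode=%o\n",
	       ds.shm_segsz, (unsigned long)ds.shm_nattch,
	       (unsigned int)ds.shm_perm.mode);
}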
1086
1087/*
1088 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1089 *
1090 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1091 * "raddr" thing points to kernel space, and there has to be a wrapper around
1092 * this.
1093 */
1094long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1095	      ulong *raddr, unsigned long shmlba)
1096{
1097	struct shmid_kernel *shp;
1098	unsigned long addr;
1099	unsigned long size;
1100	struct file *file;
1101	int    err;
1102	unsigned long flags;
1103	unsigned long prot;
1104	int acc_mode;
1105	struct ipc_namespace *ns;
1106	struct shm_file_data *sfd;
1107	struct path path;
1108	fmode_t f_mode;
1109	unsigned long populate = 0;
1110
1111	err = -EINVAL;
1112	if (shmid < 0)
1113		goto out;
1114	else if ((addr = (ulong)shmaddr)) {
1115		if (addr & (shmlba - 1)) {
1116			/*
1117			 * Round down to the nearest multiple of shmlba.
1118			 * For sane do_mmap_pgoff() parameters, avoid
1119			 * round downs that trigger nil-page and MAP_FIXED.
1120			 */
1121			if ((shmflg & SHM_RND) && addr >= shmlba)
1122				addr &= ~(shmlba - 1);
1123			else
1124#ifndef __ARCH_FORCE_SHMLBA
1125				if (addr & ~PAGE_MASK)
1126#endif
1127					goto out;
1128		}
1129		flags = MAP_SHARED | MAP_FIXED;
1130	} else {
1131		if ((shmflg & SHM_REMAP))
1132			goto out;
1133
1134		flags = MAP_SHARED;
1135	}
1136
1137	if (shmflg & SHM_RDONLY) {
1138		prot = PROT_READ;
1139		acc_mode = S_IRUGO;
1140		f_mode = FMODE_READ;
1141	} else {
1142		prot = PROT_READ | PROT_WRITE;
1143		acc_mode = S_IRUGO | S_IWUGO;
1144		f_mode = FMODE_READ | FMODE_WRITE;
1145	}
1146	if (shmflg & SHM_EXEC) {
1147		prot |= PROT_EXEC;
1148		acc_mode |= S_IXUGO;
1149	}
1150
1151	/*
1152	 * We cannot rely on the fs check since SYSV IPC does have an
1153	 * additional creator id...
1154	 */
1155	ns = current->nsproxy->ipc_ns;
1156	rcu_read_lock();
1157	shp = shm_obtain_object_check(ns, shmid);
1158	if (IS_ERR(shp)) {
1159		err = PTR_ERR(shp);
1160		goto out_unlock;
1161	}
1162
1163	err = -EACCES;
1164	if (ipcperms(ns, &shp->shm_perm, acc_mode))
1165		goto out_unlock;
1166
1167	err = security_shm_shmat(shp, shmaddr, shmflg);
1168	if (err)
1169		goto out_unlock;
1170
1171	ipc_lock_object(&shp->shm_perm);
1172
1173	/* check if shm_destroy() is tearing down shp */
1174	if (!ipc_valid_object(&shp->shm_perm)) {
1175		ipc_unlock_object(&shp->shm_perm);
1176		err = -EIDRM;
1177		goto out_unlock;
1178	}
1179
1180	path = shp->shm_file->f_path;
1181	path_get(&path);
1182	shp->shm_nattch++;
1183	size = i_size_read(d_inode(path.dentry));
1184	ipc_unlock_object(&shp->shm_perm);
1185	rcu_read_unlock();
1186
1187	err = -ENOMEM;
1188	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1189	if (!sfd) {
1190		path_put(&path);
1191		goto out_nattch;
1192	}
1193
1194	file = alloc_file(&path, f_mode,
1195			  is_file_hugepages(shp->shm_file) ?
1196				&shm_file_operations_huge :
1197				&shm_file_operations);
1198	err = PTR_ERR(file);
1199	if (IS_ERR(file)) {
1200		kfree(sfd);
1201		path_put(&path);
1202		goto out_nattch;
1203	}
1204
1205	file->private_data = sfd;
1206	file->f_mapping = shp->shm_file->f_mapping;
1207	sfd->id = shp->shm_perm.id;
1208	sfd->ns = get_ipc_ns(ns);
1209	sfd->file = shp->shm_file;
1210	sfd->vm_ops = NULL;
1211
1212	err = security_mmap_file(file, prot, flags);
1213	if (err)
1214		goto out_fput;
1215
1216	if (down_write_killable(&current->mm->mmap_sem)) {
1217		err = -EINTR;
1218		goto out_fput;
1219	}
1220
1221	if (addr && !(shmflg & SHM_REMAP)) {
1222		err = -EINVAL;
1223		if (addr + size < addr)
1224			goto invalid;
1225
1226		if (find_vma_intersection(current->mm, addr, addr + size))
1227			goto invalid;
1228	}
1229
1230	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
1231	*raddr = addr;
1232	err = 0;
1233	if (IS_ERR_VALUE(addr))
1234		err = (long)addr;
1235invalid:
1236	up_write(&current->mm->mmap_sem);
1237	if (populate)
1238		mm_populate(addr, populate);
1239
1240out_fput:
1241	fput(file);
1242
1243out_nattch:
1244	down_write(&shm_ids(ns).rwsem);
1245	shp = shm_lock(ns, shmid);
1246	shp->shm_nattch--;
1247	if (shm_may_destroy(ns, shp))
1248		shm_destroy(ns, shp);
1249	else
1250		shm_unlock(shp);
1251	up_write(&shm_ids(ns).rwsem);
1252	return err;
1253
1254out_unlock:
1255	rcu_read_unlock();
1256out:
1257	return err;
1258}
1259
1260SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1261{
1262	unsigned long ret;
1263	long err;
1264
1265	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1266	if (err)
1267		return err;
1268	force_successful_syscall_return();
1269	return (long)ret;
1270}
1271
1272/*
1273 * detach and kill segment if marked destroyed.
1274 * The work is done in shm_close.
1275 */
1276SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1277{
1278	struct mm_struct *mm = current->mm;
1279	struct vm_area_struct *vma;
1280	unsigned long addr = (unsigned long)shmaddr;
1281	int retval = -EINVAL;
1282#ifdef CONFIG_MMU
1283	loff_t size = 0;
1284	struct file *file;
1285	struct vm_area_struct *next;
1286#endif
1287
1288	if (addr & ~PAGE_MASK)
1289		return retval;
1290
1291	if (down_write_killable(&mm->mmap_sem))
1292		return -EINTR;
1293
1294	/*
1295	 * This function tries to be smart and unmap shm segments that
1296	 * were modified by partial mlock or munmap calls:
1297	 * - It first determines the size of the shm segment that should be
1298	 *   unmapped: It searches for a vma that is backed by shm and that
 1299	 *   started at address shmaddr. It records its size and then unmaps
1300	 *   it.
1301	 * - Then it unmaps all shm vmas that started at shmaddr and that
1302	 *   are within the initially determined size and that are from the
1303	 *   same shm segment from which we determined the size.
1304	 * Errors from do_munmap are ignored: the function only fails if
1305	 * it's called with invalid parameters or if it's called to unmap
1306	 * a part of a vma. Both calls in this function are for full vmas,
1307	 * the parameters are directly copied from the vma itself and always
1308	 * valid - therefore do_munmap cannot fail. (famous last words?)
1309	 */
1310	/*
1311	 * If it had been mremap()'d, the starting address would not
1312	 * match the usual checks anyway. So assume all vma's are
1313	 * above the starting address given.
1314	 */
1315	vma = find_vma(mm, addr);
1316
1317#ifdef CONFIG_MMU
1318	while (vma) {
1319		next = vma->vm_next;
1320
1321		/*
1322		 * Check if the starting address would match, i.e. it's
 1323		 * a fragment created by mprotect() and/or munmap(), or
 1324		 * otherwise it starts at this address with no hassles.
1325		 */
1326		if ((vma->vm_ops == &shm_vm_ops) &&
1327			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1328
1329			/*
1330			 * Record the file of the shm segment being
1331			 * unmapped.  With mremap(), someone could place
1332			 * page from another segment but with equal offsets
1333			 * in the range we are unmapping.
1334			 */
1335			file = vma->vm_file;
1336			size = i_size_read(file_inode(vma->vm_file));
1337			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1338			/*
1339			 * We discovered the size of the shm segment, so
1340			 * break out of here and fall through to the next
1341			 * loop that uses the size information to stop
1342			 * searching for matching vma's.
1343			 */
1344			retval = 0;
1345			vma = next;
1346			break;
1347		}
1348		vma = next;
1349	}
1350
1351	/*
1352	 * We need look no further than the maximum address a fragment
1353	 * could possibly have landed at. Also cast things to loff_t to
1354	 * prevent overflows and make comparisons vs. equal-width types.
1355	 */
1356	size = PAGE_ALIGN(size);
1357	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1358		next = vma->vm_next;
1359
1360		/* finding a matching vma now does not alter retval */
1361		if ((vma->vm_ops == &shm_vm_ops) &&
1362		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1363		    (vma->vm_file == file))
1364			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1365		vma = next;
1366	}
1367
1368#else	/* CONFIG_MMU */
1369	/* under NOMMU conditions, the exact address to be destroyed must be
1370	 * given
1371	 */
1372	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1373		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1374		retval = 0;
1375	}
1376
1377#endif
1378
1379	up_write(&mm->mmap_sem);
1380	return retval;
1381}
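
A minimal userspace sketch (assuming an already existing shmid) exercising the shmat()/shmdt() paths implemented by do_shmat() and the shmdt() syscall above:

/* Hypothetical userspace round trip: shmat() ends up in do_shmat(),
 * shmdt() unmaps the vma and shm_close() drops shm_nattch. */
#include <sys/ipc.h>
#include <sys/shm.h>
#include <string.h>

static int roundtrip(int shmid)
{
	void *p = shmat(shmid, NULL, 0);	/* let the kernel pick the address */

	if (p == (void *)-1)
		return -1;
	memcpy(p, "hello", 6);			/* memory backed by the shmem file */
	return shmdt(p);			/* detach; segment freed later if SHM_DEST is set */
}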
1382
1383#ifdef CONFIG_PROC_FS
1384static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1385{
1386	struct user_namespace *user_ns = seq_user_ns(s);
1387	struct shmid_kernel *shp = it;
1388	unsigned long rss = 0, swp = 0;
1389
1390	shm_add_rss_swap(shp, &rss, &swp);
1391
1392#if BITS_PER_LONG <= 32
1393#define SIZE_SPEC "%10lu"
1394#else
1395#define SIZE_SPEC "%21lu"
1396#endif
1397
1398	seq_printf(s,
1399		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
1400		   "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
1401		   SIZE_SPEC " " SIZE_SPEC "\n",
1402		   shp->shm_perm.key,
1403		   shp->shm_perm.id,
1404		   shp->shm_perm.mode,
1405		   shp->shm_segsz,
1406		   shp->shm_cprid,
1407		   shp->shm_lprid,
1408		   shp->shm_nattch,
1409		   from_kuid_munged(user_ns, shp->shm_perm.uid),
1410		   from_kgid_munged(user_ns, shp->shm_perm.gid),
1411		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
1412		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
1413		   shp->shm_atim,
1414		   shp->shm_dtim,
1415		   shp->shm_ctim,
1416		   rss * PAGE_SIZE,
1417		   swp * PAGE_SIZE);
1418
1419	return 0;
1420}
1421#endif
ipc/shm.c (v4.17)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/ipc/shm.c
   4 * Copyright (C) 1992, 1993 Krishna Balasubramanian
   5 *	 Many improvements/fixes by Bruno Haible.
   6 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
   7 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
   8 *
   9 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
  10 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
  11 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
  12 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
  13 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
  14 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
  15 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
  16 *
  17 * support for audit of ipc object properties and permission changes
  18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  19 *
  20 * namespaces support
  21 * OpenVZ, SWsoft Inc.
  22 * Pavel Emelianov <xemul@openvz.org>
  23 *
  24 * Better ipc lock (kern_ipc_perm.lock) handling
  25 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
  26 */
  27
  28#include <linux/slab.h>
  29#include <linux/mm.h>
  30#include <linux/hugetlb.h>
  31#include <linux/shm.h>
  32#include <linux/init.h>
  33#include <linux/file.h>
  34#include <linux/mman.h>
  35#include <linux/shmem_fs.h>
  36#include <linux/security.h>
  37#include <linux/syscalls.h>
  38#include <linux/audit.h>
  39#include <linux/capability.h>
  40#include <linux/ptrace.h>
  41#include <linux/seq_file.h>
  42#include <linux/rwsem.h>
  43#include <linux/nsproxy.h>
  44#include <linux/mount.h>
  45#include <linux/ipc_namespace.h>
  46
  47#include <linux/uaccess.h>
  48
  49#include "util.h"
  50
  51struct shmid_kernel /* private to the kernel */
  52{
  53	struct kern_ipc_perm	shm_perm;
  54	struct file		*shm_file;
  55	unsigned long		shm_nattch;
  56	unsigned long		shm_segsz;
  57	time64_t		shm_atim;
  58	time64_t		shm_dtim;
  59	time64_t		shm_ctim;
  60	struct pid		*shm_cprid;
  61	struct pid		*shm_lprid;
  62	struct user_struct	*mlock_user;
  63
  64	/* The task created the shm object.  NULL if the task is dead. */
  65	struct task_struct	*shm_creator;
  66	struct list_head	shm_clist;	/* list by creator */
  67} __randomize_layout;
  68
  69/* shm_mode upper byte flags */
  70#define SHM_DEST	01000	/* segment will be destroyed on last detach */
  71#define SHM_LOCKED	02000   /* segment will not be swapped */
  72
  73struct shm_file_data {
  74	int id;
  75	struct ipc_namespace *ns;
  76	struct file *file;
  77	const struct vm_operations_struct *vm_ops;
  78};
  79
  80#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
  81
  82static const struct file_operations shm_file_operations;
  83static const struct vm_operations_struct shm_vm_ops;
  84
  85#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])
  86
  87#define shm_unlock(shp)			\
  88	ipc_unlock(&(shp)->shm_perm)
  89
  90static int newseg(struct ipc_namespace *, struct ipc_params *);
  91static void shm_open(struct vm_area_struct *vma);
  92static void shm_close(struct vm_area_struct *vma);
  93static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
  94#ifdef CONFIG_PROC_FS
  95static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
  96#endif
  97
  98int shm_init_ns(struct ipc_namespace *ns)
  99{
 100	ns->shm_ctlmax = SHMMAX;
 101	ns->shm_ctlall = SHMALL;
 102	ns->shm_ctlmni = SHMMNI;
 103	ns->shm_rmid_forced = 0;
 104	ns->shm_tot = 0;
 105	return ipc_init_ids(&shm_ids(ns));
 106}
 107
 108/*
 109 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 110 * Only shm_ids.rwsem remains locked on exit.
 111 */
 112static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 113{
 114	struct shmid_kernel *shp;
 115
 116	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 117
 118	if (shp->shm_nattch) {
 119		shp->shm_perm.mode |= SHM_DEST;
 120		/* Do not find it any more */
 121		ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
 122		shm_unlock(shp);
 123	} else
 124		shm_destroy(ns, shp);
 125}
 126
 127#ifdef CONFIG_IPC_NS
 128void shm_exit_ns(struct ipc_namespace *ns)
 129{
 130	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
 131	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
 132	rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
 133}
 134#endif
 135
 136static int __init ipc_ns_init(void)
 137{
 138	const int err = shm_init_ns(&init_ipc_ns);
 139	WARN(err, "ipc: sysv shm_init_ns failed: %d\n", err);
 140	return err;
 141}
 142
 143pure_initcall(ipc_ns_init);
 144
 145void __init shm_init(void)
 146{
 147	ipc_init_proc_interface("sysvipc/shm",
 148#if BITS_PER_LONG <= 32
 149				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
 150#else
 151				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
 152#endif
 153				IPC_SHM_IDS, sysvipc_shm_proc_show);
 154}
 155
 156static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
 157{
 158	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
 159
 160	if (IS_ERR(ipcp))
 161		return ERR_CAST(ipcp);
 162
 163	return container_of(ipcp, struct shmid_kernel, shm_perm);
 164}
 165
 166static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
 167{
 168	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
 169
 170	if (IS_ERR(ipcp))
 171		return ERR_CAST(ipcp);
 172
 173	return container_of(ipcp, struct shmid_kernel, shm_perm);
 174}
 175
 176/*
 177 * shm_lock_(check_) routines are called in the paths where the rwsem
 178 * is not necessarily held.
 179 */
 180static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 181{
 182	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
 183
 184	/*
 185	 * Callers of shm_lock() must validate the status of the returned ipc
 186	 * object pointer (as returned by ipc_lock()), and error out as
 187	 * appropriate.
 188	 */
 189	if (IS_ERR(ipcp))
 190		return (void *)ipcp;
 191	return container_of(ipcp, struct shmid_kernel, shm_perm);
 192}
 193
 194static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
 195{
 196	rcu_read_lock();
 197	ipc_lock_object(&ipcp->shm_perm);
 198}
 199
 200static void shm_rcu_free(struct rcu_head *head)
 201{
 202	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
 203							rcu);
 204	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
 205							shm_perm);
 206	security_shm_free(&shp->shm_perm);
 207	kvfree(shp);
 208}
 209
 210static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 211{
 212	list_del(&s->shm_clist);
 213	ipc_rmid(&shm_ids(ns), &s->shm_perm);
 214}
 215
 216
 217static int __shm_open(struct vm_area_struct *vma)
 218{
 219	struct file *file = vma->vm_file;
 220	struct shm_file_data *sfd = shm_file_data(file);
 221	struct shmid_kernel *shp;
 222
 223	shp = shm_lock(sfd->ns, sfd->id);
 224
 225	if (IS_ERR(shp))
 226		return PTR_ERR(shp);
 227
 228	if (shp->shm_file != sfd->file) {
 229		/* ID was reused */
 230		shm_unlock(shp);
 231		return -EINVAL;
 232	}
 233
 234	shp->shm_atim = ktime_get_real_seconds();
 235	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
 236	shp->shm_nattch++;
 237	shm_unlock(shp);
 238	return 0;
 239}
 240
 241/* This is called by fork, once for every shm attach. */
 242static void shm_open(struct vm_area_struct *vma)
 243{
 244	int err = __shm_open(vma);
 245	/*
 246	 * We raced in the idr lookup or with shm_destroy().
 247	 * Either way, the ID is busted.
 248	 */
 249	WARN_ON_ONCE(err);
 250}
 251
 252/*
 253 * shm_destroy - free the struct shmid_kernel
 254 *
 255 * @ns: namespace
 256 * @shp: struct to free
 257 *
 258 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 259 * but returns with shp unlocked and freed.
 260 */
 261static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 262{
 263	struct file *shm_file;
 264
 265	shm_file = shp->shm_file;
 266	shp->shm_file = NULL;
 267	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
 268	shm_rmid(ns, shp);
 269	shm_unlock(shp);
 270	if (!is_file_hugepages(shm_file))
 271		shmem_lock(shm_file, 0, shp->mlock_user);
 272	else if (shp->mlock_user)
 273		user_shm_unlock(i_size_read(file_inode(shm_file)),
 274				shp->mlock_user);
 275	fput(shm_file);
 276	ipc_update_pid(&shp->shm_cprid, NULL);
 277	ipc_update_pid(&shp->shm_lprid, NULL);
 278	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
 279}
 280
 281/*
 282 * shm_may_destroy - identifies whether shm segment should be destroyed now
 283 *
 284 * Returns true if and only if there are no active users of the segment and
 285 * one of the following is true:
 286 *
 287 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 288 *
 289 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 290 */
 291static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 292{
 293	return (shp->shm_nattch == 0) &&
 294	       (ns->shm_rmid_forced ||
 295		(shp->shm_perm.mode & SHM_DEST));
 296}
 297
 298/*
 299 * remove the attach descriptor vma.
 300 * free memory for segment if it is marked destroyed.
 301 * The descriptor has already been removed from the current->mm->mmap list
 302 * and will later be kfree()d.
 303 */
 304static void shm_close(struct vm_area_struct *vma)
 305{
 306	struct file *file = vma->vm_file;
 307	struct shm_file_data *sfd = shm_file_data(file);
 308	struct shmid_kernel *shp;
 309	struct ipc_namespace *ns = sfd->ns;
 310
 311	down_write(&shm_ids(ns).rwsem);
 312	/* remove from the list of attaches of the shm segment */
 313	shp = shm_lock(ns, sfd->id);
 314
 315	/*
 316	 * We raced in the idr lookup or with shm_destroy().
 317	 * Either way, the ID is busted.
 318	 */
 319	if (WARN_ON_ONCE(IS_ERR(shp)))
 320		goto done; /* no-op */
 321
 322	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
 323	shp->shm_dtim = ktime_get_real_seconds();
 324	shp->shm_nattch--;
 325	if (shm_may_destroy(ns, shp))
 326		shm_destroy(ns, shp);
 327	else
 328		shm_unlock(shp);
 329done:
 330	up_write(&shm_ids(ns).rwsem);
 331}
 332
 333/* Called with ns->shm_ids(ns).rwsem locked */
 334static int shm_try_destroy_orphaned(int id, void *p, void *data)
 335{
 336	struct ipc_namespace *ns = data;
 337	struct kern_ipc_perm *ipcp = p;
 338	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 339
 340	/*
 341	 * We want to destroy segments without users and with already
 342	 * exit'ed originating process.
 343	 *
 344	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
 345	 */
 346	if (shp->shm_creator != NULL)
 347		return 0;
 348
 349	if (shm_may_destroy(ns, shp)) {
 350		shm_lock_by_ptr(shp);
 351		shm_destroy(ns, shp);
 352	}
 353	return 0;
 354}
 355
 356void shm_destroy_orphaned(struct ipc_namespace *ns)
 357{
 358	down_write(&shm_ids(ns).rwsem);
 359	if (shm_ids(ns).in_use)
 360		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
 361	up_write(&shm_ids(ns).rwsem);
 362}
 363
 364/* Locking assumes this will only be called with task == current */
 365void exit_shm(struct task_struct *task)
 366{
 367	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
 368	struct shmid_kernel *shp, *n;
 369
 370	if (list_empty(&task->sysvshm.shm_clist))
 371		return;
 372
 373	/*
 374	 * If kernel.shm_rmid_forced is not set then only keep track of
 375	 * which shmids are orphaned, so that a later set of the sysctl
 376	 * can clean them up.
 377	 */
 378	if (!ns->shm_rmid_forced) {
 379		down_read(&shm_ids(ns).rwsem);
 380		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
 381			shp->shm_creator = NULL;
 382		/*
 383		 * Only under read lock but we are only called on current
 384		 * so no entry on the list will be shared.
 385		 */
 386		list_del(&task->sysvshm.shm_clist);
 387		up_read(&shm_ids(ns).rwsem);
 388		return;
 389	}
 390
 391	/*
 392	 * Destroy all already created segments, that were not yet mapped,
 393	 * and mark any mapped as orphan to cover the sysctl toggling.
 394	 * Destroy is skipped if shm_may_destroy() returns false.
 395	 */
 396	down_write(&shm_ids(ns).rwsem);
 397	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
 398		shp->shm_creator = NULL;
 399
 400		if (shm_may_destroy(ns, shp)) {
 401			shm_lock_by_ptr(shp);
 402			shm_destroy(ns, shp);
 403		}
 404	}
 405
 406	/* Remove the list head from any segments still attached. */
 407	list_del(&task->sysvshm.shm_clist);
 408	up_write(&shm_ids(ns).rwsem);
 409}
 410
 411static int shm_fault(struct vm_fault *vmf)
 412{
 413	struct file *file = vmf->vma->vm_file;
 414	struct shm_file_data *sfd = shm_file_data(file);
 415
 416	return sfd->vm_ops->fault(vmf);
 417}
 418
 419static int shm_split(struct vm_area_struct *vma, unsigned long addr)
 420{
 421	struct file *file = vma->vm_file;
 422	struct shm_file_data *sfd = shm_file_data(file);
 423
 424	if (sfd->vm_ops->split)
 425		return sfd->vm_ops->split(vma, addr);
 426
 427	return 0;
 428}
 429
 430#ifdef CONFIG_NUMA
 431static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 432{
 433	struct file *file = vma->vm_file;
 434	struct shm_file_data *sfd = shm_file_data(file);
 435	int err = 0;
 436
 437	if (sfd->vm_ops->set_policy)
 438		err = sfd->vm_ops->set_policy(vma, new);
 439	return err;
 440}
 441
 442static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
 443					unsigned long addr)
 444{
 445	struct file *file = vma->vm_file;
 446	struct shm_file_data *sfd = shm_file_data(file);
 447	struct mempolicy *pol = NULL;
 448
 449	if (sfd->vm_ops->get_policy)
 450		pol = sfd->vm_ops->get_policy(vma, addr);
 451	else if (vma->vm_policy)
 452		pol = vma->vm_policy;
 453
 454	return pol;
 455}
 456#endif
 457
 458static int shm_mmap(struct file *file, struct vm_area_struct *vma)
 459{
 460	struct shm_file_data *sfd = shm_file_data(file);
 461	int ret;
 462
 463	/*
 464	 * In case of remap_file_pages() emulation, the file can represent an
 465	 * IPC ID that was removed, and possibly even reused by another shm
 466	 * segment already.  Propagate this case as an error to caller.
 467	 */
 468	ret = __shm_open(vma);
 469	if (ret)
 470		return ret;
 471
 472	ret = call_mmap(sfd->file, vma);
 473	if (ret) {
 474		shm_close(vma);
 475		return ret;
 476	}
 477	sfd->vm_ops = vma->vm_ops;
 478#ifdef CONFIG_MMU
 479	WARN_ON(!sfd->vm_ops->fault);
 480#endif
 481	vma->vm_ops = &shm_vm_ops;
 482	return 0;
 483}
 484
 485static int shm_release(struct inode *ino, struct file *file)
 486{
 487	struct shm_file_data *sfd = shm_file_data(file);
 488
 489	put_ipc_ns(sfd->ns);
 490	fput(sfd->file);
 491	shm_file_data(file) = NULL;
 492	kfree(sfd);
 493	return 0;
 494}
 495
 496static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 497{
 498	struct shm_file_data *sfd = shm_file_data(file);
 499
 500	if (!sfd->file->f_op->fsync)
 501		return -EINVAL;
 502	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
 503}
 504
 505static long shm_fallocate(struct file *file, int mode, loff_t offset,
 506			  loff_t len)
 507{
 508	struct shm_file_data *sfd = shm_file_data(file);
 509
 510	if (!sfd->file->f_op->fallocate)
 511		return -EOPNOTSUPP;
 512	return sfd->file->f_op->fallocate(file, mode, offset, len);
 513}
 514
 515static unsigned long shm_get_unmapped_area(struct file *file,
 516	unsigned long addr, unsigned long len, unsigned long pgoff,
 517	unsigned long flags)
 518{
 519	struct shm_file_data *sfd = shm_file_data(file);
 520
 521	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
 522						pgoff, flags);
 523}
 524
 525static const struct file_operations shm_file_operations = {
 526	.mmap		= shm_mmap,
 527	.fsync		= shm_fsync,
 528	.release	= shm_release,
 529	.get_unmapped_area	= shm_get_unmapped_area,
 530	.llseek		= noop_llseek,
 531	.fallocate	= shm_fallocate,
 532};
 533
 534/*
 535 * shm_file_operations_huge is now identical to shm_file_operations,
 536 * but we keep it distinct for the sake of is_file_shm_hugepages().
 537 */
 538static const struct file_operations shm_file_operations_huge = {
 539	.mmap		= shm_mmap,
 540	.fsync		= shm_fsync,
 541	.release	= shm_release,
 542	.get_unmapped_area	= shm_get_unmapped_area,
 543	.llseek		= noop_llseek,
 544	.fallocate	= shm_fallocate,
 545};
 546
 547bool is_file_shm_hugepages(struct file *file)
 548{
 549	return file->f_op == &shm_file_operations_huge;
 550}
 551
 552static const struct vm_operations_struct shm_vm_ops = {
 553	.open	= shm_open,	/* callback for a new vm-area open */
 554	.close	= shm_close,	/* callback for when the vm-area is released */
 555	.fault	= shm_fault,
 556	.split	= shm_split,
 557#if defined(CONFIG_NUMA)
 558	.set_policy = shm_set_policy,
 559	.get_policy = shm_get_policy,
 560#endif
 561};
 562
 563/**
 564 * newseg - Create a new shared memory segment
 565 * @ns: namespace
 566 * @params: ptr to the structure that contains key, size and shmflg
 567 *
 568 * Called with shm_ids.rwsem held as a writer.
 569 */
 570static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 571{
 572	key_t key = params->key;
 573	int shmflg = params->flg;
 574	size_t size = params->u.size;
 575	int error;
 576	struct shmid_kernel *shp;
 577	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 578	struct file *file;
 579	char name[13];
 580	vm_flags_t acctflag = 0;
 581
 582	if (size < SHMMIN || size > ns->shm_ctlmax)
 583		return -EINVAL;
 584
 585	if (numpages << PAGE_SHIFT < size)
 586		return -ENOSPC;
 587
 588	if (ns->shm_tot + numpages < ns->shm_tot ||
 589			ns->shm_tot + numpages > ns->shm_ctlall)
 590		return -ENOSPC;
 591
 592	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
 593	if (unlikely(!shp))
 594		return -ENOMEM;
 595
 596	shp->shm_perm.key = key;
 597	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
 598	shp->mlock_user = NULL;
 599
 600	shp->shm_perm.security = NULL;
 601	error = security_shm_alloc(&shp->shm_perm);
 602	if (error) {
 603		kvfree(shp);
 604		return error;
 605	}
 606
 607	sprintf(name, "SYSV%08x", key);
 608	if (shmflg & SHM_HUGETLB) {
 609		struct hstate *hs;
 610		size_t hugesize;
 611
 612		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 613		if (!hs) {
 614			error = -EINVAL;
 615			goto no_file;
 616		}
 617		hugesize = ALIGN(size, huge_page_size(hs));
 618
 619		/* hugetlb_file_setup applies strict accounting */
 620		if (shmflg & SHM_NORESERVE)
 621			acctflag = VM_NORESERVE;
 622		file = hugetlb_file_setup(name, hugesize, acctflag,
 623				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
 624				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 625	} else {
 626		/*
  627		 * Never skip accounting when the overcommit policy is
  628		 * OVERCOMMIT_NEVER, even if SHM_NORESERVE asks for it.
 629		 */
  630		if ((shmflg & SHM_NORESERVE) &&
 631				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
 632			acctflag = VM_NORESERVE;
 633		file = shmem_kernel_file_setup(name, size, acctflag);
 634	}
 635	error = PTR_ERR(file);
 636	if (IS_ERR(file))
 637		goto no_file;
 638
 639	shp->shm_cprid = get_pid(task_tgid(current));
 640	shp->shm_lprid = NULL;
 641	shp->shm_atim = shp->shm_dtim = 0;
 642	shp->shm_ctim = ktime_get_real_seconds();
 643	shp->shm_segsz = size;
 644	shp->shm_nattch = 0;
 645	shp->shm_file = file;
 646	shp->shm_creator = current;
 647
 648	/* ipc_addid() locks shp upon success. */
 649	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
 650	if (error < 0)
 651		goto no_id;
 652
 653	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
 654
 655	/*
 656	 * shmid gets reported as "inode#" in /proc/pid/maps.
 657	 * proc-ps tools use this. Changing this will break them.
 658	 */
 659	file_inode(file)->i_ino = shp->shm_perm.id;
 660
 661	ns->shm_tot += numpages;
 662	error = shp->shm_perm.id;
 663
 664	ipc_unlock_object(&shp->shm_perm);
 665	rcu_read_unlock();
 666	return error;
 667
 668no_id:
 669	ipc_update_pid(&shp->shm_cprid, NULL);
 670	ipc_update_pid(&shp->shm_lprid, NULL);
 671	if (is_file_hugepages(file) && shp->mlock_user)
 672		user_shm_unlock(size, shp->mlock_user);
 673	fput(file);
 674no_file:
 675	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
 676	return error;
 677}
 678
 679/*
 680 * Called with shm_ids.rwsem and ipcp locked.
 681 */
 682static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
 683				struct ipc_params *params)
 684{
 685	struct shmid_kernel *shp;
 686
 687	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 688	if (shp->shm_segsz < params->u.size)
 689		return -EINVAL;
 690
 691	return 0;
 692}
 693
 694long ksys_shmget(key_t key, size_t size, int shmflg)
 695{
 696	struct ipc_namespace *ns;
 697	static const struct ipc_ops shm_ops = {
 698		.getnew = newseg,
 699		.associate = security_shm_associate,
 700		.more_checks = shm_more_checks,
 701	};
 702	struct ipc_params shm_params;
 703
 704	ns = current->nsproxy->ipc_ns;
 705
 706	shm_params.key = key;
 707	shm_params.flg = shmflg;
 708	shm_params.u.size = size;
 709
 710	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
 711}
 712
 713SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
 714{
 715	return ksys_shmget(key, size, shmflg);
 716}
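
/*
 * newseg() above decodes an optional hugepage size from bits
 * SHM_HUGE_SHIFT.. of shmflg, mirroring mmap()'s MAP_HUGETLB encoding.  A
 * minimal user-space sketch of creating a 2 MB hugetlb segment
 * (illustrative only; SHM_HUGETLB/SHM_HUGE_SHIFT come from the uapi
 * <linux/shm.h> and may need defining by hand with older libc headers):
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int flg = IPC_CREAT | 0600 | SHM_HUGETLB | (21 << SHM_HUGE_SHIFT);
 *	int id  = shmget(IPC_PRIVATE, 4UL << 20, flg);	// log2(2 MB) == 21
 */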
 717
 718static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
 719{
 720	switch (version) {
 721	case IPC_64:
 722		return copy_to_user(buf, in, sizeof(*in));
 723	case IPC_OLD:
 724	    {
 725		struct shmid_ds out;
 726
 727		memset(&out, 0, sizeof(out));
 728		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
 729		out.shm_segsz	= in->shm_segsz;
 730		out.shm_atime	= in->shm_atime;
 731		out.shm_dtime	= in->shm_dtime;
 732		out.shm_ctime	= in->shm_ctime;
 733		out.shm_cpid	= in->shm_cpid;
 734		out.shm_lpid	= in->shm_lpid;
 735		out.shm_nattch	= in->shm_nattch;
 736
 737		return copy_to_user(buf, &out, sizeof(out));
 738	    }
 739	default:
 740		return -EINVAL;
 741	}
 742}
 743
 744static inline unsigned long
 745copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
 746{
 747	switch (version) {
 748	case IPC_64:
 749		if (copy_from_user(out, buf, sizeof(*out)))
 750			return -EFAULT;
 751		return 0;
 752	case IPC_OLD:
 753	    {
 754		struct shmid_ds tbuf_old;
 755
 756		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 757			return -EFAULT;
 758
 759		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
 760		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
 761		out->shm_perm.mode	= tbuf_old.shm_perm.mode;
 762
 763		return 0;
 764	    }
 765	default:
 766		return -EINVAL;
 767	}
 768}
 769
 770static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
 771{
 772	switch (version) {
 773	case IPC_64:
 774		return copy_to_user(buf, in, sizeof(*in));
 775	case IPC_OLD:
 776	    {
 777		struct shminfo out;
 778
 779		if (in->shmmax > INT_MAX)
 780			out.shmmax = INT_MAX;
 781		else
 782			out.shmmax = (int)in->shmmax;
 783
 784		out.shmmin	= in->shmmin;
 785		out.shmmni	= in->shmmni;
 786		out.shmseg	= in->shmseg;
 787		out.shmall	= in->shmall;
 788
 789		return copy_to_user(buf, &out, sizeof(out));
 790	    }
 791	default:
 792		return -EINVAL;
 793	}
 794}
 795
 796/*
 797 * Calculate and add used RSS and swap pages of a shm.
 798 * Called with shm_ids.rwsem held as a reader
 799 */
 800static void shm_add_rss_swap(struct shmid_kernel *shp,
 801	unsigned long *rss_add, unsigned long *swp_add)
 802{
 803	struct inode *inode;
 804
 805	inode = file_inode(shp->shm_file);
 806
 807	if (is_file_hugepages(shp->shm_file)) {
 808		struct address_space *mapping = inode->i_mapping;
 809		struct hstate *h = hstate_file(shp->shm_file);
 810		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
 811	} else {
 812#ifdef CONFIG_SHMEM
 813		struct shmem_inode_info *info = SHMEM_I(inode);
 814
 815		spin_lock_irq(&info->lock);
 816		*rss_add += inode->i_mapping->nrpages;
 817		*swp_add += info->swapped;
 818		spin_unlock_irq(&info->lock);
 819#else
 820		*rss_add += inode->i_mapping->nrpages;
 821#endif
 822	}
 823}
 824
 825/*
 826 * Called with shm_ids.rwsem held as a reader
 827 */
 828static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
 829		unsigned long *swp)
 830{
 831	int next_id;
 832	int total, in_use;
 833
 834	*rss = 0;
 835	*swp = 0;
 836
 837	in_use = shm_ids(ns).in_use;
 838
 839	for (total = 0, next_id = 0; total < in_use; next_id++) {
 840		struct kern_ipc_perm *ipc;
 841		struct shmid_kernel *shp;
 842
 843		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
 844		if (ipc == NULL)
 845			continue;
 846		shp = container_of(ipc, struct shmid_kernel, shm_perm);
 847
 848		shm_add_rss_swap(shp, rss, swp);
 849
 850		total++;
 851	}
 852}
 853
 854/*
 855 * This function handles some shmctl commands which require the rwsem
 856 * to be held in write mode.
 857 * NOTE: no locks must be held, the rwsem is taken inside this function.
 858 */
 859static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
 860		       struct shmid64_ds *shmid64)
 861{
 862	struct kern_ipc_perm *ipcp;
 863	struct shmid_kernel *shp;
 864	int err;
 865
 866	down_write(&shm_ids(ns).rwsem);
 867	rcu_read_lock();
 868
 869	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
 870				      &shmid64->shm_perm, 0);
 871	if (IS_ERR(ipcp)) {
 872		err = PTR_ERR(ipcp);
 873		goto out_unlock1;
 874	}
 875
 876	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 877
 878	err = security_shm_shmctl(&shp->shm_perm, cmd);
 879	if (err)
 880		goto out_unlock1;
 881
 882	switch (cmd) {
 883	case IPC_RMID:
 884		ipc_lock_object(&shp->shm_perm);
 885		/* do_shm_rmid unlocks the ipc object and rcu */
 886		do_shm_rmid(ns, ipcp);
 887		goto out_up;
 888	case IPC_SET:
 889		ipc_lock_object(&shp->shm_perm);
 890		err = ipc_update_perm(&shmid64->shm_perm, ipcp);
 891		if (err)
 892			goto out_unlock0;
 893		shp->shm_ctim = ktime_get_real_seconds();
 894		break;
 895	default:
 896		err = -EINVAL;
 897		goto out_unlock1;
 898	}
 899
 900out_unlock0:
 901	ipc_unlock_object(&shp->shm_perm);
 902out_unlock1:
 903	rcu_read_unlock();
 904out_up:
 905	up_write(&shm_ids(ns).rwsem);
 906	return err;
 907}
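
/*
 * The two commands routed through shmctl_down() correspond to the usual
 * user-space administration sequence (illustrative sketch only; "id" comes
 * from an earlier shmget()):
 *
 *	struct shmid_ds ds;
 *
 *	shmctl(id, IPC_STAT, &ds);	// read current ownership/mode
 *	ds.shm_perm.mode = 0600;
 *	shmctl(id, IPC_SET, &ds);	// ipc_update_perm() + ctime update
 *	shmctl(id, IPC_RMID, NULL);	// SHM_DEST until the last detach
 */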
 908
 909static int shmctl_ipc_info(struct ipc_namespace *ns,
 910			   struct shminfo64 *shminfo)
 911{
 912	int err = security_shm_shmctl(NULL, IPC_INFO);
 913	if (!err) {
 914		memset(shminfo, 0, sizeof(*shminfo));
 915		shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
 916		shminfo->shmmax = ns->shm_ctlmax;
 917		shminfo->shmall = ns->shm_ctlall;
 918		shminfo->shmmin = SHMMIN;
 919		down_read(&shm_ids(ns).rwsem);
 920		err = ipc_get_maxid(&shm_ids(ns));
 921		up_read(&shm_ids(ns).rwsem);
 922		if (err < 0)
 923			err = 0;
 924	}
 925	return err;
 926}
 927
 928static int shmctl_shm_info(struct ipc_namespace *ns,
 929			   struct shm_info *shm_info)
 930{
 931	int err = security_shm_shmctl(NULL, SHM_INFO);
 932	if (!err) {
 933		memset(shm_info, 0, sizeof(*shm_info));
 934		down_read(&shm_ids(ns).rwsem);
 935		shm_info->used_ids = shm_ids(ns).in_use;
 936		shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
 937		shm_info->shm_tot = ns->shm_tot;
 938		shm_info->swap_attempts = 0;
 939		shm_info->swap_successes = 0;
 940		err = ipc_get_maxid(&shm_ids(ns));
 941		up_read(&shm_ids(ns).rwsem);
 942		if (err < 0)
 943			err = 0;
 944	}
 945	return err;
 946}
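
/*
 * SHM_INFO is the call behind the summary line that ipcs(1) prints.  A
 * sketch of the user-space side (illustrative only; struct shm_info is
 * exposed by <sys/shm.h> under _GNU_SOURCE):
 *
 *	struct shm_info info;
 *	int maxidx = shmctl(0, SHM_INFO, (struct shmid_ds *)&info);
 *
 *	// maxidx is the highest in-use index; shm_tot, shm_rss and shm_swp
 *	// are reported in pages.
 */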
 947
 948static int shmctl_stat(struct ipc_namespace *ns, int shmid,
 949			int cmd, struct shmid64_ds *tbuf)
 950{
 951	struct shmid_kernel *shp;
 952	int id = 0;
 953	int err;
 954
 955	memset(tbuf, 0, sizeof(*tbuf));
 956
 957	rcu_read_lock();
 958	if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
 959		shp = shm_obtain_object(ns, shmid);
 960		if (IS_ERR(shp)) {
 961			err = PTR_ERR(shp);
 962			goto out_unlock;
 963		}
 964		id = shp->shm_perm.id;
 965	} else { /* IPC_STAT */
 966		shp = shm_obtain_object_check(ns, shmid);
 967		if (IS_ERR(shp)) {
 968			err = PTR_ERR(shp);
 969			goto out_unlock;
 970		}
 971	}
 972
 973	/*
 974	 * Semantically SHM_STAT_ANY ought to be identical to
  975	 * the functionality provided by the /proc/sysvipc/
 976	 * interface. As such, only audit these calls and
 977	 * do not do traditional S_IRUGO permission checks on
 978	 * the ipc object.
 979	 */
 980	if (cmd == SHM_STAT_ANY)
 981		audit_ipc_obj(&shp->shm_perm);
 982	else {
 983		err = -EACCES;
 984		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
 985			goto out_unlock;
 986	}
 987
 988	err = security_shm_shmctl(&shp->shm_perm, cmd);
 989	if (err)
 990		goto out_unlock;
 991
 992	ipc_lock_object(&shp->shm_perm);
 993
 994	if (!ipc_valid_object(&shp->shm_perm)) {
 995		ipc_unlock_object(&shp->shm_perm);
 996		err = -EIDRM;
 997		goto out_unlock;
 998	}
 999
1000	kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
1001	tbuf->shm_segsz	= shp->shm_segsz;
1002	tbuf->shm_atime	= shp->shm_atim;
1003	tbuf->shm_dtime	= shp->shm_dtim;
1004	tbuf->shm_ctime	= shp->shm_ctim;
1005	tbuf->shm_cpid	= pid_vnr(shp->shm_cprid);
1006	tbuf->shm_lpid	= pid_vnr(shp->shm_lprid);
1007	tbuf->shm_nattch = shp->shm_nattch;
1008
1009	ipc_unlock_object(&shp->shm_perm);
1010	rcu_read_unlock();
1011	return id;
1012
1013out_unlock:
1014	rcu_read_unlock();
1015	return err;
1016}
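
/*
 * Note the return-value difference implemented above: IPC_STAT takes a
 * segment id and returns 0, while SHM_STAT/SHM_STAT_ANY take an index into
 * the idr and return the segment's id, which is how ipcs(1) enumerates all
 * segments.  Sketch (illustrative only; "maxidx" as obtained via SHM_INFO):
 *
 *	for (int i = 0; i <= maxidx; i++) {
 *		struct shmid_ds ds;
 *		int id = shmctl(i, SHM_STAT, &ds);
 *
 *		if (id < 0)
 *			continue;	// unused slot
 *		// ds describes segment "id": ds.shm_segsz, ds.shm_nattch, ...
 *	}
 */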
1017
1018static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
1019{
1020	struct shmid_kernel *shp;
1021	struct file *shm_file;
1022	int err;
1023
1024	rcu_read_lock();
1025	shp = shm_obtain_object_check(ns, shmid);
1026	if (IS_ERR(shp)) {
1027		err = PTR_ERR(shp);
1028		goto out_unlock1;
1029	}
1030
1031	audit_ipc_obj(&(shp->shm_perm));
1032	err = security_shm_shmctl(&shp->shm_perm, cmd);
1033	if (err)
1034		goto out_unlock1;
1035
1036	ipc_lock_object(&shp->shm_perm);
1037
1038	/* check if shm_destroy() is tearing down shp */
1039	if (!ipc_valid_object(&shp->shm_perm)) {
1040		err = -EIDRM;
1041		goto out_unlock0;
1042	}
1043
1044	if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1045		kuid_t euid = current_euid();
1046
1047		if (!uid_eq(euid, shp->shm_perm.uid) &&
1048		    !uid_eq(euid, shp->shm_perm.cuid)) {
1049			err = -EPERM;
1050			goto out_unlock0;
1051		}
1052		if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1053			err = -EPERM;
1054			goto out_unlock0;
1055		}
1056	}
1057
1058	shm_file = shp->shm_file;
1059	if (is_file_hugepages(shm_file))
1060		goto out_unlock0;
1061
1062	if (cmd == SHM_LOCK) {
1063		struct user_struct *user = current_user();
1064
1065		err = shmem_lock(shm_file, 1, user);
1066		if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1067			shp->shm_perm.mode |= SHM_LOCKED;
1068			shp->mlock_user = user;
1069		}
1070		goto out_unlock0;
1071	}
1072
1073	/* SHM_UNLOCK */
1074	if (!(shp->shm_perm.mode & SHM_LOCKED))
1075		goto out_unlock0;
1076	shmem_lock(shm_file, 0, shp->mlock_user);
1077	shp->shm_perm.mode &= ~SHM_LOCKED;
1078	shp->mlock_user = NULL;
1079	get_file(shm_file);
1080	ipc_unlock_object(&shp->shm_perm);
1081	rcu_read_unlock();
1082	shmem_unlock_mapping(shm_file->f_mapping);
1083
1084	fput(shm_file);
1085	return err;
1086
1087out_unlock0:
1088	ipc_unlock_object(&shp->shm_perm);
1089out_unlock1:
1090	rcu_read_unlock();
1091	return err;
1092}
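
/*
 * SHM_LOCK marks the segment's pages unevictable so they are not swapped
 * out; as enforced above, a caller without CAP_IPC_LOCK must own the
 * segment and, for SHM_LOCK, have a non-zero RLIMIT_MEMLOCK.  Sketch
 * (illustrative only; "id" from an earlier shmget()):
 *
 *	if (shmctl(id, SHM_LOCK, NULL) == 0) {
 *		// pages stay resident and are never written to swap
 *		shmctl(id, SHM_UNLOCK, NULL);
 *	}
 */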
1093
1094long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
1095{
1096	int err, version;
1097	struct ipc_namespace *ns;
1098	struct shmid64_ds sem64;
1099
1100	if (cmd < 0 || shmid < 0)
1101		return -EINVAL;
1102
1103	version = ipc_parse_version(&cmd);
1104	ns = current->nsproxy->ipc_ns;
1105
1106	switch (cmd) {
1107	case IPC_INFO: {
1108		struct shminfo64 shminfo;
1109		err = shmctl_ipc_info(ns, &shminfo);
1110		if (err < 0)
1111			return err;
1112		if (copy_shminfo_to_user(buf, &shminfo, version))
1113			err = -EFAULT;
1114		return err;
1115	}
1116	case SHM_INFO: {
1117		struct shm_info shm_info;
1118		err = shmctl_shm_info(ns, &shm_info);
1119		if (err < 0)
1120			return err;
1121		if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
1122			err = -EFAULT;
1123		return err;
1124	}
1125	case SHM_STAT:
1126	case SHM_STAT_ANY:
1127	case IPC_STAT: {
1128		err = shmctl_stat(ns, shmid, cmd, &sem64);
1129		if (err < 0)
1130			return err;
1131		if (copy_shmid_to_user(buf, &sem64, version))
1132			err = -EFAULT;
1133		return err;
1134	}
1135	case IPC_SET:
1136		if (copy_shmid_from_user(&sem64, buf, version))
1137			return -EFAULT;
1138		/* fallthru */
1139	case IPC_RMID:
1140		return shmctl_down(ns, shmid, cmd, &sem64);
1141	case SHM_LOCK:
1142	case SHM_UNLOCK:
1143		return shmctl_do_lock(ns, shmid, cmd);
1144	default:
1145		return -EINVAL;
1146	}
1147}
1148
1149SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1150{
1151	return ksys_shmctl(shmid, cmd, buf);
1152}
1153
1154#ifdef CONFIG_COMPAT
1155
1156struct compat_shmid_ds {
1157	struct compat_ipc_perm shm_perm;
1158	int shm_segsz;
1159	compat_time_t shm_atime;
1160	compat_time_t shm_dtime;
1161	compat_time_t shm_ctime;
1162	compat_ipc_pid_t shm_cpid;
1163	compat_ipc_pid_t shm_lpid;
1164	unsigned short shm_nattch;
1165	unsigned short shm_unused;
1166	compat_uptr_t shm_unused2;
1167	compat_uptr_t shm_unused3;
1168};
1169
1170struct compat_shminfo64 {
1171	compat_ulong_t shmmax;
1172	compat_ulong_t shmmin;
1173	compat_ulong_t shmmni;
1174	compat_ulong_t shmseg;
1175	compat_ulong_t shmall;
1176	compat_ulong_t __unused1;
1177	compat_ulong_t __unused2;
1178	compat_ulong_t __unused3;
1179	compat_ulong_t __unused4;
1180};
1181
1182struct compat_shm_info {
1183	compat_int_t used_ids;
1184	compat_ulong_t shm_tot, shm_rss, shm_swp;
1185	compat_ulong_t swap_attempts, swap_successes;
1186};
1187
1188static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
1189					int version)
1190{
1191	if (in->shmmax > INT_MAX)
1192		in->shmmax = INT_MAX;
1193	if (version == IPC_64) {
1194		struct compat_shminfo64 info;
1195		memset(&info, 0, sizeof(info));
1196		info.shmmax = in->shmmax;
1197		info.shmmin = in->shmmin;
1198		info.shmmni = in->shmmni;
1199		info.shmseg = in->shmseg;
1200		info.shmall = in->shmall;
1201		return copy_to_user(buf, &info, sizeof(info));
1202	} else {
1203		struct shminfo info;
1204		memset(&info, 0, sizeof(info));
1205		info.shmmax = in->shmmax;
1206		info.shmmin = in->shmmin;
1207		info.shmmni = in->shmmni;
1208		info.shmseg = in->shmseg;
1209		info.shmall = in->shmall;
1210		return copy_to_user(buf, &info, sizeof(info));
1211	}
1212}
1213
1214static int put_compat_shm_info(struct shm_info *ip,
1215				struct compat_shm_info __user *uip)
1216{
1217	struct compat_shm_info info;
1218
1219	memset(&info, 0, sizeof(info));
1220	info.used_ids = ip->used_ids;
1221	info.shm_tot = ip->shm_tot;
1222	info.shm_rss = ip->shm_rss;
1223	info.shm_swp = ip->shm_swp;
1224	info.swap_attempts = ip->swap_attempts;
1225	info.swap_successes = ip->swap_successes;
1226	return copy_to_user(uip, &info, sizeof(info));
1227}
1228
1229static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
1230					int version)
1231{
1232	if (version == IPC_64) {
1233		struct compat_shmid64_ds v;
1234		memset(&v, 0, sizeof(v));
1235		to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
1236		v.shm_atime = in->shm_atime;
1237		v.shm_dtime = in->shm_dtime;
1238		v.shm_ctime = in->shm_ctime;
1239		v.shm_segsz = in->shm_segsz;
1240		v.shm_nattch = in->shm_nattch;
1241		v.shm_cpid = in->shm_cpid;
1242		v.shm_lpid = in->shm_lpid;
1243		return copy_to_user(buf, &v, sizeof(v));
1244	} else {
1245		struct compat_shmid_ds v;
1246		memset(&v, 0, sizeof(v));
1247		to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
1248		v.shm_perm.key = in->shm_perm.key;
1249		v.shm_atime = in->shm_atime;
1250		v.shm_dtime = in->shm_dtime;
1251		v.shm_ctime = in->shm_ctime;
1252		v.shm_segsz = in->shm_segsz;
1253		v.shm_nattch = in->shm_nattch;
1254		v.shm_cpid = in->shm_cpid;
1255		v.shm_lpid = in->shm_lpid;
1256		return copy_to_user(buf, &v, sizeof(v));
1257	}
1258}
1259
1260static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
1261					int version)
1262{
1263	memset(out, 0, sizeof(*out));
1264	if (version == IPC_64) {
1265		struct compat_shmid64_ds __user *p = buf;
1266		return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
1267	} else {
1268		struct compat_shmid_ds __user *p = buf;
1269		return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
1270	}
1271}
1272
1273long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr)
1274{
1275	struct ipc_namespace *ns;
1276	struct shmid64_ds sem64;
1277	int version = compat_ipc_parse_version(&cmd);
1278	int err;
1279
1280	ns = current->nsproxy->ipc_ns;
1281
1282	if (cmd < 0 || shmid < 0)
1283		return -EINVAL;
1284
1285	switch (cmd) {
1286	case IPC_INFO: {
1287		struct shminfo64 shminfo;
1288		err = shmctl_ipc_info(ns, &shminfo);
1289		if (err < 0)
1290			return err;
1291		if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
1292			err = -EFAULT;
1293		return err;
1294	}
1295	case SHM_INFO: {
1296		struct shm_info shm_info;
1297		err = shmctl_shm_info(ns, &shm_info);
1298		if (err < 0)
1299			return err;
1300		if (put_compat_shm_info(&shm_info, uptr))
1301			err = -EFAULT;
1302		return err;
1303	}
1304	case IPC_STAT:
1305	case SHM_STAT_ANY:
1306	case SHM_STAT:
1307		err = shmctl_stat(ns, shmid, cmd, &sem64);
1308		if (err < 0)
1309			return err;
1310		if (copy_compat_shmid_to_user(uptr, &sem64, version))
1311			err = -EFAULT;
1312		return err;
1313
1314	case IPC_SET:
1315		if (copy_compat_shmid_from_user(&sem64, uptr, version))
1316			return -EFAULT;
1317		/* fallthru */
1318	case IPC_RMID:
1319		return shmctl_down(ns, shmid, cmd, &sem64);
1320	case SHM_LOCK:
1321	case SHM_UNLOCK:
1322		return shmctl_do_lock(ns, shmid, cmd);
1324	default:
1325		return -EINVAL;
1326	}
1327	return err;
1328}
1329
1330COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
1331{
1332	return compat_ksys_shmctl(shmid, cmd, uptr);
1333}
1334#endif
1335
1336/*
1337 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1338 *
1339 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1340 * "raddr" thing points to kernel space, and there has to be a wrapper around
1341 * this.
1342 */
1343long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1344	      ulong *raddr, unsigned long shmlba)
1345{
1346	struct shmid_kernel *shp;
1347	unsigned long addr = (unsigned long)shmaddr;
1348	unsigned long size;
1349	struct file *file;
1350	int    err;
1351	unsigned long flags = MAP_SHARED;
1352	unsigned long prot;
1353	int acc_mode;
1354	struct ipc_namespace *ns;
1355	struct shm_file_data *sfd;
1356	struct path path;
1357	fmode_t f_mode;
1358	unsigned long populate = 0;
1359
1360	err = -EINVAL;
1361	if (shmid < 0)
1362		goto out;
1363
1364	if (addr) {
1365		if (addr & (shmlba - 1)) {
1366			if (shmflg & SHM_RND) {
1367				addr &= ~(shmlba - 1);  /* round down */
1368
1369				/*
1370				 * Ensure that the round-down is non-nil
1371				 * when remapping. This can happen for
1372				 * cases when addr < shmlba.
1373				 */
1374				if (!addr && (shmflg & SHM_REMAP))
1375					goto out;
1376			} else
1377#ifndef __ARCH_FORCE_SHMLBA
1378				if (addr & ~PAGE_MASK)
1379#endif
1380					goto out;
1381		}
1382
1383		flags |= MAP_FIXED;
1384	} else if ((shmflg & SHM_REMAP))
1385		goto out;
1386
1387	if (shmflg & SHM_RDONLY) {
1388		prot = PROT_READ;
1389		acc_mode = S_IRUGO;
1390		f_mode = FMODE_READ;
1391	} else {
1392		prot = PROT_READ | PROT_WRITE;
1393		acc_mode = S_IRUGO | S_IWUGO;
1394		f_mode = FMODE_READ | FMODE_WRITE;
1395	}
1396	if (shmflg & SHM_EXEC) {
1397		prot |= PROT_EXEC;
1398		acc_mode |= S_IXUGO;
1399	}
1400
1401	/*
1402	 * We cannot rely on the fs check since SYSV IPC does have an
1403	 * additional creator id...
1404	 */
1405	ns = current->nsproxy->ipc_ns;
1406	rcu_read_lock();
1407	shp = shm_obtain_object_check(ns, shmid);
1408	if (IS_ERR(shp)) {
1409		err = PTR_ERR(shp);
1410		goto out_unlock;
1411	}
1412
1413	err = -EACCES;
1414	if (ipcperms(ns, &shp->shm_perm, acc_mode))
1415		goto out_unlock;
1416
1417	err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
1418	if (err)
1419		goto out_unlock;
1420
1421	ipc_lock_object(&shp->shm_perm);
1422
1423	/* check if shm_destroy() is tearing down shp */
1424	if (!ipc_valid_object(&shp->shm_perm)) {
1425		ipc_unlock_object(&shp->shm_perm);
1426		err = -EIDRM;
1427		goto out_unlock;
1428	}
1429
1430	path = shp->shm_file->f_path;
1431	path_get(&path);
1432	shp->shm_nattch++;
1433	size = i_size_read(d_inode(path.dentry));
1434	ipc_unlock_object(&shp->shm_perm);
1435	rcu_read_unlock();
1436
1437	err = -ENOMEM;
1438	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1439	if (!sfd) {
1440		path_put(&path);
1441		goto out_nattch;
1442	}
1443
1444	file = alloc_file(&path, f_mode,
1445			  is_file_hugepages(shp->shm_file) ?
1446				&shm_file_operations_huge :
1447				&shm_file_operations);
1448	err = PTR_ERR(file);
1449	if (IS_ERR(file)) {
1450		kfree(sfd);
1451		path_put(&path);
1452		goto out_nattch;
1453	}
1454
1455	file->private_data = sfd;
1456	file->f_mapping = shp->shm_file->f_mapping;
1457	sfd->id = shp->shm_perm.id;
1458	sfd->ns = get_ipc_ns(ns);
1459	/*
1460	 * We need to take a reference to the real shm file to prevent the
1461	 * pointer from becoming stale in cases where the lifetime of the outer
1462	 * file extends beyond that of the shm segment.  It's not usually
1463	 * possible, but it can happen during remap_file_pages() emulation as
1464	 * that unmaps the memory, then does ->mmap() via file reference only.
1465	 * We'll deny the ->mmap() if the shm segment was since removed, but to
1466	 * detect shm ID reuse we need to compare the file pointers.
1467	 */
1468	sfd->file = get_file(shp->shm_file);
1469	sfd->vm_ops = NULL;
1470
1471	err = security_mmap_file(file, prot, flags);
1472	if (err)
1473		goto out_fput;
1474
1475	if (down_write_killable(&current->mm->mmap_sem)) {
1476		err = -EINTR;
1477		goto out_fput;
1478	}
1479
1480	if (addr && !(shmflg & SHM_REMAP)) {
1481		err = -EINVAL;
1482		if (addr + size < addr)
1483			goto invalid;
1484
1485		if (find_vma_intersection(current->mm, addr, addr + size))
1486			goto invalid;
1487	}
1488
1489	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
1490	*raddr = addr;
1491	err = 0;
1492	if (IS_ERR_VALUE(addr))
1493		err = (long)addr;
1494invalid:
1495	up_write(&current->mm->mmap_sem);
1496	if (populate)
1497		mm_populate(addr, populate);
1498
1499out_fput:
1500	fput(file);
1501
1502out_nattch:
1503	down_write(&shm_ids(ns).rwsem);
1504	shp = shm_lock(ns, shmid);
1505	shp->shm_nattch--;
1506	if (shm_may_destroy(ns, shp))
1507		shm_destroy(ns, shp);
1508	else
1509		shm_unlock(shp);
1510	up_write(&shm_ids(ns).rwsem);
1511	return err;
1512
1513out_unlock:
1514	rcu_read_unlock();
1515out:
1516	return err;
1517}
1518
1519SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1520{
1521	unsigned long ret;
1522	long err;
1523
1524	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1525	if (err)
1526		return err;
1527	force_successful_syscall_return();
1528	return (long)ret;
1529}
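
/*
 * A user-space view of the attach path above (illustrative sketch only;
 * "id" comes from shmget() and "hint" is any caller-chosen address):
 *
 *	void *p = shmat(id, NULL, 0);			// kernel picks the address
 *	void *q = shmat(id, hint, SHM_RND);		// hint rounded down to SHMLBA
 *	void *r = shmat(id, NULL, SHM_RDONLY);		// read-only mapping
 *
 *	if (p != (void *)-1)
 *		shmdt(p);
 */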
1530
1531#ifdef CONFIG_COMPAT
1532
1533#ifndef COMPAT_SHMLBA
1534#define COMPAT_SHMLBA	SHMLBA
1535#endif
1536
1537COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
1538{
1539	unsigned long ret;
1540	long err;
1541
1542	err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
1543	if (err)
1544		return err;
1545	force_successful_syscall_return();
1546	return (long)ret;
1547}
1548#endif
1549
1550/*
1551 * detach and kill segment if marked destroyed.
1552 * The work is done in shm_close.
1553 */
1554long ksys_shmdt(char __user *shmaddr)
1555{
1556	struct mm_struct *mm = current->mm;
1557	struct vm_area_struct *vma;
1558	unsigned long addr = (unsigned long)shmaddr;
1559	int retval = -EINVAL;
1560#ifdef CONFIG_MMU
1561	loff_t size = 0;
1562	struct file *file;
1563	struct vm_area_struct *next;
1564#endif
1565
1566	if (addr & ~PAGE_MASK)
1567		return retval;
1568
1569	if (down_write_killable(&mm->mmap_sem))
1570		return -EINTR;
1571
1572	/*
1573	 * This function tries to be smart and unmap shm segments that
1574	 * were modified by partial mlock or munmap calls:
1575	 * - It first determines the size of the shm segment that should be
1576	 *   unmapped: It searches for a vma that is backed by shm and that
1577	 *   started at address shmaddr. It records it's size and then unmaps
1578	 *   it.
1579	 * - Then it unmaps all shm vmas that started at shmaddr and that
1580	 *   are within the initially determined size and that are from the
1581	 *   same shm segment from which we determined the size.
1582	 * Errors from do_munmap are ignored: the function only fails if
1583	 * it's called with invalid parameters or if it's called to unmap
1584	 * a part of a vma. Both calls in this function are for full vmas,
1585	 * the parameters are directly copied from the vma itself and always
1586	 * valid - therefore do_munmap cannot fail. (famous last words?)
1587	 */
1588	/*
1589	 * If it had been mremap()'d, the starting address would not
1590	 * match the usual checks anyway. So assume all vma's are
1591	 * above the starting address given.
1592	 */
1593	vma = find_vma(mm, addr);
1594
1595#ifdef CONFIG_MMU
1596	while (vma) {
1597		next = vma->vm_next;
1598
1599		/*
1600		 * Check if the starting address would match, i.e. it's
1601		 * a fragment created by mprotect() and/or munmap(), or it
1602		 * otherwise it starts at this address with no hassles.
1603		 */
1604		if ((vma->vm_ops == &shm_vm_ops) &&
1605			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1606
1607			/*
1608			 * Record the file of the shm segment being
1609			 * unmapped.  With mremap(), someone could place
1610			 * page from another segment but with equal offsets
 1611			 * pages from another segment but with equal offsets
1612			 */
1613			file = vma->vm_file;
1614			size = i_size_read(file_inode(vma->vm_file));
1615			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1616			/*
1617			 * We discovered the size of the shm segment, so
1618			 * break out of here and fall through to the next
1619			 * loop that uses the size information to stop
1620			 * searching for matching vma's.
1621			 */
1622			retval = 0;
1623			vma = next;
1624			break;
1625		}
1626		vma = next;
1627	}
1628
1629	/*
1630	 * We need look no further than the maximum address a fragment
1631	 * could possibly have landed at. Also cast things to loff_t to
1632	 * prevent overflows and make comparisons vs. equal-width types.
1633	 */
1634	size = PAGE_ALIGN(size);
1635	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1636		next = vma->vm_next;
1637
1638		/* finding a matching vma now does not alter retval */
1639		if ((vma->vm_ops == &shm_vm_ops) &&
1640		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1641		    (vma->vm_file == file))
1642			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1643		vma = next;
1644	}
1645
1646#else	/* CONFIG_MMU */
1647	/* under NOMMU conditions, the exact address to be destroyed must be
1648	 * given
1649	 */
1650	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1651		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1652		retval = 0;
1653	}
1654
1655#endif
1656
1657	up_write(&mm->mmap_sem);
1658	return retval;
1659}
1660
1661SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1662{
1663	return ksys_shmdt(shmaddr);
1664}
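
/*
 * The two-pass scan in ksys_shmdt() above means a detach still covers an
 * attachment that was later split, e.g. (illustrative sketch only; assumes
 * <sys/mman.h>/<unistd.h> and a segment of at least three pages):
 *
 *	char *p = shmat(id, NULL, 0);
 *	munmap(p + getpagesize(), getpagesize());	// punch a hole in the mapping
 *	shmdt(p);			// both remaining fragments are unmapped
 */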
1665
1666#ifdef CONFIG_PROC_FS
1667static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1668{
1669	struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
1670	struct user_namespace *user_ns = seq_user_ns(s);
1671	struct kern_ipc_perm *ipcp = it;
1672	struct shmid_kernel *shp;
1673	unsigned long rss = 0, swp = 0;
1674
1675	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1676	shm_add_rss_swap(shp, &rss, &swp);
1677
1678#if BITS_PER_LONG <= 32
1679#define SIZE_SPEC "%10lu"
1680#else
1681#define SIZE_SPEC "%21lu"
1682#endif
1683
1684	seq_printf(s,
1685		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
1686		   "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
1687		   SIZE_SPEC " " SIZE_SPEC "\n",
1688		   shp->shm_perm.key,
1689		   shp->shm_perm.id,
1690		   shp->shm_perm.mode,
1691		   shp->shm_segsz,
1692		   pid_nr_ns(shp->shm_cprid, pid_ns),
1693		   pid_nr_ns(shp->shm_lprid, pid_ns),
1694		   shp->shm_nattch,
1695		   from_kuid_munged(user_ns, shp->shm_perm.uid),
1696		   from_kgid_munged(user_ns, shp->shm_perm.gid),
1697		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
1698		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
1699		   shp->shm_atim,
1700		   shp->shm_dtim,
1701		   shp->shm_ctim,
1702		   rss * PAGE_SIZE,
1703		   swp * PAGE_SIZE);
1704
1705	return 0;
1706}
1707#endif