v3.15
   1/*
   2 * linux/ipc/shm.c
   3 * Copyright (C) 1992, 1993 Krishna Balasubramanian
   4 *	 Many improvements/fixes by Bruno Haible.
   5 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
   6 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
   7 *
   8 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
   9 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
  10 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
  11 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
  12 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
  13 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
  14 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
  15 *
  16 * support for audit of ipc object properties and permission changes
  17 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  18 *
  19 * namespaces support
  20 * OpenVZ, SWsoft Inc.
  21 * Pavel Emelianov <xemul@openvz.org>
  22 *
  23 * Better ipc lock (kern_ipc_perm.lock) handling
  24 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
  25 */
  26
  27#include <linux/slab.h>
  28#include <linux/mm.h>
  29#include <linux/hugetlb.h>
  30#include <linux/shm.h>
  31#include <linux/init.h>
  32#include <linux/file.h>
  33#include <linux/mman.h>
  34#include <linux/shmem_fs.h>
  35#include <linux/security.h>
  36#include <linux/syscalls.h>
  37#include <linux/audit.h>
  38#include <linux/capability.h>
  39#include <linux/ptrace.h>
  40#include <linux/seq_file.h>
  41#include <linux/rwsem.h>
  42#include <linux/nsproxy.h>
  43#include <linux/mount.h>
  44#include <linux/ipc_namespace.h>
  45
  46#include <asm/uaccess.h>
  47
  48#include "util.h"
  49
  50struct shm_file_data {
  51	int id;
  52	struct ipc_namespace *ns;
  53	struct file *file;
  54	const struct vm_operations_struct *vm_ops;
  55};
  56
  57#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
  58
  59static const struct file_operations shm_file_operations;
  60static const struct vm_operations_struct shm_vm_ops;
  61
  62#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])
  63
  64#define shm_unlock(shp)			\
  65	ipc_unlock(&(shp)->shm_perm)
  66
  67static int newseg(struct ipc_namespace *, struct ipc_params *);
  68static void shm_open(struct vm_area_struct *vma);
  69static void shm_close(struct vm_area_struct *vma);
  70static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
  71#ifdef CONFIG_PROC_FS
  72static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
  73#endif
  74
  75void shm_init_ns(struct ipc_namespace *ns)
  76{
  77	ns->shm_ctlmax = SHMMAX;
  78	ns->shm_ctlall = SHMALL;
  79	ns->shm_ctlmni = SHMMNI;
  80	ns->shm_rmid_forced = 0;
  81	ns->shm_tot = 0;
  82	ipc_init_ids(&shm_ids(ns));
  83}
  84
  85/*
  86 * Called with shm_ids.rwsem (writer) and the shp structure locked.
  87 * Only shm_ids.rwsem remains locked on exit.
  88 */
  89static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
  90{
  91	struct shmid_kernel *shp;
  92	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
  93
  94	if (shp->shm_nattch) {
  95		shp->shm_perm.mode |= SHM_DEST;
  96		/* Do not find it any more */
  97		shp->shm_perm.key = IPC_PRIVATE;
  98		shm_unlock(shp);
  99	} else
 100		shm_destroy(ns, shp);
 101}
 102
 103#ifdef CONFIG_IPC_NS
 104void shm_exit_ns(struct ipc_namespace *ns)
 105{
 106	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
 107	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
 108}
 109#endif
 110
 111static int __init ipc_ns_init(void)
 112{
 113	shm_init_ns(&init_ipc_ns);
 114	return 0;
 115}
 116
 117pure_initcall(ipc_ns_init);
 118
 119void __init shm_init(void)
 120{
 121	ipc_init_proc_interface("sysvipc/shm",
 122#if BITS_PER_LONG <= 32
 123				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
 124#else
 125				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
 126#endif
 127				IPC_SHM_IDS, sysvipc_shm_proc_show);
 128}
 129
 130static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
 131{
 132	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);
 133
 134	if (IS_ERR(ipcp))
 135		return ERR_CAST(ipcp);
 136
 137	return container_of(ipcp, struct shmid_kernel, shm_perm);
 138}
 139
 140static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
 141{
 142	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
 143
 144	if (IS_ERR(ipcp))
 145		return ERR_CAST(ipcp);
 146
 147	return container_of(ipcp, struct shmid_kernel, shm_perm);
 148}
 149
 150/*
 151 * shm_lock_(check_) routines are called in the paths where the rwsem
 152 * is not necessarily held.
 153 */
 154static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 155{
 156	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
 157
 158	if (IS_ERR(ipcp))
 159		return (struct shmid_kernel *)ipcp;
 160
 161	return container_of(ipcp, struct shmid_kernel, shm_perm);
 162}
 163
 164static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
 165{
 166	rcu_read_lock();
 167	ipc_lock_object(&ipcp->shm_perm);
 168}
 169
 170static void shm_rcu_free(struct rcu_head *head)
 171{
 172	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
 173	struct shmid_kernel *shp = ipc_rcu_to_struct(p);
 174
 175	security_shm_free(shp);
 176	ipc_rcu_free(head);
 177}
 178
 179static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 180{
 181	ipc_rmid(&shm_ids(ns), &s->shm_perm);
 182}
 183
 184
 185/* This is called by fork, once for every shm attach. */
 186static void shm_open(struct vm_area_struct *vma)
 187{
 188	struct file *file = vma->vm_file;
 189	struct shm_file_data *sfd = shm_file_data(file);
 190	struct shmid_kernel *shp;
 191
 192	shp = shm_lock(sfd->ns, sfd->id);
 193	BUG_ON(IS_ERR(shp));
 194	shp->shm_atim = get_seconds();
 195	shp->shm_lprid = task_tgid_vnr(current);
 196	shp->shm_nattch++;
 197	shm_unlock(shp);
 198}
 199
 200/*
 201 * shm_destroy - free the struct shmid_kernel
 202 *
 203 * @ns: namespace
 204 * @shp: struct to free
 205 *
 206 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 207 * but returns with shp unlocked and freed.
 208 */
 209static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 210{
 211	struct file *shm_file;
 212
 213	shm_file = shp->shm_file;
 214	shp->shm_file = NULL;
 215	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
 216	shm_rmid(ns, shp);
 217	shm_unlock(shp);
 218	if (!is_file_hugepages(shm_file))
 219		shmem_lock(shm_file, 0, shp->mlock_user);
 220	else if (shp->mlock_user)
 221		user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
 222	fput(shm_file);
 223	ipc_rcu_putref(shp, shm_rcu_free);
 224}
 225
 226/*
 227 * shm_may_destroy - identifies whether shm segment should be destroyed now
 228 *
 229 * Returns true if and only if there are no active users of the segment and
 230 * one of the following is true:
 231 *
 232 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 233 *
 234 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 235 */
 236static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 237{
 238	return (shp->shm_nattch == 0) &&
 239	       (ns->shm_rmid_forced ||
 240		(shp->shm_perm.mode & SHM_DEST));
 241}
 242
 243/*
 244 * remove the attach descriptor vma.
 245 * free memory for segment if it is marked destroyed.
 246 * The descriptor has already been removed from the current->mm->mmap list
 247 * and will later be kfree()d.
 248 */
 249static void shm_close(struct vm_area_struct *vma)
 250{
 251	struct file *file = vma->vm_file;
 252	struct shm_file_data *sfd = shm_file_data(file);
 253	struct shmid_kernel *shp;
 254	struct ipc_namespace *ns = sfd->ns;
 255
 256	down_write(&shm_ids(ns).rwsem);
 257	/* remove from the list of attaches of the shm segment */
 258	shp = shm_lock(ns, sfd->id);
 259	BUG_ON(IS_ERR(shp));
 260	shp->shm_lprid = task_tgid_vnr(current);
 261	shp->shm_dtim = get_seconds();
 262	shp->shm_nattch--;
 263	if (shm_may_destroy(ns, shp))
 264		shm_destroy(ns, shp);
 265	else
 266		shm_unlock(shp);
 267	up_write(&shm_ids(ns).rwsem);
 268}
 269
 270/* Called with ns->shm_ids(ns).rwsem locked */
 271static int shm_try_destroy_current(int id, void *p, void *data)
 272{
 273	struct ipc_namespace *ns = data;
 274	struct kern_ipc_perm *ipcp = p;
 275	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 276
 277	if (shp->shm_creator != current)
 278		return 0;
 279
 280	/*
 281	 * Mark it as orphaned to destroy the segment when
 282	 * kernel.shm_rmid_forced is changed.
  283	 * It is a no-op if the following shm_may_destroy() returns true.
 284	 */
 285	shp->shm_creator = NULL;
 286
 287	/*
 288	 * Don't even try to destroy it.  If shm_rmid_forced=0 and IPC_RMID
 289	 * is not set, it shouldn't be deleted here.
 290	 */
 291	if (!ns->shm_rmid_forced)
 292		return 0;
 293
 294	if (shm_may_destroy(ns, shp)) {
 295		shm_lock_by_ptr(shp);
 296		shm_destroy(ns, shp);
 297	}
 298	return 0;
 299}
 300
 301/* Called with ns->shm_ids(ns).rwsem locked */
 302static int shm_try_destroy_orphaned(int id, void *p, void *data)
 303{
 304	struct ipc_namespace *ns = data;
 305	struct kern_ipc_perm *ipcp = p;
 306	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 307
 308	/*
 309	 * We want to destroy segments without users and with already
 310	 * exit'ed originating process.
 311	 *
 312	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
 313	 */
 314	if (shp->shm_creator != NULL)
 315		return 0;
 316
 317	if (shm_may_destroy(ns, shp)) {
 318		shm_lock_by_ptr(shp);
 319		shm_destroy(ns, shp);
 320	}
 321	return 0;
 322}
 323
 324void shm_destroy_orphaned(struct ipc_namespace *ns)
 325{
 326	down_write(&shm_ids(ns).rwsem);
 327	if (shm_ids(ns).in_use)
 328		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
 329	up_write(&shm_ids(ns).rwsem);
 330}
 331
 332
 333void exit_shm(struct task_struct *task)
 334{
 335	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
 336
 337	if (shm_ids(ns).in_use == 0)
 338		return;
 339
 340	/* Destroy all already created segments, but not mapped yet */
 341	down_write(&shm_ids(ns).rwsem);
 342	if (shm_ids(ns).in_use)
 343		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
 344	up_write(&shm_ids(ns).rwsem);
 345}
 346
 347static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 348{
 349	struct file *file = vma->vm_file;
 350	struct shm_file_data *sfd = shm_file_data(file);
 351
 352	return sfd->vm_ops->fault(vma, vmf);
 353}
 354
 355#ifdef CONFIG_NUMA
 356static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 357{
 358	struct file *file = vma->vm_file;
 359	struct shm_file_data *sfd = shm_file_data(file);
 360	int err = 0;
 361	if (sfd->vm_ops->set_policy)
 362		err = sfd->vm_ops->set_policy(vma, new);
 363	return err;
 364}
 365
 366static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
 367					unsigned long addr)
 368{
 369	struct file *file = vma->vm_file;
 370	struct shm_file_data *sfd = shm_file_data(file);
 371	struct mempolicy *pol = NULL;
 372
 373	if (sfd->vm_ops->get_policy)
 374		pol = sfd->vm_ops->get_policy(vma, addr);
 375	else if (vma->vm_policy)
 376		pol = vma->vm_policy;
 377
 378	return pol;
 379}
 380#endif
 381
 382static int shm_mmap(struct file *file, struct vm_area_struct *vma)
 383{
 384	struct shm_file_data *sfd = shm_file_data(file);
 385	int ret;
 386
 387	ret = sfd->file->f_op->mmap(sfd->file, vma);
 388	if (ret != 0)
 389		return ret;
 390	sfd->vm_ops = vma->vm_ops;
 391#ifdef CONFIG_MMU
 392	BUG_ON(!sfd->vm_ops->fault);
 393#endif
 394	vma->vm_ops = &shm_vm_ops;
 395	shm_open(vma);
 396
 397	return ret;
 398}
 399
 400static int shm_release(struct inode *ino, struct file *file)
 401{
 402	struct shm_file_data *sfd = shm_file_data(file);
 403
 404	put_ipc_ns(sfd->ns);
 405	shm_file_data(file) = NULL;
 406	kfree(sfd);
 407	return 0;
 408}
 409
 410static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 411{
 412	struct shm_file_data *sfd = shm_file_data(file);
 413
 414	if (!sfd->file->f_op->fsync)
 415		return -EINVAL;
 416	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
 417}
 418
 419static long shm_fallocate(struct file *file, int mode, loff_t offset,
 420			  loff_t len)
 421{
 422	struct shm_file_data *sfd = shm_file_data(file);
 423
 424	if (!sfd->file->f_op->fallocate)
 425		return -EOPNOTSUPP;
 426	return sfd->file->f_op->fallocate(file, mode, offset, len);
 427}
 428
 429static unsigned long shm_get_unmapped_area(struct file *file,
 430	unsigned long addr, unsigned long len, unsigned long pgoff,
 431	unsigned long flags)
 432{
 433	struct shm_file_data *sfd = shm_file_data(file);
 434	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
 435						pgoff, flags);
 436}
 437
 438static const struct file_operations shm_file_operations = {
 439	.mmap		= shm_mmap,
 440	.fsync		= shm_fsync,
 441	.release	= shm_release,
 442#ifndef CONFIG_MMU
 443	.get_unmapped_area	= shm_get_unmapped_area,
 444#endif
 445	.llseek		= noop_llseek,
 446	.fallocate	= shm_fallocate,
 447};
 448
 449static const struct file_operations shm_file_operations_huge = {
 450	.mmap		= shm_mmap,
 451	.fsync		= shm_fsync,
 452	.release	= shm_release,
 453	.get_unmapped_area	= shm_get_unmapped_area,
 454	.llseek		= noop_llseek,
 455	.fallocate	= shm_fallocate,
 456};
 457
 458int is_file_shm_hugepages(struct file *file)
 459{
 460	return file->f_op == &shm_file_operations_huge;
 461}
 462
 463static const struct vm_operations_struct shm_vm_ops = {
 464	.open	= shm_open,	/* callback for a new vm-area open */
 465	.close	= shm_close,	/* callback for when the vm-area is released */
 466	.fault	= shm_fault,
 467#if defined(CONFIG_NUMA)
 468	.set_policy = shm_set_policy,
 469	.get_policy = shm_get_policy,
 470#endif
 471};
 472
 473/**
 474 * newseg - Create a new shared memory segment
 475 * @ns: namespace
 476 * @params: ptr to the structure that contains key, size and shmflg
 477 *
 478 * Called with shm_ids.rwsem held as a writer.
 479 */
 480static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 481{
 482	key_t key = params->key;
 483	int shmflg = params->flg;
 484	size_t size = params->u.size;
 485	int error;
 486	struct shmid_kernel *shp;
 487	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 488	struct file *file;
 489	char name[13];
 490	int id;
 491	vm_flags_t acctflag = 0;
 492
 493	if (size < SHMMIN || size > ns->shm_ctlmax)
 494		return -EINVAL;
 495
 496	if (ns->shm_tot + numpages > ns->shm_ctlall)
 497		return -ENOSPC;
 498
 499	shp = ipc_rcu_alloc(sizeof(*shp));
 500	if (!shp)
 501		return -ENOMEM;
 502
 503	shp->shm_perm.key = key;
 504	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
 505	shp->mlock_user = NULL;
 506
 507	shp->shm_perm.security = NULL;
 508	error = security_shm_alloc(shp);
 509	if (error) {
 510		ipc_rcu_putref(shp, ipc_rcu_free);
 511		return error;
 512	}
 513
 514	sprintf(name, "SYSV%08x", key);
 515	if (shmflg & SHM_HUGETLB) {
 516		struct hstate *hs;
 517		size_t hugesize;
 518
 519		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 520		if (!hs) {
 521			error = -EINVAL;
 522			goto no_file;
 523		}
 524		hugesize = ALIGN(size, huge_page_size(hs));
 525
 526		/* hugetlb_file_setup applies strict accounting */
 527		if (shmflg & SHM_NORESERVE)
 528			acctflag = VM_NORESERVE;
 529		file = hugetlb_file_setup(name, hugesize, acctflag,
 530				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
 531				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 532	} else {
 533		/*
 534		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
 535		 * if it's asked for.
 536		 */
 537		if  ((shmflg & SHM_NORESERVE) &&
 538				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
 539			acctflag = VM_NORESERVE;
 540		file = shmem_file_setup(name, size, acctflag);
 541	}
 542	error = PTR_ERR(file);
 543	if (IS_ERR(file))
 544		goto no_file;
 545
 546	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
 547	if (id < 0) {
 548		error = id;
 549		goto no_id;
 550	}
 551
 552	shp->shm_cprid = task_tgid_vnr(current);
 553	shp->shm_lprid = 0;
 554	shp->shm_atim = shp->shm_dtim = 0;
 555	shp->shm_ctim = get_seconds();
 556	shp->shm_segsz = size;
 557	shp->shm_nattch = 0;
 558	shp->shm_file = file;
 559	shp->shm_creator = current;
 560
 561	/*
 562	 * shmid gets reported as "inode#" in /proc/pid/maps.
 563	 * proc-ps tools use this. Changing this will break them.
 564	 */
 565	file_inode(file)->i_ino = shp->shm_perm.id;
 566
 567	ns->shm_tot += numpages;
 568	error = shp->shm_perm.id;
 569
 570	ipc_unlock_object(&shp->shm_perm);
 571	rcu_read_unlock();
 572	return error;
 573
 574no_id:
 575	if (is_file_hugepages(file) && shp->mlock_user)
 576		user_shm_unlock(size, shp->mlock_user);
 577	fput(file);
 578no_file:
 579	ipc_rcu_putref(shp, shm_rcu_free);
 580	return error;
 581}
 582
 583/*
 584 * Called with shm_ids.rwsem and ipcp locked.
 585 */
 586static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
 587{
 588	struct shmid_kernel *shp;
 589
 590	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 591	return security_shm_associate(shp, shmflg);
 592}
 593
 594/*
 595 * Called with shm_ids.rwsem and ipcp locked.
 596 */
 597static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
 598				struct ipc_params *params)
 599{
 600	struct shmid_kernel *shp;
 601
 602	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 603	if (shp->shm_segsz < params->u.size)
 604		return -EINVAL;
 605
 606	return 0;
 607}
 608
 609SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
 610{
 611	struct ipc_namespace *ns;
 612	struct ipc_ops shm_ops;
 613	struct ipc_params shm_params;
 614
 615	ns = current->nsproxy->ipc_ns;
 616
 617	shm_ops.getnew = newseg;
 618	shm_ops.associate = shm_security;
 619	shm_ops.more_checks = shm_more_checks;
 620
 621	shm_params.key = key;
 622	shm_params.flg = shmflg;
 623	shm_params.u.size = size;
 624
 625	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
 626}
 627
 628static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
 629{
 630	switch (version) {
 631	case IPC_64:
 632		return copy_to_user(buf, in, sizeof(*in));
 633	case IPC_OLD:
 634	    {
 635		struct shmid_ds out;
 636
 637		memset(&out, 0, sizeof(out));
 638		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
 639		out.shm_segsz	= in->shm_segsz;
 640		out.shm_atime	= in->shm_atime;
 641		out.shm_dtime	= in->shm_dtime;
 642		out.shm_ctime	= in->shm_ctime;
 643		out.shm_cpid	= in->shm_cpid;
 644		out.shm_lpid	= in->shm_lpid;
 645		out.shm_nattch	= in->shm_nattch;
 646
 647		return copy_to_user(buf, &out, sizeof(out));
 648	    }
 649	default:
 650		return -EINVAL;
 651	}
 652}
 653
 654static inline unsigned long
 655copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
 656{
 657	switch (version) {
 658	case IPC_64:
 659		if (copy_from_user(out, buf, sizeof(*out)))
 660			return -EFAULT;
 661		return 0;
 662	case IPC_OLD:
 663	    {
 664		struct shmid_ds tbuf_old;
 665
 666		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 667			return -EFAULT;
 668
 669		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
 670		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
 671		out->shm_perm.mode	= tbuf_old.shm_perm.mode;
 672
 673		return 0;
 674	    }
 675	default:
 676		return -EINVAL;
 677	}
 678}
 679
 680static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
 681{
 682	switch (version) {
 683	case IPC_64:
 684		return copy_to_user(buf, in, sizeof(*in));
 685	case IPC_OLD:
 686	    {
 687		struct shminfo out;
 688
 689		if (in->shmmax > INT_MAX)
 690			out.shmmax = INT_MAX;
 691		else
 692			out.shmmax = (int)in->shmmax;
 693
 694		out.shmmin	= in->shmmin;
 695		out.shmmni	= in->shmmni;
 696		out.shmseg	= in->shmseg;
 697		out.shmall	= in->shmall; 
 698
 699		return copy_to_user(buf, &out, sizeof(out));
 700	    }
 701	default:
 702		return -EINVAL;
 703	}
 704}
 705
 706/*
 707 * Calculate and add used RSS and swap pages of a shm.
 708 * Called with shm_ids.rwsem held as a reader
 709 */
 710static void shm_add_rss_swap(struct shmid_kernel *shp,
 711	unsigned long *rss_add, unsigned long *swp_add)
 712{
 713	struct inode *inode;
 714
 715	inode = file_inode(shp->shm_file);
 716
 717	if (is_file_hugepages(shp->shm_file)) {
 718		struct address_space *mapping = inode->i_mapping;
 719		struct hstate *h = hstate_file(shp->shm_file);
 720		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
 721	} else {
 722#ifdef CONFIG_SHMEM
 723		struct shmem_inode_info *info = SHMEM_I(inode);
 724		spin_lock(&info->lock);
 725		*rss_add += inode->i_mapping->nrpages;
 726		*swp_add += info->swapped;
 727		spin_unlock(&info->lock);
 728#else
 729		*rss_add += inode->i_mapping->nrpages;
 730#endif
 731	}
 732}
 733
 734/*
 735 * Called with shm_ids.rwsem held as a reader
 736 */
 737static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
 738		unsigned long *swp)
 739{
 740	int next_id;
 741	int total, in_use;
 742
 743	*rss = 0;
 744	*swp = 0;
 745
 746	in_use = shm_ids(ns).in_use;
 747
 748	for (total = 0, next_id = 0; total < in_use; next_id++) {
 749		struct kern_ipc_perm *ipc;
 750		struct shmid_kernel *shp;
 751
 752		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
 753		if (ipc == NULL)
 754			continue;
 755		shp = container_of(ipc, struct shmid_kernel, shm_perm);
 756
 757		shm_add_rss_swap(shp, rss, swp);
 758
 759		total++;
 760	}
 761}
 762
 763/*
 764 * This function handles some shmctl commands which require the rwsem
 765 * to be held in write mode.
 766 * NOTE: no locks must be held, the rwsem is taken inside this function.
 767 */
 768static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
 769		       struct shmid_ds __user *buf, int version)
 770{
 771	struct kern_ipc_perm *ipcp;
 772	struct shmid64_ds shmid64;
 773	struct shmid_kernel *shp;
 774	int err;
 775
 776	if (cmd == IPC_SET) {
 777		if (copy_shmid_from_user(&shmid64, buf, version))
 778			return -EFAULT;
 779	}
 780
 781	down_write(&shm_ids(ns).rwsem);
 782	rcu_read_lock();
 783
 784	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
 785				      &shmid64.shm_perm, 0);
 786	if (IS_ERR(ipcp)) {
 787		err = PTR_ERR(ipcp);
 788		goto out_unlock1;
 789	}
 790
 791	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 792
 793	err = security_shm_shmctl(shp, cmd);
 794	if (err)
 795		goto out_unlock1;
 796
 797	switch (cmd) {
 798	case IPC_RMID:
 799		ipc_lock_object(&shp->shm_perm);
 800		/* do_shm_rmid unlocks the ipc object and rcu */
 801		do_shm_rmid(ns, ipcp);
 802		goto out_up;
 803	case IPC_SET:
 804		ipc_lock_object(&shp->shm_perm);
 805		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
 806		if (err)
 807			goto out_unlock0;
 808		shp->shm_ctim = get_seconds();
 809		break;
 810	default:
 811		err = -EINVAL;
 812		goto out_unlock1;
 813	}
 814
 815out_unlock0:
 816	ipc_unlock_object(&shp->shm_perm);
 817out_unlock1:
 818	rcu_read_unlock();
 819out_up:
 820	up_write(&shm_ids(ns).rwsem);
 821	return err;
 822}
 823
 824static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
 825			 int cmd, int version, void __user *buf)
 826{
 827	int err;
 828	struct shmid_kernel *shp;
 829
 830	/* preliminary security checks for *_INFO */
 831	if (cmd == IPC_INFO || cmd == SHM_INFO) {
 832		err = security_shm_shmctl(NULL, cmd);
 833		if (err)
 834			return err;
 835	}
 836
 837	switch (cmd) {
 838	case IPC_INFO:
 839	{
 840		struct shminfo64 shminfo;
 841
 842		memset(&shminfo, 0, sizeof(shminfo));
 843		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
 844		shminfo.shmmax = ns->shm_ctlmax;
 845		shminfo.shmall = ns->shm_ctlall;
 846
 847		shminfo.shmmin = SHMMIN;
 848		if (copy_shminfo_to_user(buf, &shminfo, version))
 849			return -EFAULT;
 850
 851		down_read(&shm_ids(ns).rwsem);
 852		err = ipc_get_maxid(&shm_ids(ns));
 853		up_read(&shm_ids(ns).rwsem);
 854
 855		if (err < 0)
 856			err = 0;
 857		goto out;
 858	}
 859	case SHM_INFO:
 860	{
 861		struct shm_info shm_info;
 862
 863		memset(&shm_info, 0, sizeof(shm_info));
 864		down_read(&shm_ids(ns).rwsem);
 865		shm_info.used_ids = shm_ids(ns).in_use;
 866		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
 867		shm_info.shm_tot = ns->shm_tot;
 868		shm_info.swap_attempts = 0;
 869		shm_info.swap_successes = 0;
 870		err = ipc_get_maxid(&shm_ids(ns));
 871		up_read(&shm_ids(ns).rwsem);
 872		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
 873			err = -EFAULT;
 874			goto out;
 875		}
 876
 877		err = err < 0 ? 0 : err;
 878		goto out;
 879	}
 880	case SHM_STAT:
 881	case IPC_STAT:
 882	{
 883		struct shmid64_ds tbuf;
 884		int result;
 885
 886		rcu_read_lock();
 887		if (cmd == SHM_STAT) {
 888			shp = shm_obtain_object(ns, shmid);
 889			if (IS_ERR(shp)) {
 890				err = PTR_ERR(shp);
 891				goto out_unlock;
 892			}
 893			result = shp->shm_perm.id;
 894		} else {
 895			shp = shm_obtain_object_check(ns, shmid);
 896			if (IS_ERR(shp)) {
 897				err = PTR_ERR(shp);
 898				goto out_unlock;
 899			}
 900			result = 0;
 901		}
 902
 903		err = -EACCES;
 904		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
 905			goto out_unlock;
 906
 907		err = security_shm_shmctl(shp, cmd);
 908		if (err)
 909			goto out_unlock;
 910
 911		memset(&tbuf, 0, sizeof(tbuf));
 912		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
 913		tbuf.shm_segsz	= shp->shm_segsz;
 914		tbuf.shm_atime	= shp->shm_atim;
 915		tbuf.shm_dtime	= shp->shm_dtim;
 916		tbuf.shm_ctime	= shp->shm_ctim;
 917		tbuf.shm_cpid	= shp->shm_cprid;
 918		tbuf.shm_lpid	= shp->shm_lprid;
 919		tbuf.shm_nattch	= shp->shm_nattch;
 920		rcu_read_unlock();
 921
 922		if (copy_shmid_to_user(buf, &tbuf, version))
 923			err = -EFAULT;
 924		else
 925			err = result;
 926		goto out;
 927	}
 928	default:
 929		return -EINVAL;
 930	}
 931
 932out_unlock:
 933	rcu_read_unlock();
 934out:
 935	return err;
 936}
 937
 938SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 939{
 940	struct shmid_kernel *shp;
 941	int err, version;
 942	struct ipc_namespace *ns;
 943
 944	if (cmd < 0 || shmid < 0)
 945		return -EINVAL;
 946
 947	version = ipc_parse_version(&cmd);
 948	ns = current->nsproxy->ipc_ns;
 949
 950	switch (cmd) {
 951	case IPC_INFO:
 952	case SHM_INFO:
 953	case SHM_STAT:
 954	case IPC_STAT:
 955		return shmctl_nolock(ns, shmid, cmd, version, buf);
 956	case IPC_RMID:
 957	case IPC_SET:
 958		return shmctl_down(ns, shmid, cmd, buf, version);
 959	case SHM_LOCK:
 960	case SHM_UNLOCK:
 961	{
 962		struct file *shm_file;
 963
 964		rcu_read_lock();
 965		shp = shm_obtain_object_check(ns, shmid);
 966		if (IS_ERR(shp)) {
 967			err = PTR_ERR(shp);
 968			goto out_unlock1;
 969		}
 970
 971		audit_ipc_obj(&(shp->shm_perm));
 972		err = security_shm_shmctl(shp, cmd);
 973		if (err)
 974			goto out_unlock1;
 975
 976		ipc_lock_object(&shp->shm_perm);
 977
 978		/* check if shm_destroy() is tearing down shp */
 979		if (!ipc_valid_object(&shp->shm_perm)) {
 980			err = -EIDRM;
 981			goto out_unlock0;
 982		}
 983
 984		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
 985			kuid_t euid = current_euid();
 986			if (!uid_eq(euid, shp->shm_perm.uid) &&
 987			    !uid_eq(euid, shp->shm_perm.cuid)) {
 988				err = -EPERM;
 989				goto out_unlock0;
 990			}
 991			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
 992				err = -EPERM;
 993				goto out_unlock0;
 994			}
 995		}
 996
 997		shm_file = shp->shm_file;
 998		if (is_file_hugepages(shm_file))
 999			goto out_unlock0;
1000
1001		if (cmd == SHM_LOCK) {
1002			struct user_struct *user = current_user();
1003			err = shmem_lock(shm_file, 1, user);
1004			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1005				shp->shm_perm.mode |= SHM_LOCKED;
1006				shp->mlock_user = user;
1007			}
1008			goto out_unlock0;
1009		}
1010
1011		/* SHM_UNLOCK */
1012		if (!(shp->shm_perm.mode & SHM_LOCKED))
1013			goto out_unlock0;
1014		shmem_lock(shm_file, 0, shp->mlock_user);
1015		shp->shm_perm.mode &= ~SHM_LOCKED;
1016		shp->mlock_user = NULL;
1017		get_file(shm_file);
1018		ipc_unlock_object(&shp->shm_perm);
1019		rcu_read_unlock();
1020		shmem_unlock_mapping(shm_file->f_mapping);
1021
1022		fput(shm_file);
1023		return err;
1024	}
1025	default:
1026		return -EINVAL;
1027	}
1028
1029out_unlock0:
1030	ipc_unlock_object(&shp->shm_perm);
1031out_unlock1:
1032	rcu_read_unlock();
1033	return err;
1034}
1035
1036/*
1037 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1038 *
1039 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1040 * "raddr" thing points to kernel space, and there has to be a wrapper around
1041 * this.
1042 */
1043long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1044	      unsigned long shmlba)
1045{
1046	struct shmid_kernel *shp;
1047	unsigned long addr;
1048	unsigned long size;
1049	struct file *file;
1050	int    err;
1051	unsigned long flags;
1052	unsigned long prot;
1053	int acc_mode;
1054	struct ipc_namespace *ns;
1055	struct shm_file_data *sfd;
1056	struct path path;
1057	fmode_t f_mode;
1058	unsigned long populate = 0;
1059
1060	err = -EINVAL;
1061	if (shmid < 0)
1062		goto out;
1063	else if ((addr = (ulong)shmaddr)) {
1064		if (addr & (shmlba - 1)) {
1065			if (shmflg & SHM_RND)
1066				addr &= ~(shmlba - 1);	   /* round down */
1067			else
1068#ifndef __ARCH_FORCE_SHMLBA
1069				if (addr & ~PAGE_MASK)
1070#endif
1071					goto out;
1072		}
1073		flags = MAP_SHARED | MAP_FIXED;
1074	} else {
1075		if ((shmflg & SHM_REMAP))
1076			goto out;
1077
1078		flags = MAP_SHARED;
1079	}
1080
1081	if (shmflg & SHM_RDONLY) {
1082		prot = PROT_READ;
1083		acc_mode = S_IRUGO;
1084		f_mode = FMODE_READ;
1085	} else {
1086		prot = PROT_READ | PROT_WRITE;
1087		acc_mode = S_IRUGO | S_IWUGO;
1088		f_mode = FMODE_READ | FMODE_WRITE;
1089	}
1090	if (shmflg & SHM_EXEC) {
1091		prot |= PROT_EXEC;
1092		acc_mode |= S_IXUGO;
1093	}
1094
1095	/*
1096	 * We cannot rely on the fs check since SYSV IPC does have an
1097	 * additional creator id...
1098	 */
1099	ns = current->nsproxy->ipc_ns;
1100	rcu_read_lock();
1101	shp = shm_obtain_object_check(ns, shmid);
1102	if (IS_ERR(shp)) {
1103		err = PTR_ERR(shp);
1104		goto out_unlock;
1105	}
1106
1107	err = -EACCES;
1108	if (ipcperms(ns, &shp->shm_perm, acc_mode))
1109		goto out_unlock;
1110
1111	err = security_shm_shmat(shp, shmaddr, shmflg);
1112	if (err)
1113		goto out_unlock;
1114
1115	ipc_lock_object(&shp->shm_perm);
1116
1117	/* check if shm_destroy() is tearing down shp */
1118	if (!ipc_valid_object(&shp->shm_perm)) {
1119		ipc_unlock_object(&shp->shm_perm);
1120		err = -EIDRM;
1121		goto out_unlock;
1122	}
1123
1124	path = shp->shm_file->f_path;
1125	path_get(&path);
1126	shp->shm_nattch++;
1127	size = i_size_read(path.dentry->d_inode);
1128	ipc_unlock_object(&shp->shm_perm);
1129	rcu_read_unlock();
1130
1131	err = -ENOMEM;
1132	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1133	if (!sfd) {
1134		path_put(&path);
1135		goto out_nattch;
1136	}
1137
1138	file = alloc_file(&path, f_mode,
1139			  is_file_hugepages(shp->shm_file) ?
1140				&shm_file_operations_huge :
1141				&shm_file_operations);
1142	err = PTR_ERR(file);
1143	if (IS_ERR(file)) {
1144		kfree(sfd);
1145		path_put(&path);
1146		goto out_nattch;
1147	}
1148
1149	file->private_data = sfd;
1150	file->f_mapping = shp->shm_file->f_mapping;
1151	sfd->id = shp->shm_perm.id;
1152	sfd->ns = get_ipc_ns(ns);
1153	sfd->file = shp->shm_file;
1154	sfd->vm_ops = NULL;
1155
1156	err = security_mmap_file(file, prot, flags);
1157	if (err)
1158		goto out_fput;
1159
1160	down_write(&current->mm->mmap_sem);
1161	if (addr && !(shmflg & SHM_REMAP)) {
1162		err = -EINVAL;
1163		if (find_vma_intersection(current->mm, addr, addr + size))
1164			goto invalid;
1165		/*
1166		 * If shm segment goes below stack, make sure there is some
1167		 * space left for the stack to grow (at least 4 pages).
1168		 */
1169		if (addr < current->mm->start_stack &&
1170		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
1171			goto invalid;
1172	}
1173
1174	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
1175	*raddr = addr;
1176	err = 0;
1177	if (IS_ERR_VALUE(addr))
1178		err = (long)addr;
1179invalid:
1180	up_write(&current->mm->mmap_sem);
1181	if (populate)
1182		mm_populate(addr, populate);
1183
1184out_fput:
1185	fput(file);
1186
1187out_nattch:
1188	down_write(&shm_ids(ns).rwsem);
1189	shp = shm_lock(ns, shmid);
1190	BUG_ON(IS_ERR(shp));
1191	shp->shm_nattch--;
1192	if (shm_may_destroy(ns, shp))
1193		shm_destroy(ns, shp);
1194	else
1195		shm_unlock(shp);
1196	up_write(&shm_ids(ns).rwsem);
1197	return err;
1198
1199out_unlock:
1200	rcu_read_unlock();
1201out:
1202	return err;
1203}
1204
1205SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1206{
1207	unsigned long ret;
1208	long err;
1209
1210	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1211	if (err)
1212		return err;
1213	force_successful_syscall_return();
1214	return (long)ret;
1215}
1216
1217/*
1218 * detach and kill segment if marked destroyed.
1219 * The work is done in shm_close.
1220 */
1221SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1222{
1223	struct mm_struct *mm = current->mm;
1224	struct vm_area_struct *vma;
1225	unsigned long addr = (unsigned long)shmaddr;
1226	int retval = -EINVAL;
1227#ifdef CONFIG_MMU
1228	loff_t size = 0;
1229	struct vm_area_struct *next;
1230#endif
1231
1232	if (addr & ~PAGE_MASK)
1233		return retval;
1234
1235	down_write(&mm->mmap_sem);
1236
1237	/*
1238	 * This function tries to be smart and unmap shm segments that
1239	 * were modified by partial mlock or munmap calls:
1240	 * - It first determines the size of the shm segment that should be
1241	 *   unmapped: It searches for a vma that is backed by shm and that
 1242	 *   started at address shmaddr. It records its size and then unmaps
1243	 *   it.
1244	 * - Then it unmaps all shm vmas that started at shmaddr and that
1245	 *   are within the initially determined size.
1246	 * Errors from do_munmap are ignored: the function only fails if
1247	 * it's called with invalid parameters or if it's called to unmap
1248	 * a part of a vma. Both calls in this function are for full vmas,
1249	 * the parameters are directly copied from the vma itself and always
1250	 * valid - therefore do_munmap cannot fail. (famous last words?)
1251	 */
1252	/*
1253	 * If it had been mremap()'d, the starting address would not
1254	 * match the usual checks anyway. So assume all vma's are
1255	 * above the starting address given.
1256	 */
1257	vma = find_vma(mm, addr);
1258
1259#ifdef CONFIG_MMU
1260	while (vma) {
1261		next = vma->vm_next;
1262
1263		/*
1264		 * Check if the starting address would match, i.e. it's
 1265		 * a fragment created by mprotect() and/or munmap(), or
1266		 * otherwise it starts at this address with no hassles.
1267		 */
1268		if ((vma->vm_ops == &shm_vm_ops) &&
1269			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1270
1271
1272			size = file_inode(vma->vm_file)->i_size;
1273			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1274			/*
1275			 * We discovered the size of the shm segment, so
1276			 * break out of here and fall through to the next
1277			 * loop that uses the size information to stop
1278			 * searching for matching vma's.
1279			 */
1280			retval = 0;
1281			vma = next;
1282			break;
1283		}
1284		vma = next;
1285	}
1286
1287	/*
1288	 * We need look no further than the maximum address a fragment
1289	 * could possibly have landed at. Also cast things to loff_t to
1290	 * prevent overflows and make comparisons vs. equal-width types.
1291	 */
1292	size = PAGE_ALIGN(size);
1293	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1294		next = vma->vm_next;
1295
1296		/* finding a matching vma now does not alter retval */
1297		if ((vma->vm_ops == &shm_vm_ops) &&
1298			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
1299
1300			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1301		vma = next;
1302	}
1303
1304#else /* CONFIG_MMU */
1305	/* under NOMMU conditions, the exact address to be destroyed must be
1306	 * given */
1307	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1308		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1309		retval = 0;
1310	}
1311
1312#endif
1313
1314	up_write(&mm->mmap_sem);
1315	return retval;
1316}
1317
1318#ifdef CONFIG_PROC_FS
1319static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1320{
1321	struct user_namespace *user_ns = seq_user_ns(s);
1322	struct shmid_kernel *shp = it;
1323	unsigned long rss = 0, swp = 0;
1324
1325	shm_add_rss_swap(shp, &rss, &swp);
1326
1327#if BITS_PER_LONG <= 32
1328#define SIZE_SPEC "%10lu"
1329#else
1330#define SIZE_SPEC "%21lu"
1331#endif
1332
1333	return seq_printf(s,
1334			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
1335			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
1336			  SIZE_SPEC " " SIZE_SPEC "\n",
1337			  shp->shm_perm.key,
1338			  shp->shm_perm.id,
1339			  shp->shm_perm.mode,
1340			  shp->shm_segsz,
1341			  shp->shm_cprid,
1342			  shp->shm_lprid,
1343			  shp->shm_nattch,
1344			  from_kuid_munged(user_ns, shp->shm_perm.uid),
1345			  from_kgid_munged(user_ns, shp->shm_perm.gid),
1346			  from_kuid_munged(user_ns, shp->shm_perm.cuid),
1347			  from_kgid_munged(user_ns, shp->shm_perm.cgid),
1348			  shp->shm_atim,
1349			  shp->shm_dtim,
1350			  shp->shm_ctim,
1351			  rss * PAGE_SIZE,
1352			  swp * PAGE_SIZE);
1353}
1354#endif
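
For orientation only (this example is not part of ipc/shm.c), a minimal user-space sketch of the System V shared memory API implemented above: shmget() is serviced by newseg(), shmat() by do_shmat(), shmctl() by shmctl_nolock()/shmctl_down(), and the detach work of shmdt() ends up in shm_close(). Error handling is kept minimal.

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	/* shmget() -> SYSCALL_DEFINE3(shmget) -> ipcget() -> newseg() */
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	if (id < 0) {
		perror("shmget");
		return 1;
	}

	/* shmat() -> do_shmat(); a NULL address lets the kernel choose one */
	char *p = shmat(id, NULL, 0);
	if (p == (char *)-1) {
		perror("shmat");
		return 1;
	}
	strcpy(p, "hello");

	/* IPC_STAT -> shmctl_nolock(); reports shm_nattch, shm_segsz, ... */
	struct shmid_ds ds;
	if (shmctl(id, IPC_STAT, &ds) == 0)
		printf("nattch=%lu segsz=%lu\n",
		       (unsigned long)ds.shm_nattch,
		       (unsigned long)ds.shm_segsz);

	/* shmdt() unmaps the vma; shm_close() drops shm_nattch */
	shmdt(p);

	/* IPC_RMID -> shmctl_down() -> do_shm_rmid() */
	shmctl(id, IPC_RMID, NULL);
	return 0;
}

Note that IPC_RMID does not always destroy the segment on the spot: do_shm_rmid() only sets SHM_DEST while shm_nattch is non-zero, and the last detach (shm_close()) then calls shm_destroy().
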
v4.6
   1/*
   2 * linux/ipc/shm.c
   3 * Copyright (C) 1992, 1993 Krishna Balasubramanian
   4 *	 Many improvements/fixes by Bruno Haible.
   5 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
   6 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
   7 *
   8 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
   9 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
  10 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
  11 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
  12 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
  13 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
  14 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
  15 *
  16 * support for audit of ipc object properties and permission changes
  17 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  18 *
  19 * namespaces support
  20 * OpenVZ, SWsoft Inc.
  21 * Pavel Emelianov <xemul@openvz.org>
  22 *
  23 * Better ipc lock (kern_ipc_perm.lock) handling
  24 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
  25 */
  26
  27#include <linux/slab.h>
  28#include <linux/mm.h>
  29#include <linux/hugetlb.h>
  30#include <linux/shm.h>
  31#include <linux/init.h>
  32#include <linux/file.h>
  33#include <linux/mman.h>
  34#include <linux/shmem_fs.h>
  35#include <linux/security.h>
  36#include <linux/syscalls.h>
  37#include <linux/audit.h>
  38#include <linux/capability.h>
  39#include <linux/ptrace.h>
  40#include <linux/seq_file.h>
  41#include <linux/rwsem.h>
  42#include <linux/nsproxy.h>
  43#include <linux/mount.h>
  44#include <linux/ipc_namespace.h>
  45
  46#include <linux/uaccess.h>
  47
  48#include "util.h"
  49
  50struct shm_file_data {
  51	int id;
  52	struct ipc_namespace *ns;
  53	struct file *file;
  54	const struct vm_operations_struct *vm_ops;
  55};
  56
  57#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
  58
  59static const struct file_operations shm_file_operations;
  60static const struct vm_operations_struct shm_vm_ops;
  61
  62#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])
  63
  64#define shm_unlock(shp)			\
  65	ipc_unlock(&(shp)->shm_perm)
  66
  67static int newseg(struct ipc_namespace *, struct ipc_params *);
  68static void shm_open(struct vm_area_struct *vma);
  69static void shm_close(struct vm_area_struct *vma);
  70static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
  71#ifdef CONFIG_PROC_FS
  72static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
  73#endif
  74
  75void shm_init_ns(struct ipc_namespace *ns)
  76{
  77	ns->shm_ctlmax = SHMMAX;
  78	ns->shm_ctlall = SHMALL;
  79	ns->shm_ctlmni = SHMMNI;
  80	ns->shm_rmid_forced = 0;
  81	ns->shm_tot = 0;
  82	ipc_init_ids(&shm_ids(ns));
  83}
  84
  85/*
  86 * Called with shm_ids.rwsem (writer) and the shp structure locked.
  87 * Only shm_ids.rwsem remains locked on exit.
  88 */
  89static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
  90{
  91	struct shmid_kernel *shp;
  92	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
  93
  94	if (shp->shm_nattch) {
  95		shp->shm_perm.mode |= SHM_DEST;
  96		/* Do not find it any more */
  97		shp->shm_perm.key = IPC_PRIVATE;
  98		shm_unlock(shp);
  99	} else
 100		shm_destroy(ns, shp);
 101}
 102
 103#ifdef CONFIG_IPC_NS
 104void shm_exit_ns(struct ipc_namespace *ns)
 105{
 106	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
 107	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
 108}
 109#endif
 110
 111static int __init ipc_ns_init(void)
 112{
 113	shm_init_ns(&init_ipc_ns);
 114	return 0;
 115}
 116
 117pure_initcall(ipc_ns_init);
 118
 119void __init shm_init(void)
 120{
 121	ipc_init_proc_interface("sysvipc/shm",
 122#if BITS_PER_LONG <= 32
 123				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
 124#else
 125				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
 126#endif
 127				IPC_SHM_IDS, sysvipc_shm_proc_show);
 128}
 129
 130static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
 131{
 132	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
 133
 134	if (IS_ERR(ipcp))
 135		return ERR_CAST(ipcp);
 136
 137	return container_of(ipcp, struct shmid_kernel, shm_perm);
 138}
 139
 140static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
 141{
 142	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
 143
 144	if (IS_ERR(ipcp))
 145		return ERR_CAST(ipcp);
 146
 147	return container_of(ipcp, struct shmid_kernel, shm_perm);
 148}
 149
 150/*
 151 * shm_lock_(check_) routines are called in the paths where the rwsem
 152 * is not necessarily held.
 153 */
 154static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 155{
 156	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
 157
 158	/*
 159	 * Callers of shm_lock() must validate the status of the returned ipc
 160	 * object pointer (as returned by ipc_lock()), and error out as
 161	 * appropriate.
 162	 */
 163	if (IS_ERR(ipcp))
 164		return (void *)ipcp;
 165	return container_of(ipcp, struct shmid_kernel, shm_perm);
 166}
 167
 168static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
 169{
 170	rcu_read_lock();
 171	ipc_lock_object(&ipcp->shm_perm);
 172}
 173
 174static void shm_rcu_free(struct rcu_head *head)
 175{
 176	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
 177	struct shmid_kernel *shp = ipc_rcu_to_struct(p);
 178
 179	security_shm_free(shp);
 180	ipc_rcu_free(head);
 181}
 182
 183static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 184{
 185	list_del(&s->shm_clist);
 186	ipc_rmid(&shm_ids(ns), &s->shm_perm);
 187}
 188
 189
 190static int __shm_open(struct vm_area_struct *vma)
 191{
 192	struct file *file = vma->vm_file;
 193	struct shm_file_data *sfd = shm_file_data(file);
 194	struct shmid_kernel *shp;
 195
 196	shp = shm_lock(sfd->ns, sfd->id);
 197
 198	if (IS_ERR(shp))
 199		return PTR_ERR(shp);
 200
 201	shp->shm_atim = get_seconds();
 202	shp->shm_lprid = task_tgid_vnr(current);
 203	shp->shm_nattch++;
 204	shm_unlock(shp);
 205	return 0;
 206}
 207
 208/* This is called by fork, once for every shm attach. */
 209static void shm_open(struct vm_area_struct *vma)
 210{
 211	int err = __shm_open(vma);
 212	/*
 213	 * We raced in the idr lookup or with shm_destroy().
 214	 * Either way, the ID is busted.
 215	 */
 216	WARN_ON_ONCE(err);
 217}
 218
 219/*
 220 * shm_destroy - free the struct shmid_kernel
 221 *
 222 * @ns: namespace
 223 * @shp: struct to free
 224 *
 225 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 226 * but returns with shp unlocked and freed.
 227 */
 228static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 229{
 230	struct file *shm_file;
 231
 232	shm_file = shp->shm_file;
 233	shp->shm_file = NULL;
 234	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
 235	shm_rmid(ns, shp);
 236	shm_unlock(shp);
 237	if (!is_file_hugepages(shm_file))
 238		shmem_lock(shm_file, 0, shp->mlock_user);
 239	else if (shp->mlock_user)
 240		user_shm_unlock(i_size_read(file_inode(shm_file)),
 241				shp->mlock_user);
 242	fput(shm_file);
 243	ipc_rcu_putref(shp, shm_rcu_free);
 244}
 245
 246/*
 247 * shm_may_destroy - identifies whether shm segment should be destroyed now
 248 *
 249 * Returns true if and only if there are no active users of the segment and
 250 * one of the following is true:
 251 *
 252 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 253 *
 254 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 255 */
 256static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 257{
 258	return (shp->shm_nattch == 0) &&
 259	       (ns->shm_rmid_forced ||
 260		(shp->shm_perm.mode & SHM_DEST));
 261}
 262
 263/*
 264 * remove the attach descriptor vma.
 265 * free memory for segment if it is marked destroyed.
 266 * The descriptor has already been removed from the current->mm->mmap list
 267 * and will later be kfree()d.
 268 */
 269static void shm_close(struct vm_area_struct *vma)
 270{
 271	struct file *file = vma->vm_file;
 272	struct shm_file_data *sfd = shm_file_data(file);
 273	struct shmid_kernel *shp;
 274	struct ipc_namespace *ns = sfd->ns;
 275
 276	down_write(&shm_ids(ns).rwsem);
 277	/* remove from the list of attaches of the shm segment */
 278	shp = shm_lock(ns, sfd->id);
 279
 280	/*
 281	 * We raced in the idr lookup or with shm_destroy().
 282	 * Either way, the ID is busted.
 283	 */
 284	if (WARN_ON_ONCE(IS_ERR(shp)))
 285		goto done; /* no-op */
 286
 287	shp->shm_lprid = task_tgid_vnr(current);
 288	shp->shm_dtim = get_seconds();
 289	shp->shm_nattch--;
 290	if (shm_may_destroy(ns, shp))
 291		shm_destroy(ns, shp);
 292	else
 293		shm_unlock(shp);
 294done:
 295	up_write(&shm_ids(ns).rwsem);
 296}
 297
 298/* Called with ns->shm_ids(ns).rwsem locked */
 299static int shm_try_destroy_orphaned(int id, void *p, void *data)
 300{
 301	struct ipc_namespace *ns = data;
 302	struct kern_ipc_perm *ipcp = p;
 303	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 304
 305	/*
 306	 * We want to destroy segments without users and with already
 307	 * exit'ed originating process.
 308	 *
 309	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
 310	 */
 311	if (shp->shm_creator != NULL)
 312		return 0;
 313
 314	if (shm_may_destroy(ns, shp)) {
 315		shm_lock_by_ptr(shp);
 316		shm_destroy(ns, shp);
 317	}
 318	return 0;
 319}
 320
 321void shm_destroy_orphaned(struct ipc_namespace *ns)
 322{
 323	down_write(&shm_ids(ns).rwsem);
 324	if (shm_ids(ns).in_use)
 325		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
 326	up_write(&shm_ids(ns).rwsem);
 327}
 328
 329/* Locking assumes this will only be called with task == current */
 330void exit_shm(struct task_struct *task)
 331{
 332	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
 333	struct shmid_kernel *shp, *n;
 334
 335	if (list_empty(&task->sysvshm.shm_clist))
 336		return;
 337
 338	/*
 339	 * If kernel.shm_rmid_forced is not set then only keep track of
 340	 * which shmids are orphaned, so that a later set of the sysctl
 341	 * can clean them up.
 342	 */
 343	if (!ns->shm_rmid_forced) {
 344		down_read(&shm_ids(ns).rwsem);
 345		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
 346			shp->shm_creator = NULL;
 347		/*
 348		 * Only under read lock but we are only called on current
 349		 * so no entry on the list will be shared.
 350		 */
 351		list_del(&task->sysvshm.shm_clist);
 352		up_read(&shm_ids(ns).rwsem);
 353		return;
 354	}
 355
 356	/*
 357	 * Destroy all already created segments, that were not yet mapped,
 358	 * and mark any mapped as orphan to cover the sysctl toggling.
 359	 * Destroy is skipped if shm_may_destroy() returns false.
 360	 */
 361	down_write(&shm_ids(ns).rwsem);
 362	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
 363		shp->shm_creator = NULL;
 364
 365		if (shm_may_destroy(ns, shp)) {
 366			shm_lock_by_ptr(shp);
 367			shm_destroy(ns, shp);
 368		}
 369	}
 370
 371	/* Remove the list head from any segments still attached. */
 372	list_del(&task->sysvshm.shm_clist);
 373	up_write(&shm_ids(ns).rwsem);
 374}
 375
 376static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 377{
 378	struct file *file = vma->vm_file;
 379	struct shm_file_data *sfd = shm_file_data(file);
 380
 381	return sfd->vm_ops->fault(vma, vmf);
 382}
 383
 384#ifdef CONFIG_NUMA
 385static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 386{
 387	struct file *file = vma->vm_file;
 388	struct shm_file_data *sfd = shm_file_data(file);
 389	int err = 0;
 390	if (sfd->vm_ops->set_policy)
 391		err = sfd->vm_ops->set_policy(vma, new);
 392	return err;
 393}
 394
 395static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
 396					unsigned long addr)
 397{
 398	struct file *file = vma->vm_file;
 399	struct shm_file_data *sfd = shm_file_data(file);
 400	struct mempolicy *pol = NULL;
 401
 402	if (sfd->vm_ops->get_policy)
 403		pol = sfd->vm_ops->get_policy(vma, addr);
 404	else if (vma->vm_policy)
 405		pol = vma->vm_policy;
 406
 407	return pol;
 408}
 409#endif
 410
 411static int shm_mmap(struct file *file, struct vm_area_struct *vma)
 412{
 413	struct shm_file_data *sfd = shm_file_data(file);
 414	int ret;
 415
 416	/*
 417	 * In case of remap_file_pages() emulation, the file can represent
  418	 * removed IPC ID: propagate shm_lock() error to caller.
 419	 */
  420	ret = __shm_open(vma);
 421	if (ret)
 422		return ret;
 423
 424	ret = sfd->file->f_op->mmap(sfd->file, vma);
 425	if (ret) {
 426		shm_close(vma);
 427		return ret;
 428	}
 429	sfd->vm_ops = vma->vm_ops;
 430#ifdef CONFIG_MMU
 431	WARN_ON(!sfd->vm_ops->fault);
 432#endif
 433	vma->vm_ops = &shm_vm_ops;
 434	return 0;
 435}
 436
 437static int shm_release(struct inode *ino, struct file *file)
 438{
 439	struct shm_file_data *sfd = shm_file_data(file);
 440
 441	put_ipc_ns(sfd->ns);
 442	shm_file_data(file) = NULL;
 443	kfree(sfd);
 444	return 0;
 445}
 446
 447static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 448{
 449	struct shm_file_data *sfd = shm_file_data(file);
 450
 451	if (!sfd->file->f_op->fsync)
 452		return -EINVAL;
 453	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
 454}
 455
 456static long shm_fallocate(struct file *file, int mode, loff_t offset,
 457			  loff_t len)
 458{
 459	struct shm_file_data *sfd = shm_file_data(file);
 460
 461	if (!sfd->file->f_op->fallocate)
 462		return -EOPNOTSUPP;
 463	return sfd->file->f_op->fallocate(file, mode, offset, len);
 464}
 465
 466static unsigned long shm_get_unmapped_area(struct file *file,
 467	unsigned long addr, unsigned long len, unsigned long pgoff,
 468	unsigned long flags)
 469{
 470	struct shm_file_data *sfd = shm_file_data(file);
 471	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
 472						pgoff, flags);
 473}
 474
 475static const struct file_operations shm_file_operations = {
 476	.mmap		= shm_mmap,
 477	.fsync		= shm_fsync,
 478	.release	= shm_release,
 479#ifndef CONFIG_MMU
 480	.get_unmapped_area	= shm_get_unmapped_area,
 481#endif
 482	.llseek		= noop_llseek,
 483	.fallocate	= shm_fallocate,
 484};
 485
 486static const struct file_operations shm_file_operations_huge = {
 487	.mmap		= shm_mmap,
 488	.fsync		= shm_fsync,
 489	.release	= shm_release,
 490	.get_unmapped_area	= shm_get_unmapped_area,
 491	.llseek		= noop_llseek,
 492	.fallocate	= shm_fallocate,
 493};
 494
 495bool is_file_shm_hugepages(struct file *file)
 496{
 497	return file->f_op == &shm_file_operations_huge;
 498}
 499
 500static const struct vm_operations_struct shm_vm_ops = {
 501	.open	= shm_open,	/* callback for a new vm-area open */
 502	.close	= shm_close,	/* callback for when the vm-area is released */
 503	.fault	= shm_fault,
 504#if defined(CONFIG_NUMA)
 505	.set_policy = shm_set_policy,
 506	.get_policy = shm_get_policy,
 507#endif
 508};
 509
 510/**
 511 * newseg - Create a new shared memory segment
 512 * @ns: namespace
 513 * @params: ptr to the structure that contains key, size and shmflg
 514 *
 515 * Called with shm_ids.rwsem held as a writer.
 516 */
 517static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 518{
 519	key_t key = params->key;
 520	int shmflg = params->flg;
 521	size_t size = params->u.size;
 522	int error;
 523	struct shmid_kernel *shp;
 524	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 525	struct file *file;
 526	char name[13];
 527	int id;
 528	vm_flags_t acctflag = 0;
 529
 530	if (size < SHMMIN || size > ns->shm_ctlmax)
 531		return -EINVAL;
 532
 533	if (numpages << PAGE_SHIFT < size)
 534		return -ENOSPC;
 535
 536	if (ns->shm_tot + numpages < ns->shm_tot ||
 537			ns->shm_tot + numpages > ns->shm_ctlall)
 538		return -ENOSPC;
 539
 540	shp = ipc_rcu_alloc(sizeof(*shp));
 541	if (!shp)
 542		return -ENOMEM;
 543
 544	shp->shm_perm.key = key;
 545	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
 546	shp->mlock_user = NULL;
 547
 548	shp->shm_perm.security = NULL;
 549	error = security_shm_alloc(shp);
 550	if (error) {
 551		ipc_rcu_putref(shp, ipc_rcu_free);
 552		return error;
 553	}
 554
 555	sprintf(name, "SYSV%08x", key);
 556	if (shmflg & SHM_HUGETLB) {
 557		struct hstate *hs;
 558		size_t hugesize;
 559
 560		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 561		if (!hs) {
 562			error = -EINVAL;
 563			goto no_file;
 564		}
 565		hugesize = ALIGN(size, huge_page_size(hs));
 566
 567		/* hugetlb_file_setup applies strict accounting */
 568		if (shmflg & SHM_NORESERVE)
 569			acctflag = VM_NORESERVE;
 570		file = hugetlb_file_setup(name, hugesize, acctflag,
 571				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
 572				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 573	} else {
 574		/*
 575		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
 576		 * if it's asked for.
 577		 */
 578		if  ((shmflg & SHM_NORESERVE) &&
 579				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
 580			acctflag = VM_NORESERVE;
 581		file = shmem_kernel_file_setup(name, size, acctflag);
 582	}
 583	error = PTR_ERR(file);
 584	if (IS_ERR(file))
 585		goto no_file;
 586
 587	shp->shm_cprid = task_tgid_vnr(current);
 588	shp->shm_lprid = 0;
 589	shp->shm_atim = shp->shm_dtim = 0;
 590	shp->shm_ctim = get_seconds();
 591	shp->shm_segsz = size;
 592	shp->shm_nattch = 0;
 593	shp->shm_file = file;
 594	shp->shm_creator = current;
 595
 596	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
 597	if (id < 0) {
 598		error = id;
 599		goto no_id;
 600	}
 601
 602	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
 603
 604	/*
 605	 * shmid gets reported as "inode#" in /proc/pid/maps.
 606	 * proc-ps tools use this. Changing this will break them.
 607	 */
 608	file_inode(file)->i_ino = shp->shm_perm.id;
 609
 610	ns->shm_tot += numpages;
 611	error = shp->shm_perm.id;
 612
 613	ipc_unlock_object(&shp->shm_perm);
 614	rcu_read_unlock();
 615	return error;
 616
 617no_id:
 618	if (is_file_hugepages(file) && shp->mlock_user)
 619		user_shm_unlock(size, shp->mlock_user);
 620	fput(file);
 621no_file:
 622	ipc_rcu_putref(shp, shm_rcu_free);
 623	return error;
 624}
 625
 626/*
 627 * Called with shm_ids.rwsem and ipcp locked.
 628 */
 629static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
 630{
 631	struct shmid_kernel *shp;
 632
 633	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 634	return security_shm_associate(shp, shmflg);
 635}
 636
 637/*
 638 * Called with shm_ids.rwsem and ipcp locked.
 639 */
 640static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
 641				struct ipc_params *params)
 642{
 643	struct shmid_kernel *shp;
 644
 645	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 646	if (shp->shm_segsz < params->u.size)
 647		return -EINVAL;
 648
 649	return 0;
 650}
 651
 652SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
 653{
 654	struct ipc_namespace *ns;
 655	static const struct ipc_ops shm_ops = {
 656		.getnew = newseg,
 657		.associate = shm_security,
 658		.more_checks = shm_more_checks,
 659	};
 660	struct ipc_params shm_params;
 661
 662	ns = current->nsproxy->ipc_ns;
 663
 664	shm_params.key = key;
 665	shm_params.flg = shmflg;
 666	shm_params.u.size = size;
 667
 668	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
 669}
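
/*
 * Illustrative user-space sketch (not part of this kernel source): a minimal
 * caller of the shmget() syscall defined above.  The key, size and mode are
 * arbitrary example values.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	/* create (or find) a one-page, owner read/write segment for this key */
	int id = shmget((key_t)0x1234, 4096, IPC_CREAT | 0600);

	if (id < 0) {
		perror("shmget");
		return 1;
	}
	printf("shmid = %d\n", id);
	return 0;
}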
 670
 671static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
 672{
 673	switch (version) {
 674	case IPC_64:
 675		return copy_to_user(buf, in, sizeof(*in));
 676	case IPC_OLD:
 677	    {
 678		struct shmid_ds out;
 679
 680		memset(&out, 0, sizeof(out));
 681		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
 682		out.shm_segsz	= in->shm_segsz;
 683		out.shm_atime	= in->shm_atime;
 684		out.shm_dtime	= in->shm_dtime;
 685		out.shm_ctime	= in->shm_ctime;
 686		out.shm_cpid	= in->shm_cpid;
 687		out.shm_lpid	= in->shm_lpid;
 688		out.shm_nattch	= in->shm_nattch;
 689
 690		return copy_to_user(buf, &out, sizeof(out));
 691	    }
 692	default:
 693		return -EINVAL;
 694	}
 695}
 696
 697static inline unsigned long
 698copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
 699{
 700	switch (version) {
 701	case IPC_64:
 702		if (copy_from_user(out, buf, sizeof(*out)))
 703			return -EFAULT;
 704		return 0;
 705	case IPC_OLD:
 706	    {
 707		struct shmid_ds tbuf_old;
 708
 709		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 710			return -EFAULT;
 711
 712		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
 713		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
 714		out->shm_perm.mode	= tbuf_old.shm_perm.mode;
 715
 716		return 0;
 717	    }
 718	default:
 719		return -EINVAL;
 720	}
 721}
 722
 723static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
 724{
 725	switch (version) {
 726	case IPC_64:
 727		return copy_to_user(buf, in, sizeof(*in));
 728	case IPC_OLD:
 729	    {
 730		struct shminfo out;
 731
 732		if (in->shmmax > INT_MAX)
 733			out.shmmax = INT_MAX;
 734		else
 735			out.shmmax = (int)in->shmmax;
 736
 737		out.shmmin	= in->shmmin;
 738		out.shmmni	= in->shmmni;
 739		out.shmseg	= in->shmseg;
 740		out.shmall	= in->shmall;
 741
 742		return copy_to_user(buf, &out, sizeof(out));
 743	    }
 744	default:
 745		return -EINVAL;
 746	}
 747}
 748
 749/*
 750 * Calculate and add used RSS and swap pages of a shm.
 751 * Called with shm_ids.rwsem held as a reader
 752 */
 753static void shm_add_rss_swap(struct shmid_kernel *shp,
 754	unsigned long *rss_add, unsigned long *swp_add)
 755{
 756	struct inode *inode;
 757
 758	inode = file_inode(shp->shm_file);
 759
 760	if (is_file_hugepages(shp->shm_file)) {
 761		struct address_space *mapping = inode->i_mapping;
 762		struct hstate *h = hstate_file(shp->shm_file);
 763		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
 764	} else {
 765#ifdef CONFIG_SHMEM
 766		struct shmem_inode_info *info = SHMEM_I(inode);
 767		spin_lock(&info->lock);
 768		*rss_add += inode->i_mapping->nrpages;
 769		*swp_add += info->swapped;
 770		spin_unlock(&info->lock);
 771#else
 772		*rss_add += inode->i_mapping->nrpages;
 773#endif
 774	}
 775}
 776
 777/*
 778 * Called with shm_ids.rwsem held as a reader
 779 */
 780static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
 781		unsigned long *swp)
 782{
 783	int next_id;
 784	int total, in_use;
 785
 786	*rss = 0;
 787	*swp = 0;
 788
 789	in_use = shm_ids(ns).in_use;
 790
 791	for (total = 0, next_id = 0; total < in_use; next_id++) {
 792		struct kern_ipc_perm *ipc;
 793		struct shmid_kernel *shp;
 794
 795		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
 796		if (ipc == NULL)
 797			continue;
 798		shp = container_of(ipc, struct shmid_kernel, shm_perm);
 799
 800		shm_add_rss_swap(shp, rss, swp);
 801
 802		total++;
 803	}
 804}
 805
 806/*
 807 * This function handles some shmctl commands which require the rwsem
 808 * to be held in write mode.
 809 * NOTE: the caller must hold no locks; the rwsem is taken inside this function.
 810 */
 811static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
 812		       struct shmid_ds __user *buf, int version)
 813{
 814	struct kern_ipc_perm *ipcp;
 815	struct shmid64_ds shmid64;
 816	struct shmid_kernel *shp;
 817	int err;
 818
 819	if (cmd == IPC_SET) {
 820		if (copy_shmid_from_user(&shmid64, buf, version))
 821			return -EFAULT;
 822	}
 823
 824	down_write(&shm_ids(ns).rwsem);
 825	rcu_read_lock();
 826
 827	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
 828				      &shmid64.shm_perm, 0);
 829	if (IS_ERR(ipcp)) {
 830		err = PTR_ERR(ipcp);
 831		goto out_unlock1;
 832	}
 833
 834	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 835
 836	err = security_shm_shmctl(shp, cmd);
 837	if (err)
 838		goto out_unlock1;
 839
 840	switch (cmd) {
 841	case IPC_RMID:
 842		ipc_lock_object(&shp->shm_perm);
 843		/* do_shm_rmid unlocks the ipc object and rcu */
 844		do_shm_rmid(ns, ipcp);
 845		goto out_up;
 846	case IPC_SET:
 847		ipc_lock_object(&shp->shm_perm);
 848		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
 849		if (err)
 850			goto out_unlock0;
 851		shp->shm_ctim = get_seconds();
 852		break;
 853	default:
 854		err = -EINVAL;
 855		goto out_unlock1;
 856	}
 857
 858out_unlock0:
 859	ipc_unlock_object(&shp->shm_perm);
 860out_unlock1:
 861	rcu_read_unlock();
 862out_up:
 863	up_write(&shm_ids(ns).rwsem);
 864	return err;
 865}
 866
 867static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
 868			 int cmd, int version, void __user *buf)
 869{
 870	int err;
 871	struct shmid_kernel *shp;
 872
 873	/* preliminary security checks for *_INFO */
 874	if (cmd == IPC_INFO || cmd == SHM_INFO) {
 875		err = security_shm_shmctl(NULL, cmd);
 876		if (err)
 877			return err;
 878	}
 879
 880	switch (cmd) {
 881	case IPC_INFO:
 882	{
 883		struct shminfo64 shminfo;
 884
 885		memset(&shminfo, 0, sizeof(shminfo));
 886		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
 887		shminfo.shmmax = ns->shm_ctlmax;
 888		shminfo.shmall = ns->shm_ctlall;
 889
 890		shminfo.shmmin = SHMMIN;
 891		if (copy_shminfo_to_user(buf, &shminfo, version))
 892			return -EFAULT;
 893
 894		down_read(&shm_ids(ns).rwsem);
 895		err = ipc_get_maxid(&shm_ids(ns));
 896		up_read(&shm_ids(ns).rwsem);
 897
 898		if (err < 0)
 899			err = 0;
 900		goto out;
 901	}
 902	case SHM_INFO:
 903	{
 904		struct shm_info shm_info;
 905
 906		memset(&shm_info, 0, sizeof(shm_info));
 907		down_read(&shm_ids(ns).rwsem);
 908		shm_info.used_ids = shm_ids(ns).in_use;
 909		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
 910		shm_info.shm_tot = ns->shm_tot;
 911		shm_info.swap_attempts = 0;
 912		shm_info.swap_successes = 0;
 913		err = ipc_get_maxid(&shm_ids(ns));
 914		up_read(&shm_ids(ns).rwsem);
 915		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
 916			err = -EFAULT;
 917			goto out;
 918		}
 919
 920		err = err < 0 ? 0 : err;
 921		goto out;
 922	}
 923	case SHM_STAT:
 924	case IPC_STAT:
 925	{
 926		struct shmid64_ds tbuf;
 927		int result;
 928
 929		rcu_read_lock();
 930		if (cmd == SHM_STAT) {
 931			shp = shm_obtain_object(ns, shmid);
 932			if (IS_ERR(shp)) {
 933				err = PTR_ERR(shp);
 934				goto out_unlock;
 935			}
 936			result = shp->shm_perm.id;
 937		} else {
 938			shp = shm_obtain_object_check(ns, shmid);
 939			if (IS_ERR(shp)) {
 940				err = PTR_ERR(shp);
 941				goto out_unlock;
 942			}
 943			result = 0;
 944		}
 945
 946		err = -EACCES;
 947		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
 948			goto out_unlock;
 949
 950		err = security_shm_shmctl(shp, cmd);
 951		if (err)
 952			goto out_unlock;
 953
 954		memset(&tbuf, 0, sizeof(tbuf));
 955		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
 956		tbuf.shm_segsz	= shp->shm_segsz;
 957		tbuf.shm_atime	= shp->shm_atim;
 958		tbuf.shm_dtime	= shp->shm_dtim;
 959		tbuf.shm_ctime	= shp->shm_ctim;
 960		tbuf.shm_cpid	= shp->shm_cprid;
 961		tbuf.shm_lpid	= shp->shm_lprid;
 962		tbuf.shm_nattch	= shp->shm_nattch;
 963		rcu_read_unlock();
 964
 965		if (copy_shmid_to_user(buf, &tbuf, version))
 966			err = -EFAULT;
 967		else
 968			err = result;
 969		goto out;
 970	}
 971	default:
 972		return -EINVAL;
 973	}
 974
 975out_unlock:
 976	rcu_read_unlock();
 977out:
 978	return err;
 979}
 980
 981SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 982{
 983	struct shmid_kernel *shp;
 984	int err, version;
 985	struct ipc_namespace *ns;
 986
 987	if (cmd < 0 || shmid < 0)
 988		return -EINVAL;
 989
 990	version = ipc_parse_version(&cmd);
 991	ns = current->nsproxy->ipc_ns;
 992
 993	switch (cmd) {
 994	case IPC_INFO:
 995	case SHM_INFO:
 996	case SHM_STAT:
 997	case IPC_STAT:
 998		return shmctl_nolock(ns, shmid, cmd, version, buf);
 999	case IPC_RMID:
1000	case IPC_SET:
1001		return shmctl_down(ns, shmid, cmd, buf, version);
1002	case SHM_LOCK:
1003	case SHM_UNLOCK:
1004	{
1005		struct file *shm_file;
1006
1007		rcu_read_lock();
1008		shp = shm_obtain_object_check(ns, shmid);
1009		if (IS_ERR(shp)) {
1010			err = PTR_ERR(shp);
1011			goto out_unlock1;
1012		}
1013
1014		audit_ipc_obj(&(shp->shm_perm));
1015		err = security_shm_shmctl(shp, cmd);
1016		if (err)
1017			goto out_unlock1;
1018
1019		ipc_lock_object(&shp->shm_perm);
1020
1021		/* check if shm_destroy() is tearing down shp */
1022		if (!ipc_valid_object(&shp->shm_perm)) {
1023			err = -EIDRM;
1024			goto out_unlock0;
1025		}
1026
1027		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1028			kuid_t euid = current_euid();
1029			if (!uid_eq(euid, shp->shm_perm.uid) &&
1030			    !uid_eq(euid, shp->shm_perm.cuid)) {
1031				err = -EPERM;
1032				goto out_unlock0;
1033			}
1034			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1035				err = -EPERM;
1036				goto out_unlock0;
1037			}
1038		}
1039
1040		shm_file = shp->shm_file;
1041		if (is_file_hugepages(shm_file))
1042			goto out_unlock0;
1043
1044		if (cmd == SHM_LOCK) {
1045			struct user_struct *user = current_user();
1046			err = shmem_lock(shm_file, 1, user);
1047			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1048				shp->shm_perm.mode |= SHM_LOCKED;
1049				shp->mlock_user = user;
1050			}
1051			goto out_unlock0;
1052		}
1053
1054		/* SHM_UNLOCK */
1055		if (!(shp->shm_perm.mode & SHM_LOCKED))
1056			goto out_unlock0;
1057		shmem_lock(shm_file, 0, shp->mlock_user);
1058		shp->shm_perm.mode &= ~SHM_LOCKED;
1059		shp->mlock_user = NULL;
1060		get_file(shm_file);
1061		ipc_unlock_object(&shp->shm_perm);
1062		rcu_read_unlock();
1063		shmem_unlock_mapping(shm_file->f_mapping);
1064
1065		fput(shm_file);
1066		return err;
1067	}
1068	default:
1069		return -EINVAL;
1070	}
1071
1072out_unlock0:
1073	ipc_unlock_object(&shp->shm_perm);
1074out_unlock1:
1075	rcu_read_unlock();
1076	return err;
1077}
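
/*
 * Illustrative user-space sketch (not part of this kernel source): exercising
 * the IPC_STAT and IPC_RMID paths of the shmctl() syscall above (handled by
 * shmctl_nolock() and shmctl_down() respectively).  "id" is assumed to be a
 * valid segment id returned by shmget(); the helper name is arbitrary.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

static int stat_and_remove(int id)
{
	struct shmid_ds ds;

	/* fetch size, attach count, times, ... for the segment */
	if (shmctl(id, IPC_STAT, &ds) < 0) {
		perror("shmctl(IPC_STAT)");
		return -1;
	}
	printf("size=%zu nattch=%lu\n",
	       ds.shm_segsz, (unsigned long)ds.shm_nattch);

	/* mark the segment for destruction once the last attach goes away */
	if (shmctl(id, IPC_RMID, NULL) < 0) {
		perror("shmctl(IPC_RMID)");
		return -1;
	}
	return 0;
}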
1078
1079/*
1080 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1081 *
1082 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1083 * "raddr" thing points to kernel space, and there has to be a wrapper around
1084 * this.
1085 */
1086long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1087	      unsigned long shmlba)
1088{
1089	struct shmid_kernel *shp;
1090	unsigned long addr;
1091	unsigned long size;
1092	struct file *file;
1093	int    err;
1094	unsigned long flags;
1095	unsigned long prot;
1096	int acc_mode;
1097	struct ipc_namespace *ns;
1098	struct shm_file_data *sfd;
1099	struct path path;
1100	fmode_t f_mode;
1101	unsigned long populate = 0;
1102
1103	err = -EINVAL;
1104	if (shmid < 0)
1105		goto out;
1106	else if ((addr = (ulong)shmaddr)) {
1107		if (addr & (shmlba - 1)) {
1108			if (shmflg & SHM_RND)
1109				addr &= ~(shmlba - 1);	   /* round down */
1110			else
1111#ifndef __ARCH_FORCE_SHMLBA
1112				if (addr & ~PAGE_MASK)
1113#endif
1114					goto out;
1115		}
1116		flags = MAP_SHARED | MAP_FIXED;
1117	} else {
1118		if ((shmflg & SHM_REMAP))
1119			goto out;
1120
1121		flags = MAP_SHARED;
1122	}
1123
1124	if (shmflg & SHM_RDONLY) {
1125		prot = PROT_READ;
1126		acc_mode = S_IRUGO;
1127		f_mode = FMODE_READ;
1128	} else {
1129		prot = PROT_READ | PROT_WRITE;
1130		acc_mode = S_IRUGO | S_IWUGO;
1131		f_mode = FMODE_READ | FMODE_WRITE;
1132	}
1133	if (shmflg & SHM_EXEC) {
1134		prot |= PROT_EXEC;
1135		acc_mode |= S_IXUGO;
1136	}
1137
1138	/*
1139	 * We cannot rely on the fs check since SYSV IPC does have an
1140	 * additional creator id...
1141	 */
1142	ns = current->nsproxy->ipc_ns;
1143	rcu_read_lock();
1144	shp = shm_obtain_object_check(ns, shmid);
1145	if (IS_ERR(shp)) {
1146		err = PTR_ERR(shp);
1147		goto out_unlock;
1148	}
1149
1150	err = -EACCES;
1151	if (ipcperms(ns, &shp->shm_perm, acc_mode))
1152		goto out_unlock;
1153
1154	err = security_shm_shmat(shp, shmaddr, shmflg);
1155	if (err)
1156		goto out_unlock;
1157
1158	ipc_lock_object(&shp->shm_perm);
1159
1160	/* check if shm_destroy() is tearing down shp */
1161	if (!ipc_valid_object(&shp->shm_perm)) {
1162		ipc_unlock_object(&shp->shm_perm);
1163		err = -EIDRM;
1164		goto out_unlock;
1165	}
1166
1167	path = shp->shm_file->f_path;
1168	path_get(&path);
1169	shp->shm_nattch++;
1170	size = i_size_read(d_inode(path.dentry));
1171	ipc_unlock_object(&shp->shm_perm);
1172	rcu_read_unlock();
1173
1174	err = -ENOMEM;
1175	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1176	if (!sfd) {
1177		path_put(&path);
1178		goto out_nattch;
1179	}
1180
1181	file = alloc_file(&path, f_mode,
1182			  is_file_hugepages(shp->shm_file) ?
1183				&shm_file_operations_huge :
1184				&shm_file_operations);
1185	err = PTR_ERR(file);
1186	if (IS_ERR(file)) {
1187		kfree(sfd);
1188		path_put(&path);
1189		goto out_nattch;
1190	}
1191
1192	file->private_data = sfd;
1193	file->f_mapping = shp->shm_file->f_mapping;
1194	sfd->id = shp->shm_perm.id;
1195	sfd->ns = get_ipc_ns(ns);
1196	sfd->file = shp->shm_file;
1197	sfd->vm_ops = NULL;
1198
1199	err = security_mmap_file(file, prot, flags);
1200	if (err)
1201		goto out_fput;
1202
1203	down_write(&current->mm->mmap_sem);
1204	if (addr && !(shmflg & SHM_REMAP)) {
1205		err = -EINVAL;
1206		if (addr + size < addr)
1207			goto invalid;
1208
1209		if (find_vma_intersection(current->mm, addr, addr + size))
1210			goto invalid;
1211	}
1212
1213	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
1214	*raddr = addr;
1215	err = 0;
1216	if (IS_ERR_VALUE(addr))
1217		err = (long)addr;
1218invalid:
1219	up_write(&current->mm->mmap_sem);
1220	if (populate)
1221		mm_populate(addr, populate);
1222
1223out_fput:
1224	fput(file);
1225
1226out_nattch:
1227	down_write(&shm_ids(ns).rwsem);
1228	shp = shm_lock(ns, shmid);
1229	shp->shm_nattch--;
1230	if (shm_may_destroy(ns, shp))
1231		shm_destroy(ns, shp);
1232	else
1233		shm_unlock(shp);
1234	up_write(&shm_ids(ns).rwsem);
1235	return err;
1236
1237out_unlock:
1238	rcu_read_unlock();
1239out:
1240	return err;
1241}
1242
1243SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1244{
1245	unsigned long ret;
1246	long err;
1247
1248	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1249	if (err)
1250		return err;
1251	force_successful_syscall_return();
1252	return (long)ret;
1253}
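
/*
 * Illustrative user-space sketch (not part of this kernel source): attaching
 * a segment through the shmat() wrapper above and writing through the
 * mapping.  "id" is assumed to be a valid segment id; a NULL address lets the
 * kernel choose where the mapping is placed.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

static char *attach_and_fill(int id)
{
	char *p = shmat(id, NULL, 0);	/* read/write attach at a kernel-chosen address */

	if (p == (char *)-1) {
		perror("shmat");
		return NULL;
	}
	strcpy(p, "hello");	/* visible to every process attached to this id */
	return p;
}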
1254
1255/*
1256 * detach and kill segment if marked destroyed.
1257 * The work is done in shm_close.
1258 */
1259SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1260{
1261	struct mm_struct *mm = current->mm;
1262	struct vm_area_struct *vma;
1263	unsigned long addr = (unsigned long)shmaddr;
1264	int retval = -EINVAL;
1265#ifdef CONFIG_MMU
1266	loff_t size = 0;
1267	struct file *file;
1268	struct vm_area_struct *next;
1269#endif
1270
1271	if (addr & ~PAGE_MASK)
1272		return retval;
1273
1274	down_write(&mm->mmap_sem);
1275
1276	/*
1277	 * This function tries to be smart and unmap shm segments that
1278	 * were modified by partial mlock or munmap calls:
1279	 * - It first determines the size of the shm segment that should be
1280	 *   unmapped: It searches for a vma that is backed by shm and that
1281	 *   started at address shmaddr. It records its size and then unmaps
1282	 *   it.
1283	 * - Then it unmaps all shm vmas that started at shmaddr and that
1284	 *   are within the initially determined size and that are from the
1285	 *   same shm segment from which we determined the size.
1286	 * Errors from do_munmap are ignored: the function only fails if
1287	 * it's called with invalid parameters or if it's called to unmap
1288	 * a part of a vma. Both calls in this function are for full vmas,
1289	 * the parameters are directly copied from the vma itself and always
1290	 * valid - therefore do_munmap cannot fail. (famous last words?)
1291	 */
1292	/*
1293	 * If it had been mremap()'d, the starting address would not
1294	 * match the usual checks anyway. So assume all vma's are
1295	 * above the starting address given.
1296	 */
1297	vma = find_vma(mm, addr);
1298
1299#ifdef CONFIG_MMU
1300	while (vma) {
1301		next = vma->vm_next;
1302
1303		/*
1304		 * Check if the starting address would match, i.e. it's
1305		 * a fragment created by mprotect() and/or munmap(), or
1306		 * otherwise it starts at this address with no hassles.
1307		 */
1308		if ((vma->vm_ops == &shm_vm_ops) &&
1309			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1310
1311			/*
1312			 * Record the file of the shm segment being
1313			 * unmapped.  With mremap(), someone could place
1314			 * a page from another segment but with equal offsets
1315			 * in the range we are unmapping.
1316			 */
1317			file = vma->vm_file;
1318			size = i_size_read(file_inode(vma->vm_file));
1319			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1320			/*
1321			 * We discovered the size of the shm segment, so
1322			 * break out of here and fall through to the next
1323			 * loop that uses the size information to stop
1324			 * searching for matching vma's.
1325			 */
1326			retval = 0;
1327			vma = next;
1328			break;
1329		}
1330		vma = next;
1331	}
1332
1333	/*
1334	 * We need look no further than the maximum address a fragment
1335	 * could possibly have landed at. Also cast things to loff_t to
1336	 * prevent overflows and make comparisons vs. equal-width types.
1337	 */
1338	size = PAGE_ALIGN(size);
1339	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1340		next = vma->vm_next;
1341
1342		/* finding a matching vma now does not alter retval */
1343		if ((vma->vm_ops == &shm_vm_ops) &&
1344		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1345		    (vma->vm_file == file))
1346			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1347		vma = next;
1348	}
1349
1350#else /* CONFIG_MMU */
1351	/* under NOMMU conditions, the exact address to be destroyed must be
1352	 * given */
1353	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1354		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1355		retval = 0;
1356	}
1357
1358#endif
1359
1360	up_write(&mm->mmap_sem);
1361	return retval;
1362}
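
/*
 * Illustrative user-space sketch (not part of this kernel source): detaching
 * a mapping previously returned by shmat().  The address passed to shmdt()
 * must be the attach address itself (page aligned), matching the PAGE_MASK
 * check at the top of SYSCALL_DEFINE1(shmdt) above.
 */
#include <stdio.h>
#include <sys/shm.h>

static int detach(const void *addr)
{
	if (shmdt(addr) < 0) {
		perror("shmdt");
		return -1;
	}
	return 0;
}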
1363
1364#ifdef CONFIG_PROC_FS
1365static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1366{
1367	struct user_namespace *user_ns = seq_user_ns(s);
1368	struct shmid_kernel *shp = it;
1369	unsigned long rss = 0, swp = 0;
1370
1371	shm_add_rss_swap(shp, &rss, &swp);
1372
1373#if BITS_PER_LONG <= 32
1374#define SIZE_SPEC "%10lu"
1375#else
1376#define SIZE_SPEC "%21lu"
1377#endif
1378
1379	seq_printf(s,
1380		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
1381		   "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
1382		   SIZE_SPEC " " SIZE_SPEC "\n",
1383		   shp->shm_perm.key,
1384		   shp->shm_perm.id,
1385		   shp->shm_perm.mode,
1386		   shp->shm_segsz,
1387		   shp->shm_cprid,
1388		   shp->shm_lprid,
1389		   shp->shm_nattch,
1390		   from_kuid_munged(user_ns, shp->shm_perm.uid),
1391		   from_kgid_munged(user_ns, shp->shm_perm.gid),
1392		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
1393		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
1394		   shp->shm_atim,
1395		   shp->shm_dtim,
1396		   shp->shm_ctim,
1397		   rss * PAGE_SIZE,
1398		   swp * PAGE_SIZE);
1399
1400	return 0;
1401}
1402#endif
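
/*
 * For reference: each segment appears as one line of /proc/sysvipc/shm, in
 * the column order produced by the seq_printf() above: key, shmid, perms,
 * size, cpid, lpid, nattch, uid, gid, cuid, cgid, atime, dtime, ctime,
 * rss, swap (rss and swap are reported in bytes).
 */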