v4.6
   1/*
   2 * linux/ipc/shm.c
   3 * Copyright (C) 1992, 1993 Krishna Balasubramanian
   4 *	 Many improvements/fixes by Bruno Haible.
   5 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
   6 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
   7 *
   8 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
   9 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
  10 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
  11 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
  12 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
  13 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
  14 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
  15 *
  16 * support for audit of ipc object properties and permission changes
  17 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  18 *
  19 * namespaces support
  20 * OpenVZ, SWsoft Inc.
  21 * Pavel Emelianov <xemul@openvz.org>
  22 *
  23 * Better ipc lock (kern_ipc_perm.lock) handling
  24 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
  25 */
  26
  27#include <linux/slab.h>
  28#include <linux/mm.h>
  29#include <linux/hugetlb.h>
  30#include <linux/shm.h>
  31#include <linux/init.h>
  32#include <linux/file.h>
  33#include <linux/mman.h>
  34#include <linux/shmem_fs.h>
  35#include <linux/security.h>
  36#include <linux/syscalls.h>
  37#include <linux/audit.h>
  38#include <linux/capability.h>
  39#include <linux/ptrace.h>
  40#include <linux/seq_file.h>
  41#include <linux/rwsem.h>
  42#include <linux/nsproxy.h>
  43#include <linux/mount.h>
  44#include <linux/ipc_namespace.h>
  45
  46#include <linux/uaccess.h>
  47
  48#include "util.h"
  49
  50struct shm_file_data {
  51	int id;
  52	struct ipc_namespace *ns;
  53	struct file *file;
  54	const struct vm_operations_struct *vm_ops;
  55};
  56
  57#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
  58
  59static const struct file_operations shm_file_operations;
  60static const struct vm_operations_struct shm_vm_ops;
  61
  62#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])
  63
  64#define shm_unlock(shp)			\
  65	ipc_unlock(&(shp)->shm_perm)
  66
  67static int newseg(struct ipc_namespace *, struct ipc_params *);
  68static void shm_open(struct vm_area_struct *vma);
  69static void shm_close(struct vm_area_struct *vma);
  70static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
  71#ifdef CONFIG_PROC_FS
  72static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
  73#endif
  74
  75void shm_init_ns(struct ipc_namespace *ns)
  76{
  77	ns->shm_ctlmax = SHMMAX;
  78	ns->shm_ctlall = SHMALL;
  79	ns->shm_ctlmni = SHMMNI;
  80	ns->shm_rmid_forced = 0;
  81	ns->shm_tot = 0;
  82	ipc_init_ids(&shm_ids(ns));
  83}
  84
  85/*
  86 * Called with shm_ids.rwsem (writer) and the shp structure locked.
  87 * Only shm_ids.rwsem remains locked on exit.
  88 */
  89static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
  90{
  91	struct shmid_kernel *shp;
  92	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
  93
  94	if (shp->shm_nattch) {
  95		shp->shm_perm.mode |= SHM_DEST;
  96		/* Do not find it any more */
  97		shp->shm_perm.key = IPC_PRIVATE;
  98		shm_unlock(shp);
  99	} else
 100		shm_destroy(ns, shp);
 101}
 102
 103#ifdef CONFIG_IPC_NS
 104void shm_exit_ns(struct ipc_namespace *ns)
 105{
 106	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
 107	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
 108}
 109#endif
 110
 111static int __init ipc_ns_init(void)
 112{
 113	shm_init_ns(&init_ipc_ns);
 114	return 0;
 115}
 116
 117pure_initcall(ipc_ns_init);
 118
 119void __init shm_init(void)
 120{
 121	ipc_init_proc_interface("sysvipc/shm",
 122#if BITS_PER_LONG <= 32
 123				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
 124#else
 125				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
 126#endif
 127				IPC_SHM_IDS, sysvipc_shm_proc_show);
 128}
 129
 130static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
 131{
 132	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
 133
 134	if (IS_ERR(ipcp))
 135		return ERR_CAST(ipcp);
 136
 137	return container_of(ipcp, struct shmid_kernel, shm_perm);
 138}
 139
 140static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
 141{
 142	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
 143
 144	if (IS_ERR(ipcp))
 145		return ERR_CAST(ipcp);
 146
 147	return container_of(ipcp, struct shmid_kernel, shm_perm);
 148}
 149
 150/*
 151 * shm_lock_(check_) routines are called in the paths where the rwsem
 152 * is not necessarily held.
 153 */
 154static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 155{
 156	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
 157
 158	/*
 159	 * Callers of shm_lock() must validate the status of the returned ipc
 160	 * object pointer (as returned by ipc_lock()), and error out as
 161	 * appropriate.
 162	 */
 163	if (IS_ERR(ipcp))
 164		return (void *)ipcp;
 165	return container_of(ipcp, struct shmid_kernel, shm_perm);
 166}
 167
 168static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
 169{
 170	rcu_read_lock();
 171	ipc_lock_object(&ipcp->shm_perm);
 172}
 173
 174static void shm_rcu_free(struct rcu_head *head)
 175{
 176	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
 177	struct shmid_kernel *shp = ipc_rcu_to_struct(p);
 178
 179	security_shm_free(shp);
 180	ipc_rcu_free(head);
 181}
 182
 183static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 184{
 185	list_del(&s->shm_clist);
 186	ipc_rmid(&shm_ids(ns), &s->shm_perm);
 187}
 188
 189
 190static int __shm_open(struct vm_area_struct *vma)
 191{
 192	struct file *file = vma->vm_file;
 193	struct shm_file_data *sfd = shm_file_data(file);
 194	struct shmid_kernel *shp;
 195
 196	shp = shm_lock(sfd->ns, sfd->id);
 197
 198	if (IS_ERR(shp))
 199		return PTR_ERR(shp);
 200
 201	shp->shm_atim = get_seconds();
 202	shp->shm_lprid = task_tgid_vnr(current);
 203	shp->shm_nattch++;
 204	shm_unlock(shp);
 205	return 0;
 206}
 207
 208/* This is called by fork, once for every shm attach. */
 209static void shm_open(struct vm_area_struct *vma)
 210{
 211	int err = __shm_open(vma);
 212	/*
 213	 * We raced in the idr lookup or with shm_destroy().
 214	 * Either way, the ID is busted.
 215	 */
 216	WARN_ON_ONCE(err);
 217}
 218
 219/*
 220 * shm_destroy - free the struct shmid_kernel
 221 *
 222 * @ns: namespace
 223 * @shp: struct to free
 224 *
 225 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 226 * but returns with shp unlocked and freed.
 227 */
 228static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 229{
 230	struct file *shm_file;
 231
 232	shm_file = shp->shm_file;
 233	shp->shm_file = NULL;
 234	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
 235	shm_rmid(ns, shp);
 236	shm_unlock(shp);
 237	if (!is_file_hugepages(shm_file))
 238		shmem_lock(shm_file, 0, shp->mlock_user);
 239	else if (shp->mlock_user)
 240		user_shm_unlock(i_size_read(file_inode(shm_file)),
 241				shp->mlock_user);
 242	fput(shm_file);
 243	ipc_rcu_putref(shp, shm_rcu_free);
 244}
 245
 246/*
 247 * shm_may_destroy - identifies whether shm segment should be destroyed now
 248 *
 249 * Returns true if and only if there are no active users of the segment and
 250 * one of the following is true:
 251 *
 252 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 253 *
 254 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 255 */
 256static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 257{
 258	return (shp->shm_nattch == 0) &&
 259	       (ns->shm_rmid_forced ||
 260		(shp->shm_perm.mode & SHM_DEST));
 261}
 262
 263/*
 264 * remove the attach descriptor vma.
 265 * free memory for segment if it is marked destroyed.
 266 * The descriptor has already been removed from the current->mm->mmap list
 267 * and will later be kfree()d.
 268 */
 269static void shm_close(struct vm_area_struct *vma)
 270{
 271	struct file *file = vma->vm_file;
 272	struct shm_file_data *sfd = shm_file_data(file);
 273	struct shmid_kernel *shp;
 274	struct ipc_namespace *ns = sfd->ns;
 275
 276	down_write(&shm_ids(ns).rwsem);
 277	/* remove from the list of attaches of the shm segment */
 278	shp = shm_lock(ns, sfd->id);
 279
 280	/*
 281	 * We raced in the idr lookup or with shm_destroy().
 282	 * Either way, the ID is busted.
 283	 */
 284	if (WARN_ON_ONCE(IS_ERR(shp)))
 285		goto done; /* no-op */
 286
 287	shp->shm_lprid = task_tgid_vnr(current);
 288	shp->shm_dtim = get_seconds();
 289	shp->shm_nattch--;
 290	if (shm_may_destroy(ns, shp))
 291		shm_destroy(ns, shp);
 292	else
 293		shm_unlock(shp);
 294done:
 295	up_write(&shm_ids(ns).rwsem);
 296}
 297
 298/* Called with ns->shm_ids(ns).rwsem locked */
 299static int shm_try_destroy_orphaned(int id, void *p, void *data)
 300{
 301	struct ipc_namespace *ns = data;
 302	struct kern_ipc_perm *ipcp = p;
 303	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 304
 305	/*
 306	 * We want to destroy segments without users and with already
 307	 * exit'ed originating process.
 308	 *
 309	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
 310	 */
 311	if (shp->shm_creator != NULL)
 312		return 0;
 313
 314	if (shm_may_destroy(ns, shp)) {
 315		shm_lock_by_ptr(shp);
 316		shm_destroy(ns, shp);
 317	}
 318	return 0;
 319}
 320
 321void shm_destroy_orphaned(struct ipc_namespace *ns)
 322{
 323	down_write(&shm_ids(ns).rwsem);
 324	if (shm_ids(ns).in_use)
 325		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
 326	up_write(&shm_ids(ns).rwsem);
 327}
 328
 329/* Locking assumes this will only be called with task == current */
 330void exit_shm(struct task_struct *task)
 331{
 332	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
 333	struct shmid_kernel *shp, *n;
 334
 335	if (list_empty(&task->sysvshm.shm_clist))
 336		return;
 337
 338	/*
 339	 * If kernel.shm_rmid_forced is not set then only keep track of
 340	 * which shmids are orphaned, so that a later set of the sysctl
 341	 * can clean them up.
 342	 */
 343	if (!ns->shm_rmid_forced) {
 344		down_read(&shm_ids(ns).rwsem);
 345		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
 346			shp->shm_creator = NULL;
  347		/*
  348		 * Only under the read lock, but we are only called on
  349		 * current, so no entry on the list will be shared.
  350		 */
 351		list_del(&task->sysvshm.shm_clist);
 352		up_read(&shm_ids(ns).rwsem);
 353		return;
 354	}
 355
 356	/*
 357	 * Destroy all already created segments, that were not yet mapped,
 358	 * and mark any mapped as orphan to cover the sysctl toggling.
 359	 * Destroy is skipped if shm_may_destroy() returns false.
 360	 */
 361	down_write(&shm_ids(ns).rwsem);
 362	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
 363		shp->shm_creator = NULL;
 364
 365		if (shm_may_destroy(ns, shp)) {
 366			shm_lock_by_ptr(shp);
 367			shm_destroy(ns, shp);
 368		}
 369	}
 370
 371	/* Remove the list head from any segments still attached. */
 372	list_del(&task->sysvshm.shm_clist);
 373	up_write(&shm_ids(ns).rwsem);
 374}
 375
 376static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 377{
 378	struct file *file = vma->vm_file;
 379	struct shm_file_data *sfd = shm_file_data(file);
 380
 381	return sfd->vm_ops->fault(vma, vmf);
 382}
 383
 384#ifdef CONFIG_NUMA
 385static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 386{
 387	struct file *file = vma->vm_file;
 388	struct shm_file_data *sfd = shm_file_data(file);
 389	int err = 0;
 390	if (sfd->vm_ops->set_policy)
 391		err = sfd->vm_ops->set_policy(vma, new);
 392	return err;
 393}
 394
 395static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
 396					unsigned long addr)
 397{
 398	struct file *file = vma->vm_file;
 399	struct shm_file_data *sfd = shm_file_data(file);
 400	struct mempolicy *pol = NULL;
 401
 402	if (sfd->vm_ops->get_policy)
 403		pol = sfd->vm_ops->get_policy(vma, addr);
 404	else if (vma->vm_policy)
 405		pol = vma->vm_policy;
 406
 407	return pol;
 408}
 409#endif
 410
 411static int shm_mmap(struct file *file, struct vm_area_struct *vma)
 412{
 413	struct shm_file_data *sfd = shm_file_data(file);
 414	int ret;
 415
 416	/*
 417	 * In case of remap_file_pages() emulation, the file can represent
  418	 * removed IPC ID: propagate shm_lock() error to caller.
  419	 */
  420	ret = __shm_open(vma);
 421	if (ret)
 422		return ret;
 423
 424	ret = sfd->file->f_op->mmap(sfd->file, vma);
 425	if (ret) {
 426		shm_close(vma);
 427		return ret;
 428	}
 429	sfd->vm_ops = vma->vm_ops;
 430#ifdef CONFIG_MMU
 431	WARN_ON(!sfd->vm_ops->fault);
 432#endif
 433	vma->vm_ops = &shm_vm_ops;
 434	return 0;
 435}
 436
 437static int shm_release(struct inode *ino, struct file *file)
 438{
 439	struct shm_file_data *sfd = shm_file_data(file);
 440
 441	put_ipc_ns(sfd->ns);
 442	shm_file_data(file) = NULL;
 443	kfree(sfd);
 444	return 0;
 445}
 446
 447static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 448{
 449	struct shm_file_data *sfd = shm_file_data(file);
 450
 451	if (!sfd->file->f_op->fsync)
 452		return -EINVAL;
 453	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
 454}
 455
 456static long shm_fallocate(struct file *file, int mode, loff_t offset,
 457			  loff_t len)
 458{
 459	struct shm_file_data *sfd = shm_file_data(file);
 460
 461	if (!sfd->file->f_op->fallocate)
 462		return -EOPNOTSUPP;
 463	return sfd->file->f_op->fallocate(file, mode, offset, len);
 464}
 465
 466static unsigned long shm_get_unmapped_area(struct file *file,
 467	unsigned long addr, unsigned long len, unsigned long pgoff,
 468	unsigned long flags)
 469{
 470	struct shm_file_data *sfd = shm_file_data(file);
 471	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
 472						pgoff, flags);
 473}
 474
 475static const struct file_operations shm_file_operations = {
 476	.mmap		= shm_mmap,
 477	.fsync		= shm_fsync,
 478	.release	= shm_release,
 479#ifndef CONFIG_MMU
 480	.get_unmapped_area	= shm_get_unmapped_area,
 481#endif
 482	.llseek		= noop_llseek,
 483	.fallocate	= shm_fallocate,
 484};
 485
 486static const struct file_operations shm_file_operations_huge = {
 487	.mmap		= shm_mmap,
 488	.fsync		= shm_fsync,
 489	.release	= shm_release,
 490	.get_unmapped_area	= shm_get_unmapped_area,
 491	.llseek		= noop_llseek,
 492	.fallocate	= shm_fallocate,
 493};
 494
 495bool is_file_shm_hugepages(struct file *file)
 496{
 497	return file->f_op == &shm_file_operations_huge;
 498}
 499
 500static const struct vm_operations_struct shm_vm_ops = {
 501	.open	= shm_open,	/* callback for a new vm-area open */
 502	.close	= shm_close,	/* callback for when the vm-area is released */
 503	.fault	= shm_fault,
 504#if defined(CONFIG_NUMA)
 505	.set_policy = shm_set_policy,
 506	.get_policy = shm_get_policy,
 507#endif
 508};
 509
 510/**
 511 * newseg - Create a new shared memory segment
 512 * @ns: namespace
 513 * @params: ptr to the structure that contains key, size and shmflg
 514 *
 515 * Called with shm_ids.rwsem held as a writer.
 516 */
 517static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 518{
 519	key_t key = params->key;
 520	int shmflg = params->flg;
 521	size_t size = params->u.size;
 522	int error;
 523	struct shmid_kernel *shp;
 524	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 525	struct file *file;
 526	char name[13];
 527	int id;
 528	vm_flags_t acctflag = 0;
 529
 530	if (size < SHMMIN || size > ns->shm_ctlmax)
 531		return -EINVAL;
 532
 533	if (numpages << PAGE_SHIFT < size)
 534		return -ENOSPC;
 535
 536	if (ns->shm_tot + numpages < ns->shm_tot ||
 537			ns->shm_tot + numpages > ns->shm_ctlall)
 538		return -ENOSPC;
 539
 540	shp = ipc_rcu_alloc(sizeof(*shp));
 541	if (!shp)
 542		return -ENOMEM;
 543
 544	shp->shm_perm.key = key;
 545	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
 546	shp->mlock_user = NULL;
 547
 548	shp->shm_perm.security = NULL;
 549	error = security_shm_alloc(shp);
 550	if (error) {
 551		ipc_rcu_putref(shp, ipc_rcu_free);
 552		return error;
 553	}
 554
 555	sprintf(name, "SYSV%08x", key);
 556	if (shmflg & SHM_HUGETLB) {
 557		struct hstate *hs;
 558		size_t hugesize;
 559
 560		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 561		if (!hs) {
 562			error = -EINVAL;
 563			goto no_file;
 564		}
 565		hugesize = ALIGN(size, huge_page_size(hs));
 566
 567		/* hugetlb_file_setup applies strict accounting */
 568		if (shmflg & SHM_NORESERVE)
 569			acctflag = VM_NORESERVE;
 570		file = hugetlb_file_setup(name, hugesize, acctflag,
 571				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
 572				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 573	} else {
 574		/*
 575		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
 576		 * if it's asked for.
 577		 */
 578		if  ((shmflg & SHM_NORESERVE) &&
 579				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
 580			acctflag = VM_NORESERVE;
 581		file = shmem_kernel_file_setup(name, size, acctflag);
 582	}
 583	error = PTR_ERR(file);
 584	if (IS_ERR(file))
 585		goto no_file;
 586
 587	shp->shm_cprid = task_tgid_vnr(current);
 588	shp->shm_lprid = 0;
 589	shp->shm_atim = shp->shm_dtim = 0;
 590	shp->shm_ctim = get_seconds();
 591	shp->shm_segsz = size;
 592	shp->shm_nattch = 0;
 593	shp->shm_file = file;
 594	shp->shm_creator = current;
 595
 596	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
 597	if (id < 0) {
 598		error = id;
 599		goto no_id;
 600	}
 601
 602	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
 603
 604	/*
 605	 * shmid gets reported as "inode#" in /proc/pid/maps.
 606	 * proc-ps tools use this. Changing this will break them.
 607	 */
 608	file_inode(file)->i_ino = shp->shm_perm.id;
 609
 610	ns->shm_tot += numpages;
 611	error = shp->shm_perm.id;
 612
 613	ipc_unlock_object(&shp->shm_perm);
 614	rcu_read_unlock();
 615	return error;
 616
 617no_id:
 618	if (is_file_hugepages(file) && shp->mlock_user)
 619		user_shm_unlock(size, shp->mlock_user);
 620	fput(file);
 621no_file:
 622	ipc_rcu_putref(shp, shm_rcu_free);
 623	return error;
 624}
 625
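The SHM_HUGETLB branch of newseg() above backs the segment with hugetlbfs and rounds the size up to a multiple of the huge page size. A minimal userspace sketch of that path (illustrative only, not part of this file; it assumes huge pages have been reserved, e.g. via vm.nr_hugepages, and a libc that exposes SHM_HUGETLB):

	#include <sys/ipc.h>
	#include <sys/shm.h>
	#include <stdio.h>

	int main(void)
	{
		/* 8 MiB request; newseg() aligns it up to the huge page size */
		int id = shmget(IPC_PRIVATE, 8 * 1024 * 1024,
				IPC_CREAT | SHM_HUGETLB | 0600);
		if (id < 0)
			perror("shmget(SHM_HUGETLB)");
		else
			printf("hugetlb-backed segment id %d\n", id);
		return 0;
	}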
 626/*
 627 * Called with shm_ids.rwsem and ipcp locked.
 628 */
 629static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
 630{
 631	struct shmid_kernel *shp;
 632
 633	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 634	return security_shm_associate(shp, shmflg);
 635}
 636
 637/*
 638 * Called with shm_ids.rwsem and ipcp locked.
 639 */
 640static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
 641				struct ipc_params *params)
 642{
 643	struct shmid_kernel *shp;
 644
 645	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 646	if (shp->shm_segsz < params->u.size)
 647		return -EINVAL;
 648
 649	return 0;
 650}
 651
 652SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
 653{
 654	struct ipc_namespace *ns;
 655	static const struct ipc_ops shm_ops = {
 656		.getnew = newseg,
 657		.associate = shm_security,
 658		.more_checks = shm_more_checks,
 659	};
 660	struct ipc_params shm_params;
 661
 662	ns = current->nsproxy->ipc_ns;
 663
 664	shm_params.key = key;
 665	shm_params.flg = shmflg;
 666	shm_params.u.size = size;
 667
 668	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
 669}
 670
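For reference, the userspace side of this syscall: ipcget() routes new keys to newseg() and existing keys through shm_more_checks(), which rejects a request larger than the existing segment. A small illustrative sketch (the key value is arbitrary):

	#include <sys/ipc.h>
	#include <sys/shm.h>
	#include <errno.h>
	#include <stdio.h>

	int main(void)
	{
		key_t key = 0x1234;	/* arbitrary example key */
		int id = shmget(key, 4096, IPC_CREAT | 0600);

		/* a larger size against the same key fails in shm_more_checks() */
		if (shmget(key, 8192, 0600) < 0 && errno == EINVAL)
			printf("oversized lookup rejected with EINVAL\n");

		shmctl(id, IPC_RMID, NULL);	/* clean up */
		return 0;
	}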
 671static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
 672{
 673	switch (version) {
 674	case IPC_64:
 675		return copy_to_user(buf, in, sizeof(*in));
 676	case IPC_OLD:
 677	    {
 678		struct shmid_ds out;
 679
 680		memset(&out, 0, sizeof(out));
 681		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
 682		out.shm_segsz	= in->shm_segsz;
 683		out.shm_atime	= in->shm_atime;
 684		out.shm_dtime	= in->shm_dtime;
 685		out.shm_ctime	= in->shm_ctime;
 686		out.shm_cpid	= in->shm_cpid;
 687		out.shm_lpid	= in->shm_lpid;
 688		out.shm_nattch	= in->shm_nattch;
 689
 690		return copy_to_user(buf, &out, sizeof(out));
 691	    }
 692	default:
 693		return -EINVAL;
 694	}
 695}
 696
 697static inline unsigned long
 698copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
 699{
 700	switch (version) {
 701	case IPC_64:
 702		if (copy_from_user(out, buf, sizeof(*out)))
 703			return -EFAULT;
 704		return 0;
 705	case IPC_OLD:
 706	    {
 707		struct shmid_ds tbuf_old;
 708
 709		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 710			return -EFAULT;
 711
 712		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
 713		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
 714		out->shm_perm.mode	= tbuf_old.shm_perm.mode;
 715
 716		return 0;
 717	    }
 718	default:
 719		return -EINVAL;
 720	}
 721}
 722
 723static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
 724{
 725	switch (version) {
 726	case IPC_64:
 727		return copy_to_user(buf, in, sizeof(*in));
 728	case IPC_OLD:
 729	    {
 730		struct shminfo out;
 731
 732		if (in->shmmax > INT_MAX)
 733			out.shmmax = INT_MAX;
 734		else
 735			out.shmmax = (int)in->shmmax;
 736
 737		out.shmmin	= in->shmmin;
 738		out.shmmni	= in->shmmni;
 739		out.shmseg	= in->shmseg;
 740		out.shmall	= in->shmall;
 741
 742		return copy_to_user(buf, &out, sizeof(out));
 743	    }
 744	default:
 745		return -EINVAL;
 746	}
 747}
 748
 749/*
 750 * Calculate and add used RSS and swap pages of a shm.
 751 * Called with shm_ids.rwsem held as a reader
 752 */
 753static void shm_add_rss_swap(struct shmid_kernel *shp,
 754	unsigned long *rss_add, unsigned long *swp_add)
 755{
 756	struct inode *inode;
 757
 758	inode = file_inode(shp->shm_file);
 759
 760	if (is_file_hugepages(shp->shm_file)) {
 761		struct address_space *mapping = inode->i_mapping;
 762		struct hstate *h = hstate_file(shp->shm_file);
 763		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
 764	} else {
 765#ifdef CONFIG_SHMEM
 766		struct shmem_inode_info *info = SHMEM_I(inode);
 767		spin_lock(&info->lock);
 768		*rss_add += inode->i_mapping->nrpages;
 769		*swp_add += info->swapped;
 770		spin_unlock(&info->lock);
 771#else
 772		*rss_add += inode->i_mapping->nrpages;
 773#endif
 774	}
 775}
 776
 777/*
 778 * Called with shm_ids.rwsem held as a reader
 779 */
 780static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
 781		unsigned long *swp)
 782{
 783	int next_id;
 784	int total, in_use;
 785
 786	*rss = 0;
 787	*swp = 0;
 788
 789	in_use = shm_ids(ns).in_use;
 790
 791	for (total = 0, next_id = 0; total < in_use; next_id++) {
 792		struct kern_ipc_perm *ipc;
 793		struct shmid_kernel *shp;
 794
 795		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
 796		if (ipc == NULL)
 797			continue;
 798		shp = container_of(ipc, struct shmid_kernel, shm_perm);
 799
 800		shm_add_rss_swap(shp, rss, swp);
 801
 802		total++;
 803	}
 804}
 805
 806/*
 807 * This function handles some shmctl commands which require the rwsem
 808 * to be held in write mode.
 809 * NOTE: no locks must be held, the rwsem is taken inside this function.
 810 */
 811static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
 812		       struct shmid_ds __user *buf, int version)
 813{
 814	struct kern_ipc_perm *ipcp;
 815	struct shmid64_ds shmid64;
 816	struct shmid_kernel *shp;
 817	int err;
 818
 819	if (cmd == IPC_SET) {
 820		if (copy_shmid_from_user(&shmid64, buf, version))
 821			return -EFAULT;
 822	}
 823
 824	down_write(&shm_ids(ns).rwsem);
 825	rcu_read_lock();
 826
 827	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
 828				      &shmid64.shm_perm, 0);
 829	if (IS_ERR(ipcp)) {
 830		err = PTR_ERR(ipcp);
 831		goto out_unlock1;
 832	}
 833
 834	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 835
 836	err = security_shm_shmctl(shp, cmd);
 837	if (err)
 838		goto out_unlock1;
 839
 840	switch (cmd) {
 841	case IPC_RMID:
 842		ipc_lock_object(&shp->shm_perm);
 843		/* do_shm_rmid unlocks the ipc object and rcu */
 844		do_shm_rmid(ns, ipcp);
 845		goto out_up;
 846	case IPC_SET:
 847		ipc_lock_object(&shp->shm_perm);
 848		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
 849		if (err)
 850			goto out_unlock0;
 851		shp->shm_ctim = get_seconds();
 852		break;
 853	default:
 854		err = -EINVAL;
 855		goto out_unlock1;
 856	}
 857
 858out_unlock0:
 859	ipc_unlock_object(&shp->shm_perm);
 860out_unlock1:
 861	rcu_read_unlock();
 862out_up:
 863	up_write(&shm_ids(ns).rwsem);
 864	return err;
 865}
 866
 867static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
 868			 int cmd, int version, void __user *buf)
 869{
 870	int err;
 871	struct shmid_kernel *shp;
 872
 873	/* preliminary security checks for *_INFO */
 874	if (cmd == IPC_INFO || cmd == SHM_INFO) {
 875		err = security_shm_shmctl(NULL, cmd);
 876		if (err)
 877			return err;
 878	}
 879
 880	switch (cmd) {
 881	case IPC_INFO:
 882	{
 883		struct shminfo64 shminfo;
 884
 885		memset(&shminfo, 0, sizeof(shminfo));
 886		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
 887		shminfo.shmmax = ns->shm_ctlmax;
 888		shminfo.shmall = ns->shm_ctlall;
 889
 890		shminfo.shmmin = SHMMIN;
 891		if (copy_shminfo_to_user(buf, &shminfo, version))
 892			return -EFAULT;
 893
 894		down_read(&shm_ids(ns).rwsem);
 895		err = ipc_get_maxid(&shm_ids(ns));
 896		up_read(&shm_ids(ns).rwsem);
 897
 898		if (err < 0)
 899			err = 0;
 900		goto out;
 901	}
 902	case SHM_INFO:
 903	{
 904		struct shm_info shm_info;
 905
 906		memset(&shm_info, 0, sizeof(shm_info));
 907		down_read(&shm_ids(ns).rwsem);
 908		shm_info.used_ids = shm_ids(ns).in_use;
 909		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
 910		shm_info.shm_tot = ns->shm_tot;
 911		shm_info.swap_attempts = 0;
 912		shm_info.swap_successes = 0;
 913		err = ipc_get_maxid(&shm_ids(ns));
 914		up_read(&shm_ids(ns).rwsem);
 915		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
 916			err = -EFAULT;
 917			goto out;
 918		}
 919
 920		err = err < 0 ? 0 : err;
 921		goto out;
 922	}
 923	case SHM_STAT:
 924	case IPC_STAT:
 925	{
 926		struct shmid64_ds tbuf;
 927		int result;
 928
 929		rcu_read_lock();
 930		if (cmd == SHM_STAT) {
 931			shp = shm_obtain_object(ns, shmid);
 932			if (IS_ERR(shp)) {
 933				err = PTR_ERR(shp);
 934				goto out_unlock;
 935			}
 936			result = shp->shm_perm.id;
 937		} else {
 938			shp = shm_obtain_object_check(ns, shmid);
 939			if (IS_ERR(shp)) {
 940				err = PTR_ERR(shp);
 941				goto out_unlock;
 942			}
 943			result = 0;
 944		}
 945
 946		err = -EACCES;
 947		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
 948			goto out_unlock;
 949
 950		err = security_shm_shmctl(shp, cmd);
 951		if (err)
 952			goto out_unlock;
 953
 954		memset(&tbuf, 0, sizeof(tbuf));
 955		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
 956		tbuf.shm_segsz	= shp->shm_segsz;
 957		tbuf.shm_atime	= shp->shm_atim;
 958		tbuf.shm_dtime	= shp->shm_dtim;
 959		tbuf.shm_ctime	= shp->shm_ctim;
 960		tbuf.shm_cpid	= shp->shm_cprid;
 961		tbuf.shm_lpid	= shp->shm_lprid;
 962		tbuf.shm_nattch	= shp->shm_nattch;
 963		rcu_read_unlock();
 964
 965		if (copy_shmid_to_user(buf, &tbuf, version))
 966			err = -EFAULT;
 967		else
 968			err = result;
 969		goto out;
 970	}
 971	default:
 972		return -EINVAL;
 973	}
 974
 975out_unlock:
 976	rcu_read_unlock();
 977out:
 978	return err;
 979}
 980
 981SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 982{
 983	struct shmid_kernel *shp;
 984	int err, version;
 985	struct ipc_namespace *ns;
 986
 987	if (cmd < 0 || shmid < 0)
 988		return -EINVAL;
 989
 990	version = ipc_parse_version(&cmd);
 991	ns = current->nsproxy->ipc_ns;
 992
 993	switch (cmd) {
 994	case IPC_INFO:
 995	case SHM_INFO:
 996	case SHM_STAT:
 997	case IPC_STAT:
 998		return shmctl_nolock(ns, shmid, cmd, version, buf);
 999	case IPC_RMID:
1000	case IPC_SET:
1001		return shmctl_down(ns, shmid, cmd, buf, version);
1002	case SHM_LOCK:
1003	case SHM_UNLOCK:
1004	{
1005		struct file *shm_file;
1006
1007		rcu_read_lock();
1008		shp = shm_obtain_object_check(ns, shmid);
1009		if (IS_ERR(shp)) {
1010			err = PTR_ERR(shp);
1011			goto out_unlock1;
1012		}
1013
1014		audit_ipc_obj(&(shp->shm_perm));
1015		err = security_shm_shmctl(shp, cmd);
1016		if (err)
1017			goto out_unlock1;
1018
1019		ipc_lock_object(&shp->shm_perm);
1020
1021		/* check if shm_destroy() is tearing down shp */
1022		if (!ipc_valid_object(&shp->shm_perm)) {
1023			err = -EIDRM;
1024			goto out_unlock0;
1025		}
1026
1027		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1028			kuid_t euid = current_euid();
1029			if (!uid_eq(euid, shp->shm_perm.uid) &&
1030			    !uid_eq(euid, shp->shm_perm.cuid)) {
1031				err = -EPERM;
1032				goto out_unlock0;
1033			}
1034			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1035				err = -EPERM;
1036				goto out_unlock0;
1037			}
1038		}
1039
1040		shm_file = shp->shm_file;
1041		if (is_file_hugepages(shm_file))
1042			goto out_unlock0;
1043
1044		if (cmd == SHM_LOCK) {
1045			struct user_struct *user = current_user();
1046			err = shmem_lock(shm_file, 1, user);
1047			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1048				shp->shm_perm.mode |= SHM_LOCKED;
1049				shp->mlock_user = user;
1050			}
1051			goto out_unlock0;
1052		}
1053
1054		/* SHM_UNLOCK */
1055		if (!(shp->shm_perm.mode & SHM_LOCKED))
1056			goto out_unlock0;
1057		shmem_lock(shm_file, 0, shp->mlock_user);
1058		shp->shm_perm.mode &= ~SHM_LOCKED;
1059		shp->mlock_user = NULL;
1060		get_file(shm_file);
1061		ipc_unlock_object(&shp->shm_perm);
1062		rcu_read_unlock();
1063		shmem_unlock_mapping(shm_file->f_mapping);
1064
1065		fput(shm_file);
1066		return err;
1067	}
1068	default:
1069		return -EINVAL;
1070	}
1071
1072out_unlock0:
1073	ipc_unlock_object(&shp->shm_perm);
1074out_unlock1:
1075	rcu_read_unlock();
1076	return err;
1077}
1078
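The userspace view of the commands dispatched above, as an illustrative sketch (SHM_LOCK succeeds only with CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK, per the checks in the SHM_LOCK branch):

	#include <sys/ipc.h>
	#include <sys/shm.h>
	#include <stdio.h>

	int main(void)
	{
		struct shmid_ds ds;
		int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

		if (shmctl(id, IPC_STAT, &ds) == 0)	/* shmctl_nolock() path */
			printf("size=%lu nattch=%lu\n",
			       (unsigned long)ds.shm_segsz,
			       (unsigned long)ds.shm_nattch);

		if (shmctl(id, SHM_LOCK, NULL) != 0)	/* pin pages in memory */
			perror("SHM_LOCK");

		shmctl(id, IPC_RMID, NULL);	/* shmctl_down() path */
		return 0;
	}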
1079/*
1080 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1081 *
1082 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1083 * "raddr" thing points to kernel space, and there has to be a wrapper around
1084 * this.
1085 */
1086long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1087	      unsigned long shmlba)
1088{
1089	struct shmid_kernel *shp;
1090	unsigned long addr;
1091	unsigned long size;
1092	struct file *file;
1093	int    err;
1094	unsigned long flags;
1095	unsigned long prot;
1096	int acc_mode;
1097	struct ipc_namespace *ns;
1098	struct shm_file_data *sfd;
1099	struct path path;
1100	fmode_t f_mode;
1101	unsigned long populate = 0;
1102
1103	err = -EINVAL;
1104	if (shmid < 0)
1105		goto out;
1106	else if ((addr = (ulong)shmaddr)) {
1107		if (addr & (shmlba - 1)) {
1108			if (shmflg & SHM_RND)
1109				addr &= ~(shmlba - 1);	   /* round down */
1110			else
1111#ifndef __ARCH_FORCE_SHMLBA
1112				if (addr & ~PAGE_MASK)
1113#endif
1114					goto out;
1115		}
1116		flags = MAP_SHARED | MAP_FIXED;
1117	} else {
1118		if ((shmflg & SHM_REMAP))
1119			goto out;
1120
1121		flags = MAP_SHARED;
1122	}
1123
1124	if (shmflg & SHM_RDONLY) {
1125		prot = PROT_READ;
1126		acc_mode = S_IRUGO;
1127		f_mode = FMODE_READ;
1128	} else {
1129		prot = PROT_READ | PROT_WRITE;
1130		acc_mode = S_IRUGO | S_IWUGO;
1131		f_mode = FMODE_READ | FMODE_WRITE;
1132	}
1133	if (shmflg & SHM_EXEC) {
1134		prot |= PROT_EXEC;
1135		acc_mode |= S_IXUGO;
1136	}
1137
1138	/*
1139	 * We cannot rely on the fs check since SYSV IPC does have an
1140	 * additional creator id...
1141	 */
1142	ns = current->nsproxy->ipc_ns;
1143	rcu_read_lock();
1144	shp = shm_obtain_object_check(ns, shmid);
1145	if (IS_ERR(shp)) {
1146		err = PTR_ERR(shp);
1147		goto out_unlock;
1148	}
1149
1150	err = -EACCES;
1151	if (ipcperms(ns, &shp->shm_perm, acc_mode))
1152		goto out_unlock;
1153
1154	err = security_shm_shmat(shp, shmaddr, shmflg);
1155	if (err)
1156		goto out_unlock;
1157
1158	ipc_lock_object(&shp->shm_perm);
1159
1160	/* check if shm_destroy() is tearing down shp */
1161	if (!ipc_valid_object(&shp->shm_perm)) {
1162		ipc_unlock_object(&shp->shm_perm);
1163		err = -EIDRM;
1164		goto out_unlock;
1165	}
1166
1167	path = shp->shm_file->f_path;
1168	path_get(&path);
1169	shp->shm_nattch++;
1170	size = i_size_read(d_inode(path.dentry));
1171	ipc_unlock_object(&shp->shm_perm);
1172	rcu_read_unlock();
1173
1174	err = -ENOMEM;
1175	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1176	if (!sfd) {
1177		path_put(&path);
1178		goto out_nattch;
1179	}
1180
1181	file = alloc_file(&path, f_mode,
1182			  is_file_hugepages(shp->shm_file) ?
1183				&shm_file_operations_huge :
1184				&shm_file_operations);
1185	err = PTR_ERR(file);
1186	if (IS_ERR(file)) {
1187		kfree(sfd);
1188		path_put(&path);
1189		goto out_nattch;
1190	}
1191
1192	file->private_data = sfd;
1193	file->f_mapping = shp->shm_file->f_mapping;
1194	sfd->id = shp->shm_perm.id;
1195	sfd->ns = get_ipc_ns(ns);
1196	sfd->file = shp->shm_file;
1197	sfd->vm_ops = NULL;
1198
1199	err = security_mmap_file(file, prot, flags);
1200	if (err)
1201		goto out_fput;
1202
1203	down_write(&current->mm->mmap_sem);
1204	if (addr && !(shmflg & SHM_REMAP)) {
1205		err = -EINVAL;
1206		if (addr + size < addr)
1207			goto invalid;
1208
1209		if (find_vma_intersection(current->mm, addr, addr + size))
1210			goto invalid;
1211	}
1212
1213	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
1214	*raddr = addr;
1215	err = 0;
1216	if (IS_ERR_VALUE(addr))
1217		err = (long)addr;
1218invalid:
1219	up_write(&current->mm->mmap_sem);
1220	if (populate)
1221		mm_populate(addr, populate);
1222
1223out_fput:
1224	fput(file);
1225
1226out_nattch:
1227	down_write(&shm_ids(ns).rwsem);
1228	shp = shm_lock(ns, shmid);
1229	shp->shm_nattch--;
1230	if (shm_may_destroy(ns, shp))
1231		shm_destroy(ns, shp);
1232	else
1233		shm_unlock(shp);
1234	up_write(&shm_ids(ns).rwsem);
1235	return err;
1236
1237out_unlock:
1238	rcu_read_unlock();
1239out:
1240	return err;
1241}
1242
1243SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1244{
1245	unsigned long ret;
1246	long err;
1247
1248	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1249	if (err)
1250		return err;
1251	force_successful_syscall_return();
1252	return (long)ret;
1253}
1254
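A short userspace sketch pairing the shmat() wrapper above with the shmdt() syscall defined further below (illustrative only):

	#include <sys/ipc.h>
	#include <sys/shm.h>
	#include <string.h>
	#include <stdio.h>

	int main(void)
	{
		int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
		char *p = shmat(id, NULL, 0);	/* kernel picks an aligned address */

		if (p == (void *)-1) {
			perror("shmat");
			return 1;
		}
		strcpy(p, "hello from SysV shm");
		printf("%s\n", p);

		shmdt(p);			/* drops shm_nattch */
		shmctl(id, IPC_RMID, NULL);	/* already detached, freed now */
		return 0;
	}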
1255/*
1256 * detach and kill segment if marked destroyed.
1257 * The work is done in shm_close.
1258 */
1259SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1260{
1261	struct mm_struct *mm = current->mm;
1262	struct vm_area_struct *vma;
1263	unsigned long addr = (unsigned long)shmaddr;
1264	int retval = -EINVAL;
1265#ifdef CONFIG_MMU
1266	loff_t size = 0;
1267	struct file *file;
1268	struct vm_area_struct *next;
1269#endif
1270
1271	if (addr & ~PAGE_MASK)
1272		return retval;
1273
1274	down_write(&mm->mmap_sem);
1275
1276	/*
1277	 * This function tries to be smart and unmap shm segments that
1278	 * were modified by partial mlock or munmap calls:
1279	 * - It first determines the size of the shm segment that should be
1280	 *   unmapped: It searches for a vma that is backed by shm and that
 1281	 *   started at address shmaddr. It records its size and then unmaps
1282	 *   it.
1283	 * - Then it unmaps all shm vmas that started at shmaddr and that
1284	 *   are within the initially determined size and that are from the
1285	 *   same shm segment from which we determined the size.
1286	 * Errors from do_munmap are ignored: the function only fails if
1287	 * it's called with invalid parameters or if it's called to unmap
1288	 * a part of a vma. Both calls in this function are for full vmas,
1289	 * the parameters are directly copied from the vma itself and always
1290	 * valid - therefore do_munmap cannot fail. (famous last words?)
1291	 */
1292	/*
1293	 * If it had been mremap()'d, the starting address would not
1294	 * match the usual checks anyway. So assume all vma's are
1295	 * above the starting address given.
1296	 */
1297	vma = find_vma(mm, addr);
1298
1299#ifdef CONFIG_MMU
1300	while (vma) {
1301		next = vma->vm_next;
1302
1303		/*
1304		 * Check if the starting address would match, i.e. it's
1305		 * a fragment created by mprotect() and/or munmap(), or it
 1306		 * otherwise starts at this address with no hassles.
1307		 */
1308		if ((vma->vm_ops == &shm_vm_ops) &&
1309			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1310
1311			/*
1312			 * Record the file of the shm segment being
1313			 * unmapped.  With mremap(), someone could place
1314			 * page from another segment but with equal offsets
1315			 * in the range we are unmapping.
1316			 */
1317			file = vma->vm_file;
1318			size = i_size_read(file_inode(vma->vm_file));
1319			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1320			/*
1321			 * We discovered the size of the shm segment, so
1322			 * break out of here and fall through to the next
1323			 * loop that uses the size information to stop
1324			 * searching for matching vma's.
1325			 */
1326			retval = 0;
1327			vma = next;
1328			break;
1329		}
1330		vma = next;
1331	}
1332
1333	/*
1334	 * We need look no further than the maximum address a fragment
1335	 * could possibly have landed at. Also cast things to loff_t to
1336	 * prevent overflows and make comparisons vs. equal-width types.
1337	 */
1338	size = PAGE_ALIGN(size);
1339	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1340		next = vma->vm_next;
1341
1342		/* finding a matching vma now does not alter retval */
1343		if ((vma->vm_ops == &shm_vm_ops) &&
1344		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1345		    (vma->vm_file == file))
1346			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1347		vma = next;
1348	}
1349
1350#else /* CONFIG_MMU */
1351	/* under NOMMU conditions, the exact address to be destroyed must be
1352	 * given */
1353	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1354		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1355		retval = 0;
1356	}
1357
1358#endif
1359
1360	up_write(&mm->mmap_sem);
1361	return retval;
1362}
1363
1364#ifdef CONFIG_PROC_FS
1365static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1366{
1367	struct user_namespace *user_ns = seq_user_ns(s);
1368	struct shmid_kernel *shp = it;
1369	unsigned long rss = 0, swp = 0;
1370
1371	shm_add_rss_swap(shp, &rss, &swp);
1372
1373#if BITS_PER_LONG <= 32
1374#define SIZE_SPEC "%10lu"
1375#else
1376#define SIZE_SPEC "%21lu"
1377#endif
1378
1379	seq_printf(s,
1380		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
1381		   "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
1382		   SIZE_SPEC " " SIZE_SPEC "\n",
1383		   shp->shm_perm.key,
1384		   shp->shm_perm.id,
1385		   shp->shm_perm.mode,
1386		   shp->shm_segsz,
1387		   shp->shm_cprid,
1388		   shp->shm_lprid,
1389		   shp->shm_nattch,
1390		   from_kuid_munged(user_ns, shp->shm_perm.uid),
1391		   from_kgid_munged(user_ns, shp->shm_perm.gid),
1392		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
1393		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
1394		   shp->shm_atim,
1395		   shp->shm_dtim,
1396		   shp->shm_ctim,
1397		   rss * PAGE_SIZE,
1398		   swp * PAGE_SIZE);
1399
1400	return 0;
1401}
1402#endif
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/ipc/shm.c
   4 * Copyright (C) 1992, 1993 Krishna Balasubramanian
   5 *	 Many improvements/fixes by Bruno Haible.
   6 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
   7 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
   8 *
   9 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
  10 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
  11 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
  12 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
  13 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
  14 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
  15 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
  16 *
  17 * support for audit of ipc object properties and permission changes
  18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  19 *
  20 * namespaces support
  21 * OpenVZ, SWsoft Inc.
  22 * Pavel Emelianov <xemul@openvz.org>
  23 *
  24 * Better ipc lock (kern_ipc_perm.lock) handling
  25 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
  26 */
  27
  28#include <linux/slab.h>
  29#include <linux/mm.h>
  30#include <linux/hugetlb.h>
  31#include <linux/shm.h>
  32#include <uapi/linux/shm.h>
  33#include <linux/init.h>
  34#include <linux/file.h>
  35#include <linux/mman.h>
  36#include <linux/shmem_fs.h>
  37#include <linux/security.h>
  38#include <linux/syscalls.h>
  39#include <linux/audit.h>
  40#include <linux/capability.h>
  41#include <linux/ptrace.h>
  42#include <linux/seq_file.h>
  43#include <linux/rwsem.h>
  44#include <linux/nsproxy.h>
  45#include <linux/mount.h>
  46#include <linux/ipc_namespace.h>
  47#include <linux/rhashtable.h>
  48
  49#include <linux/uaccess.h>
  50
  51#include "util.h"
  52
  53struct shmid_kernel /* private to the kernel */
  54{
  55	struct kern_ipc_perm	shm_perm;
  56	struct file		*shm_file;
  57	unsigned long		shm_nattch;
  58	unsigned long		shm_segsz;
  59	time64_t		shm_atim;
  60	time64_t		shm_dtim;
  61	time64_t		shm_ctim;
  62	struct pid		*shm_cprid;
  63	struct pid		*shm_lprid;
  64	struct ucounts		*mlock_ucounts;
  65
  66	/*
   67	 * The task that created the shm object, for
  68	 * task_lock(shp->shm_creator)
  69	 */
  70	struct task_struct	*shm_creator;
  71
  72	/*
  73	 * List by creator. task_lock(->shm_creator) required for read/write.
  74	 * If list_empty(), then the creator is dead already.
  75	 */
  76	struct list_head	shm_clist;
  77	struct ipc_namespace	*ns;
  78} __randomize_layout;
  79
  80/* shm_mode upper byte flags */
  81#define SHM_DEST	01000	/* segment will be destroyed on last detach */
  82#define SHM_LOCKED	02000   /* segment will not be swapped */
  83
  84struct shm_file_data {
  85	int id;
  86	struct ipc_namespace *ns;
  87	struct file *file;
  88	const struct vm_operations_struct *vm_ops;
  89};
  90
  91#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
  92
  93static const struct file_operations shm_file_operations;
  94static const struct vm_operations_struct shm_vm_ops;
  95
  96#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])
  97
  98#define shm_unlock(shp)			\
  99	ipc_unlock(&(shp)->shm_perm)
 100
 101static int newseg(struct ipc_namespace *, struct ipc_params *);
 102static void shm_open(struct vm_area_struct *vma);
 103static void shm_close(struct vm_area_struct *vma);
 104static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
 105#ifdef CONFIG_PROC_FS
 106static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
 107#endif
 108
 109void shm_init_ns(struct ipc_namespace *ns)
 110{
 111	ns->shm_ctlmax = SHMMAX;
 112	ns->shm_ctlall = SHMALL;
 113	ns->shm_ctlmni = SHMMNI;
 114	ns->shm_rmid_forced = 0;
 115	ns->shm_tot = 0;
 116	ipc_init_ids(&shm_ids(ns));
 117}
 118
 119/*
 120 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 121 * Only shm_ids.rwsem remains locked on exit.
 122 */
 123static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 124{
 125	struct shmid_kernel *shp;
 126
 127	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 128	WARN_ON(ns != shp->ns);
 129
 130	if (shp->shm_nattch) {
 131		shp->shm_perm.mode |= SHM_DEST;
 132		/* Do not find it any more */
 133		ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
 134		shm_unlock(shp);
 135	} else
 136		shm_destroy(ns, shp);
 137}
 138
 139#ifdef CONFIG_IPC_NS
 140void shm_exit_ns(struct ipc_namespace *ns)
 141{
 142	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
 143	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
 144	rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
 145}
 146#endif
 147
 148static int __init ipc_ns_init(void)
 149{
 150	shm_init_ns(&init_ipc_ns);
 151	return 0;
 152}
 153
 154pure_initcall(ipc_ns_init);
 155
 156void __init shm_init(void)
 157{
 158	ipc_init_proc_interface("sysvipc/shm",
 159#if BITS_PER_LONG <= 32
 160				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
 161#else
 162				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
 163#endif
 164				IPC_SHM_IDS, sysvipc_shm_proc_show);
 165}
 166
 167static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
 168{
 169	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
 170
 171	if (IS_ERR(ipcp))
 172		return ERR_CAST(ipcp);
 173
 174	return container_of(ipcp, struct shmid_kernel, shm_perm);
 175}
 176
 177static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
 178{
 179	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
 180
 181	if (IS_ERR(ipcp))
 182		return ERR_CAST(ipcp);
 183
 184	return container_of(ipcp, struct shmid_kernel, shm_perm);
 185}
 186
 187/*
 188 * shm_lock_(check_) routines are called in the paths where the rwsem
 189 * is not necessarily held.
 190 */
 191static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 192{
 193	struct kern_ipc_perm *ipcp;
 194
 195	rcu_read_lock();
 196	ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
 197	if (IS_ERR(ipcp))
 198		goto err;
 199
 200	ipc_lock_object(ipcp);
 201	/*
 202	 * ipc_rmid() may have already freed the ID while ipc_lock_object()
 203	 * was spinning: here verify that the structure is still valid.
 204	 * Upon races with RMID, return -EIDRM, thus indicating that
 205	 * the ID points to a removed identifier.
 206	 */
 207	if (ipc_valid_object(ipcp)) {
 208		/* return a locked ipc object upon success */
 209		return container_of(ipcp, struct shmid_kernel, shm_perm);
 210	}
 211
 212	ipc_unlock_object(ipcp);
 213	ipcp = ERR_PTR(-EIDRM);
 214err:
 215	rcu_read_unlock();
 216	/*
 217	 * Callers of shm_lock() must validate the status of the returned ipc
 218	 * object pointer and error out as appropriate.
 219	 */
 220	return ERR_CAST(ipcp);
 221}
 222
 223static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
 224{
 225	rcu_read_lock();
 226	ipc_lock_object(&ipcp->shm_perm);
 227}
 228
 229static void shm_rcu_free(struct rcu_head *head)
 230{
 231	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
 232							rcu);
 233	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
 234							shm_perm);
 235	security_shm_free(&shp->shm_perm);
 236	kfree(shp);
 237}
 238
 239/*
 240 * It has to be called with shp locked.
 241 * It must be called before ipc_rmid()
 242 */
 243static inline void shm_clist_rm(struct shmid_kernel *shp)
 244{
 245	struct task_struct *creator;
 246
 247	/* ensure that shm_creator does not disappear */
 248	rcu_read_lock();
 249
 250	/*
 251	 * A concurrent exit_shm may do a list_del_init() as well.
 252	 * Just do nothing if exit_shm already did the work
 253	 */
 254	if (!list_empty(&shp->shm_clist)) {
 255		/*
 256		 * shp->shm_creator is guaranteed to be valid *only*
 257		 * if shp->shm_clist is not empty.
 258		 */
 259		creator = shp->shm_creator;
 260
 261		task_lock(creator);
 262		/*
 263		 * list_del_init() is a nop if the entry was already removed
 264		 * from the list.
 265		 */
 266		list_del_init(&shp->shm_clist);
 267		task_unlock(creator);
 268	}
 269	rcu_read_unlock();
 270}
 271
 272static inline void shm_rmid(struct shmid_kernel *s)
 273{
 274	shm_clist_rm(s);
 275	ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
 276}
 277
 278
 279static int __shm_open(struct shm_file_data *sfd)
 280{
 281	struct shmid_kernel *shp;
 282
 283	shp = shm_lock(sfd->ns, sfd->id);
 284
 285	if (IS_ERR(shp))
 286		return PTR_ERR(shp);
 287
 288	if (shp->shm_file != sfd->file) {
 289		/* ID was reused */
 290		shm_unlock(shp);
 291		return -EINVAL;
 292	}
 293
 294	shp->shm_atim = ktime_get_real_seconds();
 295	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
 296	shp->shm_nattch++;
 297	shm_unlock(shp);
 298	return 0;
 299}
 300
 301/* This is called by fork, once for every shm attach. */
 302static void shm_open(struct vm_area_struct *vma)
 303{
 304	struct file *file = vma->vm_file;
 305	struct shm_file_data *sfd = shm_file_data(file);
 306	int err;
 307
 308	/* Always call underlying open if present */
 309	if (sfd->vm_ops->open)
 310		sfd->vm_ops->open(vma);
 311
 312	err = __shm_open(sfd);
 313	/*
 314	 * We raced in the idr lookup or with shm_destroy().
 315	 * Either way, the ID is busted.
 316	 */
 317	WARN_ON_ONCE(err);
 318}
 319
 320/*
 321 * shm_destroy - free the struct shmid_kernel
 322 *
 323 * @ns: namespace
 324 * @shp: struct to free
 325 *
 326 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 327 * but returns with shp unlocked and freed.
 328 */
 329static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 330{
 331	struct file *shm_file;
 332
 333	shm_file = shp->shm_file;
 334	shp->shm_file = NULL;
 335	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
 336	shm_rmid(shp);
 337	shm_unlock(shp);
 338	if (!is_file_hugepages(shm_file))
 339		shmem_lock(shm_file, 0, shp->mlock_ucounts);
 340	fput(shm_file);
 341	ipc_update_pid(&shp->shm_cprid, NULL);
 342	ipc_update_pid(&shp->shm_lprid, NULL);
 343	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
 344}
 345
 346/*
 347 * shm_may_destroy - identifies whether shm segment should be destroyed now
 348 *
 349 * Returns true if and only if there are no active users of the segment and
 350 * one of the following is true:
 351 *
 352 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 353 *
 354 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 355 */
 356static bool shm_may_destroy(struct shmid_kernel *shp)
 357{
 358	return (shp->shm_nattch == 0) &&
 359	       (shp->ns->shm_rmid_forced ||
 360		(shp->shm_perm.mode & SHM_DEST));
 361}
 362
 363/*
 364 * remove the attach descriptor vma.
 365 * free memory for segment if it is marked destroyed.
 366 * The descriptor has already been removed from the current->mm->mmap list
 367 * and will later be kfree()d.
 368 */
 369static void __shm_close(struct shm_file_data *sfd)
 370{
 371	struct shmid_kernel *shp;
 372	struct ipc_namespace *ns = sfd->ns;
 373
 374	down_write(&shm_ids(ns).rwsem);
 375	/* remove from the list of attaches of the shm segment */
 376	shp = shm_lock(ns, sfd->id);
 377
 378	/*
 379	 * We raced in the idr lookup or with shm_destroy().
 380	 * Either way, the ID is busted.
 381	 */
 382	if (WARN_ON_ONCE(IS_ERR(shp)))
 383		goto done; /* no-op */
 384
 385	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
 386	shp->shm_dtim = ktime_get_real_seconds();
 387	shp->shm_nattch--;
 388	if (shm_may_destroy(shp))
 389		shm_destroy(ns, shp);
 390	else
 391		shm_unlock(shp);
 392done:
 393	up_write(&shm_ids(ns).rwsem);
 394}
 395
 396static void shm_close(struct vm_area_struct *vma)
 397{
 398	struct file *file = vma->vm_file;
 399	struct shm_file_data *sfd = shm_file_data(file);
 400
 401	/* Always call underlying close if present */
 402	if (sfd->vm_ops->close)
 403		sfd->vm_ops->close(vma);
 404
 405	__shm_close(sfd);
 406}
 407
 408/* Called with ns->shm_ids(ns).rwsem locked */
 409static int shm_try_destroy_orphaned(int id, void *p, void *data)
 410{
 411	struct ipc_namespace *ns = data;
 412	struct kern_ipc_perm *ipcp = p;
 413	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 414
 415	/*
 416	 * We want to destroy segments without users and with already
 417	 * exit'ed originating process.
 418	 *
 419	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
 420	 */
 421	if (!list_empty(&shp->shm_clist))
 422		return 0;
 423
 424	if (shm_may_destroy(shp)) {
 425		shm_lock_by_ptr(shp);
 426		shm_destroy(ns, shp);
 427	}
 428	return 0;
 429}
 430
 431void shm_destroy_orphaned(struct ipc_namespace *ns)
 432{
 433	down_write(&shm_ids(ns).rwsem);
 434	if (shm_ids(ns).in_use)
 435		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
 436	up_write(&shm_ids(ns).rwsem);
 437}
 438
 439/* Locking assumes this will only be called with task == current */
 440void exit_shm(struct task_struct *task)
 441{
 442	for (;;) {
 443		struct shmid_kernel *shp;
 444		struct ipc_namespace *ns;
 445
 446		task_lock(task);
 447
 448		if (list_empty(&task->sysvshm.shm_clist)) {
 449			task_unlock(task);
 450			break;
 451		}
 452
 453		shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
 454				shm_clist);
 455
 456		/*
  457		 * 1) Get a pointer to the ipc namespace. It is worth noting
 458		 * that this pointer is guaranteed to be valid because
 459		 * shp lifetime is always shorter than namespace lifetime
 460		 * in which shp lives.
  461		 * We have taken task_lock(), which means that shp won't be freed.
 462		 */
 463		ns = shp->ns;
 464
 465		/*
 466		 * 2) If kernel.shm_rmid_forced is not set then only keep track of
 467		 * which shmids are orphaned, so that a later set of the sysctl
 468		 * can clean them up.
 469		 */
 470		if (!ns->shm_rmid_forced)
 471			goto unlink_continue;
 472
 473		/*
 474		 * 3) get a reference to the namespace.
 475		 *    The refcount could be already 0. If it is 0, then
  476		 *    the shm objects will be freed by free_ipc_work().
 477		 */
 478		ns = get_ipc_ns_not_zero(ns);
 479		if (!ns) {
 480unlink_continue:
 481			list_del_init(&shp->shm_clist);
 482			task_unlock(task);
 483			continue;
 484		}
 485
 486		/*
 487		 * 4) get a reference to shp.
 488		 *   This cannot fail: shm_clist_rm() is called before
 489		 *   ipc_rmid(), thus the refcount cannot be 0.
 490		 */
 491		WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
 492
 493		/*
 494		 * 5) unlink the shm segment from the list of segments
 495		 *    created by current.
 496		 *    This must be done last. After unlinking,
 497		 *    only the refcounts obtained above prevent IPC_RMID
 498		 *    from destroying the segment or the namespace.
 499		 */
 500		list_del_init(&shp->shm_clist);
 501
 502		task_unlock(task);
 503
 504		/*
 505		 * 6) we have all references
 506		 *    Thus lock & if needed destroy shp.
 507		 */
 508		down_write(&shm_ids(ns).rwsem);
 509		shm_lock_by_ptr(shp);
 510		/*
 511		 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
 512		 * safe to call ipc_rcu_putref here
 513		 */
 514		ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
 515
 516		if (ipc_valid_object(&shp->shm_perm)) {
 517			if (shm_may_destroy(shp))
 518				shm_destroy(ns, shp);
 519			else
 520				shm_unlock(shp);
 521		} else {
 522			/*
 523			 * Someone else deleted the shp from namespace
 524			 * idr/kht while we have waited.
 525			 * Just unlock and continue.
 526			 */
 527			shm_unlock(shp);
 528		}
 529
 530		up_write(&shm_ids(ns).rwsem);
 531		put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
 532	}
 533}
 534
 535static vm_fault_t shm_fault(struct vm_fault *vmf)
 536{
 537	struct file *file = vmf->vma->vm_file;
 538	struct shm_file_data *sfd = shm_file_data(file);
 539
 540	return sfd->vm_ops->fault(vmf);
 541}
 542
 543static int shm_may_split(struct vm_area_struct *vma, unsigned long addr)
 544{
 545	struct file *file = vma->vm_file;
 546	struct shm_file_data *sfd = shm_file_data(file);
 547
 548	if (sfd->vm_ops->may_split)
 549		return sfd->vm_ops->may_split(vma, addr);
 550
 551	return 0;
 552}
 553
 554static unsigned long shm_pagesize(struct vm_area_struct *vma)
 555{
 556	struct file *file = vma->vm_file;
 557	struct shm_file_data *sfd = shm_file_data(file);
 558
 559	if (sfd->vm_ops->pagesize)
 560		return sfd->vm_ops->pagesize(vma);
 561
 562	return PAGE_SIZE;
 563}
 564
 565#ifdef CONFIG_NUMA
 566static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
 567{
 568	struct shm_file_data *sfd = shm_file_data(vma->vm_file);
 569	int err = 0;
 570
 571	if (sfd->vm_ops->set_policy)
 572		err = sfd->vm_ops->set_policy(vma, mpol);
 573	return err;
 574}
 575
 576static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
 577					unsigned long addr, pgoff_t *ilx)
 578{
 579	struct shm_file_data *sfd = shm_file_data(vma->vm_file);
 580	struct mempolicy *mpol = vma->vm_policy;
 581
 582	if (sfd->vm_ops->get_policy)
 583		mpol = sfd->vm_ops->get_policy(vma, addr, ilx);
 584	return mpol;
 585}
 586#endif
 587
 588static int shm_mmap(struct file *file, struct vm_area_struct *vma)
 589{
 590	struct shm_file_data *sfd = shm_file_data(file);
 591	int ret;
 592
 593	/*
 594	 * In case of remap_file_pages() emulation, the file can represent an
 595	 * IPC ID that was removed, and possibly even reused by another shm
 596	 * segment already.  Propagate this case as an error to caller.
 597	 */
 598	ret = __shm_open(sfd);
 599	if (ret)
 600		return ret;
 601
 602	ret = call_mmap(sfd->file, vma);
 603	if (ret) {
 604		__shm_close(sfd);
 605		return ret;
 606	}
 607	sfd->vm_ops = vma->vm_ops;
 608#ifdef CONFIG_MMU
 609	WARN_ON(!sfd->vm_ops->fault);
 610#endif
 611	vma->vm_ops = &shm_vm_ops;
 612	return 0;
 613}
 614
 615static int shm_release(struct inode *ino, struct file *file)
 616{
 617	struct shm_file_data *sfd = shm_file_data(file);
 618
 619	put_ipc_ns(sfd->ns);
 620	fput(sfd->file);
 621	shm_file_data(file) = NULL;
 622	kfree(sfd);
 623	return 0;
 624}
 625
 626static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 627{
 628	struct shm_file_data *sfd = shm_file_data(file);
 629
 630	if (!sfd->file->f_op->fsync)
 631		return -EINVAL;
 632	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
 633}
 634
 635static long shm_fallocate(struct file *file, int mode, loff_t offset,
 636			  loff_t len)
 637{
 638	struct shm_file_data *sfd = shm_file_data(file);
 639
 640	if (!sfd->file->f_op->fallocate)
 641		return -EOPNOTSUPP;
 642	return sfd->file->f_op->fallocate(file, mode, offset, len);
 643}
 644
 645static unsigned long shm_get_unmapped_area(struct file *file,
 646	unsigned long addr, unsigned long len, unsigned long pgoff,
 647	unsigned long flags)
 648{
 649	struct shm_file_data *sfd = shm_file_data(file);
 650
 651	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
 652						pgoff, flags);
 653}
 654
 655static const struct file_operations shm_file_operations = {
 656	.mmap		= shm_mmap,
 657	.fsync		= shm_fsync,
 658	.release	= shm_release,
 659	.get_unmapped_area	= shm_get_unmapped_area,
 660	.llseek		= noop_llseek,
 661	.fallocate	= shm_fallocate,
 662};
 663
 664/*
 665 * shm_file_operations_huge is now identical to shm_file_operations
 666 * except for fop_flags
 667 */
 668static const struct file_operations shm_file_operations_huge = {
 669	.mmap		= shm_mmap,
 670	.fsync		= shm_fsync,
 671	.release	= shm_release,
 672	.get_unmapped_area	= shm_get_unmapped_area,
 673	.llseek		= noop_llseek,
 674	.fallocate	= shm_fallocate,
 675	.fop_flags	= FOP_HUGE_PAGES,
 676};
 677
 678static const struct vm_operations_struct shm_vm_ops = {
 679	.open	= shm_open,	/* callback for a new vm-area open */
 680	.close	= shm_close,	/* callback for when the vm-area is released */
 681	.fault	= shm_fault,
 682	.may_split = shm_may_split,
 683	.pagesize = shm_pagesize,
 684#if defined(CONFIG_NUMA)
 685	.set_policy = shm_set_policy,
 686	.get_policy = shm_get_policy,
 687#endif
 688};
 689
 690/**
 691 * newseg - Create a new shared memory segment
 692 * @ns: namespace
 693 * @params: ptr to the structure that contains key, size and shmflg
 694 *
 695 * Called with shm_ids.rwsem held as a writer.
 696 */
 697static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 698{
 699	key_t key = params->key;
 700	int shmflg = params->flg;
 701	size_t size = params->u.size;
 702	int error;
 703	struct shmid_kernel *shp;
 704	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 705	struct file *file;
 706	char name[13];
 707	vm_flags_t acctflag = 0;
 708
 709	if (size < SHMMIN || size > ns->shm_ctlmax)
 710		return -EINVAL;
 711
 712	if (numpages << PAGE_SHIFT < size)
 713		return -ENOSPC;
 714
 715	if (ns->shm_tot + numpages < ns->shm_tot ||
 716			ns->shm_tot + numpages > ns->shm_ctlall)
 717		return -ENOSPC;
 718
 719	shp = kmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
 720	if (unlikely(!shp))
 721		return -ENOMEM;
 722
 723	shp->shm_perm.key = key;
 724	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
 725	shp->mlock_ucounts = NULL;
 726
 727	shp->shm_perm.security = NULL;
 728	error = security_shm_alloc(&shp->shm_perm);
 729	if (error) {
 730		kfree(shp);
 731		return error;
 732	}
 733
 734	sprintf(name, "SYSV%08x", key);
 735	if (shmflg & SHM_HUGETLB) {
 736		struct hstate *hs;
 737		size_t hugesize;
 738
 739		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 740		if (!hs) {
 741			error = -EINVAL;
 742			goto no_file;
 743		}
 744		hugesize = ALIGN(size, huge_page_size(hs));
 745
 746		/* hugetlb_file_setup applies strict accounting */
 747		if (shmflg & SHM_NORESERVE)
 748			acctflag = VM_NORESERVE;
 749		file = hugetlb_file_setup(name, hugesize, acctflag,
 750				HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 751	} else {
  752		/*
  753		 * Do not allow unaccounted (SHM_NORESERVE) segments when
  754		 * OVERCOMMIT_NEVER is in effect, even if it's asked for.
  755		 */
 756		if  ((shmflg & SHM_NORESERVE) &&
 757				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
 758			acctflag = VM_NORESERVE;
 759		file = shmem_kernel_file_setup(name, size, acctflag);
 760	}
 761	error = PTR_ERR(file);
 762	if (IS_ERR(file))
 763		goto no_file;
 764
 765	shp->shm_cprid = get_pid(task_tgid(current));
 766	shp->shm_lprid = NULL;
 767	shp->shm_atim = shp->shm_dtim = 0;
 768	shp->shm_ctim = ktime_get_real_seconds();
 769	shp->shm_segsz = size;
 770	shp->shm_nattch = 0;
 771	shp->shm_file = file;
 772	shp->shm_creator = current;
 773
 774	/* ipc_addid() locks shp upon success. */
 775	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
 776	if (error < 0)
 777		goto no_id;
 778
 779	shp->ns = ns;
 780
 781	task_lock(current);
 782	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
 783	task_unlock(current);
 784
 785	/*
 786	 * shmid gets reported as "inode#" in /proc/pid/maps.
 787	 * proc-ps tools use this. Changing this will break them.
 788	 */
 789	file_inode(file)->i_ino = shp->shm_perm.id;
 790
 791	ns->shm_tot += numpages;
 792	error = shp->shm_perm.id;
 793
 794	ipc_unlock_object(&shp->shm_perm);
 795	rcu_read_unlock();
 796	return error;
 797
 798no_id:
 799	ipc_update_pid(&shp->shm_cprid, NULL);
 800	ipc_update_pid(&shp->shm_lprid, NULL);
 801	fput(file);
 802	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
 803	return error;
 804no_file:
 805	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
 806	return error;
 807}
 808
 809/*
 810 * Called with shm_ids.rwsem and ipcp locked.
 811 */
 812static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
 813{
 814	struct shmid_kernel *shp;
 815
 816	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 817	if (shp->shm_segsz < params->u.size)
 818		return -EINVAL;
 819
 820	return 0;
 821}
 822
 823long ksys_shmget(key_t key, size_t size, int shmflg)
 824{
 825	struct ipc_namespace *ns;
 826	static const struct ipc_ops shm_ops = {
 827		.getnew = newseg,
 828		.associate = security_shm_associate,
 829		.more_checks = shm_more_checks,
 830	};
 831	struct ipc_params shm_params;
 832
 833	ns = current->nsproxy->ipc_ns;
 834
 835	shm_params.key = key;
 836	shm_params.flg = shmflg;
 837	shm_params.u.size = size;
 838
 839	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
 840}
 841
 842SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
 843{
 844	return ksys_shmget(key, size, shmflg);
 845}
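/*
 * Illustrative call from userspace (glibc wrapper assumed); the value
 * returned on success is the segment id later handed to shmat() and
 * shmctl():
 *
 *	key_t key = ftok("/some/existing/path", 'A');
 *	int id = shmget(key, 1 << 20, IPC_CREAT | 0600);
 *
 * Adding SHM_HUGETLB (optionally with an explicit page size such as
 * SHM_HUGE_2MB) makes newseg() back the segment with hugetlbfs instead
 * of shmem.
 */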
 846
 847static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
 848{
 849	switch (version) {
 850	case IPC_64:
 851		return copy_to_user(buf, in, sizeof(*in));
 852	case IPC_OLD:
 853	    {
 854		struct shmid_ds out;
 855
 856		memset(&out, 0, sizeof(out));
 857		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
 858		out.shm_segsz	= in->shm_segsz;
 859		out.shm_atime	= in->shm_atime;
 860		out.shm_dtime	= in->shm_dtime;
 861		out.shm_ctime	= in->shm_ctime;
 862		out.shm_cpid	= in->shm_cpid;
 863		out.shm_lpid	= in->shm_lpid;
 864		out.shm_nattch	= in->shm_nattch;
 865
 866		return copy_to_user(buf, &out, sizeof(out));
 867	    }
 868	default:
 869		return -EINVAL;
 870	}
 871}
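/*
 * Note on the version split above: IPC_64 callers receive the full
 * shmid64_ds verbatim, while legacy IPC_OLD callers get the data
 * squeezed into the old shmid_ds layout.  Which one applies is decided
 * by ipc_parse_version() on architectures that still support the old
 * ABI.
 */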
 872
 873static inline unsigned long
 874copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
 875{
 876	switch (version) {
 877	case IPC_64:
 878		if (copy_from_user(out, buf, sizeof(*out)))
 879			return -EFAULT;
 880		return 0;
 881	case IPC_OLD:
 882	    {
 883		struct shmid_ds tbuf_old;
 884
 885		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 886			return -EFAULT;
 887
 888		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
 889		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
 890		out->shm_perm.mode	= tbuf_old.shm_perm.mode;
 891
 892		return 0;
 893	    }
 894	default:
 895		return -EINVAL;
 896	}
 897}
 898
 899static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
 900{
 901	switch (version) {
 902	case IPC_64:
 903		return copy_to_user(buf, in, sizeof(*in));
 904	case IPC_OLD:
 905	    {
 906		struct shminfo out;
 907
 908		if (in->shmmax > INT_MAX)
 909			out.shmmax = INT_MAX;
 910		else
 911			out.shmmax = (int)in->shmmax;
 912
 913		out.shmmin	= in->shmmin;
 914		out.shmmni	= in->shmmni;
 915		out.shmseg	= in->shmseg;
 916		out.shmall	= in->shmall;
 917
 918		return copy_to_user(buf, &out, sizeof(out));
 919	    }
 920	default:
 921		return -EINVAL;
 922	}
 923}
 924
 925/*
 926 * Calculate and add used RSS and swap pages of a shm.
 927 * Called with shm_ids.rwsem held as a reader
 928 */
 929static void shm_add_rss_swap(struct shmid_kernel *shp,
 930	unsigned long *rss_add, unsigned long *swp_add)
 931{
 932	struct inode *inode;
 933
 934	inode = file_inode(shp->shm_file);
 935
 936	if (is_file_hugepages(shp->shm_file)) {
 937		struct address_space *mapping = inode->i_mapping;
 938		struct hstate *h = hstate_file(shp->shm_file);
 939		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
 940	} else {
 941#ifdef CONFIG_SHMEM
 942		struct shmem_inode_info *info = SHMEM_I(inode);
 943
 944		spin_lock_irq(&info->lock);
 945		*rss_add += inode->i_mapping->nrpages;
 946		*swp_add += info->swapped;
 947		spin_unlock_irq(&info->lock);
 948#else
 949		*rss_add += inode->i_mapping->nrpages;
 950#endif
 951	}
 952}
 953
 954/*
 955 * Called with shm_ids.rwsem held as a reader
 956 */
 957static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
 958		unsigned long *swp)
 959{
 960	int next_id;
 961	int total, in_use;
 962
 963	*rss = 0;
 964	*swp = 0;
 965
 966	in_use = shm_ids(ns).in_use;
 967
 968	for (total = 0, next_id = 0; total < in_use; next_id++) {
 969		struct kern_ipc_perm *ipc;
 970		struct shmid_kernel *shp;
 971
 972		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
 973		if (ipc == NULL)
 974			continue;
 975		shp = container_of(ipc, struct shmid_kernel, shm_perm);
 976
 977		shm_add_rss_swap(shp, rss, swp);
 978
 979		total++;
 980	}
 981}
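/*
 * The rss/swp totals gathered above end up in struct shm_info
 * (shm_rss/shm_swp) for shmctl(SHM_INFO).  A minimal sketch of the
 * userspace side (glibc wrapper assumed):
 *
 *	struct shm_info info;
 *	int maxidx = shmctl(0, SHM_INFO, (struct shmid_ds *)&info);
 */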
 982
 983/*
 984 * This function handles some shmctl commands which require the rwsem
 985 * to be held in write mode.
  986 * NOTE: the caller must not hold any locks; the rwsem is taken inside this function.
 987 */
 988static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
 989		       struct shmid64_ds *shmid64)
 990{
 991	struct kern_ipc_perm *ipcp;
 992	struct shmid_kernel *shp;
 993	int err;
 994
 995	down_write(&shm_ids(ns).rwsem);
 996	rcu_read_lock();
 997
 998	ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd,
 999				      &shmid64->shm_perm, 0);
1000	if (IS_ERR(ipcp)) {
1001		err = PTR_ERR(ipcp);
1002		goto out_unlock1;
1003	}
1004
1005	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1006
1007	err = security_shm_shmctl(&shp->shm_perm, cmd);
1008	if (err)
1009		goto out_unlock1;
1010
1011	switch (cmd) {
1012	case IPC_RMID:
1013		ipc_lock_object(&shp->shm_perm);
1014		/* do_shm_rmid unlocks the ipc object and rcu */
1015		do_shm_rmid(ns, ipcp);
1016		goto out_up;
1017	case IPC_SET:
1018		ipc_lock_object(&shp->shm_perm);
1019		err = ipc_update_perm(&shmid64->shm_perm, ipcp);
1020		if (err)
1021			goto out_unlock0;
1022		shp->shm_ctim = ktime_get_real_seconds();
1023		break;
1024	default:
1025		err = -EINVAL;
1026		goto out_unlock1;
1027	}
1028
1029out_unlock0:
1030	ipc_unlock_object(&shp->shm_perm);
1031out_unlock1:
1032	rcu_read_unlock();
1033out_up:
1034	up_write(&shm_ids(ns).rwsem);
1035	return err;
1036}
1037
1038static int shmctl_ipc_info(struct ipc_namespace *ns,
1039			   struct shminfo64 *shminfo)
1040{
1041	int err = security_shm_shmctl(NULL, IPC_INFO);
1042	if (!err) {
1043		memset(shminfo, 0, sizeof(*shminfo));
1044		shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
1045		shminfo->shmmax = ns->shm_ctlmax;
1046		shminfo->shmall = ns->shm_ctlall;
1047		shminfo->shmmin = SHMMIN;
1048		down_read(&shm_ids(ns).rwsem);
1049		err = ipc_get_maxidx(&shm_ids(ns));
1050		up_read(&shm_ids(ns).rwsem);
1051		if (err < 0)
1052			err = 0;
1053	}
1054	return err;
1055}
1056
1057static int shmctl_shm_info(struct ipc_namespace *ns,
1058			   struct shm_info *shm_info)
1059{
1060	int err = security_shm_shmctl(NULL, SHM_INFO);
1061	if (!err) {
1062		memset(shm_info, 0, sizeof(*shm_info));
1063		down_read(&shm_ids(ns).rwsem);
1064		shm_info->used_ids = shm_ids(ns).in_use;
1065		shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
1066		shm_info->shm_tot = ns->shm_tot;
1067		shm_info->swap_attempts = 0;
1068		shm_info->swap_successes = 0;
1069		err = ipc_get_maxidx(&shm_ids(ns));
1070		up_read(&shm_ids(ns).rwsem);
1071		if (err < 0)
1072			err = 0;
1073	}
1074	return err;
1075}
1076
1077static int shmctl_stat(struct ipc_namespace *ns, int shmid,
1078			int cmd, struct shmid64_ds *tbuf)
1079{
1080	struct shmid_kernel *shp;
1081	int err;
1082
1083	memset(tbuf, 0, sizeof(*tbuf));
1084
1085	rcu_read_lock();
1086	if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
1087		shp = shm_obtain_object(ns, shmid);
1088		if (IS_ERR(shp)) {
1089			err = PTR_ERR(shp);
1090			goto out_unlock;
1091		}
1092	} else { /* IPC_STAT */
1093		shp = shm_obtain_object_check(ns, shmid);
1094		if (IS_ERR(shp)) {
1095			err = PTR_ERR(shp);
1096			goto out_unlock;
1097		}
1098	}
1099
1100	/*
 1101	 * Semantically SHM_STAT_ANY ought to be identical to the
 1102	 * functionality provided by the /proc/sysvipc/ interface.
 1103	 * As such, only audit these calls and do not perform the
 1104	 * traditional S_IRUGO permission checks on
1105	 * the ipc object.
1106	 */
1107	if (cmd == SHM_STAT_ANY)
1108		audit_ipc_obj(&shp->shm_perm);
1109	else {
1110		err = -EACCES;
1111		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
1112			goto out_unlock;
1113	}
1114
1115	err = security_shm_shmctl(&shp->shm_perm, cmd);
1116	if (err)
1117		goto out_unlock;
1118
1119	ipc_lock_object(&shp->shm_perm);
1120
1121	if (!ipc_valid_object(&shp->shm_perm)) {
1122		ipc_unlock_object(&shp->shm_perm);
1123		err = -EIDRM;
1124		goto out_unlock;
1125	}
1126
1127	kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
1128	tbuf->shm_segsz	= shp->shm_segsz;
1129	tbuf->shm_atime	= shp->shm_atim;
1130	tbuf->shm_dtime	= shp->shm_dtim;
1131	tbuf->shm_ctime	= shp->shm_ctim;
1132#ifndef CONFIG_64BIT
1133	tbuf->shm_atime_high = shp->shm_atim >> 32;
1134	tbuf->shm_dtime_high = shp->shm_dtim >> 32;
1135	tbuf->shm_ctime_high = shp->shm_ctim >> 32;
1136#endif
1137	tbuf->shm_cpid	= pid_vnr(shp->shm_cprid);
1138	tbuf->shm_lpid	= pid_vnr(shp->shm_lprid);
1139	tbuf->shm_nattch = shp->shm_nattch;
1140
1141	if (cmd == IPC_STAT) {
1142		/*
1143		 * As defined in SUS:
1144		 * Return 0 on success
1145		 */
1146		err = 0;
1147	} else {
1148		/*
1149		 * SHM_STAT and SHM_STAT_ANY (both Linux specific)
1150		 * Return the full id, including the sequence number
1151		 */
1152		err = shp->shm_perm.id;
1153	}
1154
1155	ipc_unlock_object(&shp->shm_perm);
1156out_unlock:
1157	rcu_read_unlock();
1158	return err;
1159}
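/*
 * The index-based SHM_STAT semantics above are what let a caller walk
 * every segment without knowing the ids up front.  A minimal sketch
 * (glibc wrapper assumed, maxidx obtained via SHM_INFO as shown
 * earlier):
 *
 *	struct shmid_ds ds;
 *	for (int i = 0; i <= maxidx; i++) {
 *		int id = shmctl(i, SHM_STAT, &ds);
 *		if (id < 0)
 *			continue;
 *		printf("shmid %d, %zu bytes\n", id, ds.shm_segsz);
 *	}
 */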
1160
1161static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
1162{
1163	struct shmid_kernel *shp;
1164	struct file *shm_file;
1165	int err;
1166
1167	rcu_read_lock();
1168	shp = shm_obtain_object_check(ns, shmid);
1169	if (IS_ERR(shp)) {
1170		err = PTR_ERR(shp);
1171		goto out_unlock1;
1172	}
1173
1174	audit_ipc_obj(&(shp->shm_perm));
1175	err = security_shm_shmctl(&shp->shm_perm, cmd);
1176	if (err)
1177		goto out_unlock1;
1178
1179	ipc_lock_object(&shp->shm_perm);
1180
1181	/* check if shm_destroy() is tearing down shp */
1182	if (!ipc_valid_object(&shp->shm_perm)) {
1183		err = -EIDRM;
1184		goto out_unlock0;
1185	}
1186
1187	if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1188		kuid_t euid = current_euid();
1189
1190		if (!uid_eq(euid, shp->shm_perm.uid) &&
1191		    !uid_eq(euid, shp->shm_perm.cuid)) {
1192			err = -EPERM;
1193			goto out_unlock0;
1194		}
1195		if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1196			err = -EPERM;
1197			goto out_unlock0;
1198		}
1199	}
1200
1201	shm_file = shp->shm_file;
1202	if (is_file_hugepages(shm_file))
1203		goto out_unlock0;
1204
1205	if (cmd == SHM_LOCK) {
1206		struct ucounts *ucounts = current_ucounts();
1207
1208		err = shmem_lock(shm_file, 1, ucounts);
1209		if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1210			shp->shm_perm.mode |= SHM_LOCKED;
1211			shp->mlock_ucounts = ucounts;
1212		}
1213		goto out_unlock0;
1214	}
1215
1216	/* SHM_UNLOCK */
1217	if (!(shp->shm_perm.mode & SHM_LOCKED))
1218		goto out_unlock0;
1219	shmem_lock(shm_file, 0, shp->mlock_ucounts);
1220	shp->shm_perm.mode &= ~SHM_LOCKED;
1221	shp->mlock_ucounts = NULL;
1222	get_file(shm_file);
1223	ipc_unlock_object(&shp->shm_perm);
1224	rcu_read_unlock();
1225	shmem_unlock_mapping(shm_file->f_mapping);
1226
1227	fput(shm_file);
1228	return err;
1229
1230out_unlock0:
1231	ipc_unlock_object(&shp->shm_perm);
1232out_unlock1:
1233	rcu_read_unlock();
1234	return err;
1235}
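/*
 * Illustrative use of the lock path above (glibc wrapper assumed): a
 * caller with CAP_IPC_LOCK, or one owning the segment with a non-zero
 * RLIMIT_MEMLOCK, can pin the segment and later release it:
 *
 *	shmctl(id, SHM_LOCK, NULL);
 *	...
 *	shmctl(id, SHM_UNLOCK, NULL);
 *
 * hugetlb-backed segments are skipped above because their pages are
 * never swapped out anyway.
 */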
1236
1237static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version)
1238{
1239	int err;
1240	struct ipc_namespace *ns;
1241	struct shmid64_ds sem64;
1242
1243	if (cmd < 0 || shmid < 0)
1244		return -EINVAL;
1245
1246	ns = current->nsproxy->ipc_ns;
1247
1248	switch (cmd) {
1249	case IPC_INFO: {
1250		struct shminfo64 shminfo;
1251		err = shmctl_ipc_info(ns, &shminfo);
1252		if (err < 0)
1253			return err;
1254		if (copy_shminfo_to_user(buf, &shminfo, version))
1255			err = -EFAULT;
1256		return err;
1257	}
1258	case SHM_INFO: {
1259		struct shm_info shm_info;
1260		err = shmctl_shm_info(ns, &shm_info);
1261		if (err < 0)
1262			return err;
1263		if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
1264			err = -EFAULT;
1265		return err;
1266	}
1267	case SHM_STAT:
1268	case SHM_STAT_ANY:
1269	case IPC_STAT: {
1270		err = shmctl_stat(ns, shmid, cmd, &sem64);
1271		if (err < 0)
1272			return err;
1273		if (copy_shmid_to_user(buf, &sem64, version))
1274			err = -EFAULT;
1275		return err;
1276	}
1277	case IPC_SET:
1278		if (copy_shmid_from_user(&sem64, buf, version))
1279			return -EFAULT;
1280		fallthrough;
1281	case IPC_RMID:
1282		return shmctl_down(ns, shmid, cmd, &sem64);
1283	case SHM_LOCK:
1284	case SHM_UNLOCK:
1285		return shmctl_do_lock(ns, shmid, cmd);
1286	default:
1287		return -EINVAL;
1288	}
1289}
1290
1291SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1292{
1293	return ksys_shmctl(shmid, cmd, buf, IPC_64);
1294}
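/*
 * Typical userspace calls into the dispatcher above (glibc wrapper
 * assumed):
 *
 *	struct shmid_ds ds;
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("nattch=%lu\n", (unsigned long)ds.shm_nattch);
 *	shmctl(id, IPC_RMID, NULL);
 *
 * IPC_RMID marks the segment for destruction; the memory itself goes
 * away once the last attachment is gone.
 */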
1295
1296#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
1297long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
1298{
1299	int version = ipc_parse_version(&cmd);
1300
1301	return ksys_shmctl(shmid, cmd, buf, version);
1302}
1303
1304SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1305{
1306	return ksys_old_shmctl(shmid, cmd, buf);
1307}
1308#endif
1309
1310#ifdef CONFIG_COMPAT
1311
1312struct compat_shmid_ds {
1313	struct compat_ipc_perm shm_perm;
1314	int shm_segsz;
1315	old_time32_t shm_atime;
1316	old_time32_t shm_dtime;
1317	old_time32_t shm_ctime;
1318	compat_ipc_pid_t shm_cpid;
1319	compat_ipc_pid_t shm_lpid;
1320	unsigned short shm_nattch;
1321	unsigned short shm_unused;
1322	compat_uptr_t shm_unused2;
1323	compat_uptr_t shm_unused3;
1324};
1325
1326struct compat_shminfo64 {
1327	compat_ulong_t shmmax;
1328	compat_ulong_t shmmin;
1329	compat_ulong_t shmmni;
1330	compat_ulong_t shmseg;
1331	compat_ulong_t shmall;
1332	compat_ulong_t __unused1;
1333	compat_ulong_t __unused2;
1334	compat_ulong_t __unused3;
1335	compat_ulong_t __unused4;
1336};
1337
1338struct compat_shm_info {
1339	compat_int_t used_ids;
1340	compat_ulong_t shm_tot, shm_rss, shm_swp;
1341	compat_ulong_t swap_attempts, swap_successes;
1342};
1343
1344static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
1345					int version)
1346{
1347	if (in->shmmax > INT_MAX)
1348		in->shmmax = INT_MAX;
1349	if (version == IPC_64) {
1350		struct compat_shminfo64 info;
1351		memset(&info, 0, sizeof(info));
1352		info.shmmax = in->shmmax;
1353		info.shmmin = in->shmmin;
1354		info.shmmni = in->shmmni;
1355		info.shmseg = in->shmseg;
1356		info.shmall = in->shmall;
1357		return copy_to_user(buf, &info, sizeof(info));
1358	} else {
1359		struct shminfo info;
1360		memset(&info, 0, sizeof(info));
1361		info.shmmax = in->shmmax;
1362		info.shmmin = in->shmmin;
1363		info.shmmni = in->shmmni;
1364		info.shmseg = in->shmseg;
1365		info.shmall = in->shmall;
1366		return copy_to_user(buf, &info, sizeof(info));
1367	}
1368}
1369
1370static int put_compat_shm_info(struct shm_info *ip,
1371				struct compat_shm_info __user *uip)
1372{
1373	struct compat_shm_info info;
1374
1375	memset(&info, 0, sizeof(info));
1376	info.used_ids = ip->used_ids;
1377	info.shm_tot = ip->shm_tot;
1378	info.shm_rss = ip->shm_rss;
1379	info.shm_swp = ip->shm_swp;
1380	info.swap_attempts = ip->swap_attempts;
1381	info.swap_successes = ip->swap_successes;
1382	return copy_to_user(uip, &info, sizeof(info));
1383}
1384
1385static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
1386					int version)
1387{
1388	if (version == IPC_64) {
1389		struct compat_shmid64_ds v;
1390		memset(&v, 0, sizeof(v));
1391		to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
1392		v.shm_atime	 = lower_32_bits(in->shm_atime);
1393		v.shm_atime_high = upper_32_bits(in->shm_atime);
1394		v.shm_dtime	 = lower_32_bits(in->shm_dtime);
1395		v.shm_dtime_high = upper_32_bits(in->shm_dtime);
1396		v.shm_ctime	 = lower_32_bits(in->shm_ctime);
1397		v.shm_ctime_high = upper_32_bits(in->shm_ctime);
1398		v.shm_segsz = in->shm_segsz;
1399		v.shm_nattch = in->shm_nattch;
1400		v.shm_cpid = in->shm_cpid;
1401		v.shm_lpid = in->shm_lpid;
1402		return copy_to_user(buf, &v, sizeof(v));
1403	} else {
1404		struct compat_shmid_ds v;
1405		memset(&v, 0, sizeof(v));
1406		to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
1407		v.shm_perm.key = in->shm_perm.key;
1408		v.shm_atime = in->shm_atime;
1409		v.shm_dtime = in->shm_dtime;
1410		v.shm_ctime = in->shm_ctime;
1411		v.shm_segsz = in->shm_segsz;
1412		v.shm_nattch = in->shm_nattch;
1413		v.shm_cpid = in->shm_cpid;
1414		v.shm_lpid = in->shm_lpid;
1415		return copy_to_user(buf, &v, sizeof(v));
1416	}
1417}
1418
1419static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
1420					int version)
1421{
1422	memset(out, 0, sizeof(*out));
1423	if (version == IPC_64) {
1424		struct compat_shmid64_ds __user *p = buf;
1425		return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
1426	} else {
1427		struct compat_shmid_ds __user *p = buf;
1428		return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
1429	}
1430}
1431
1432static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version)
1433{
1434	struct ipc_namespace *ns;
1435	struct shmid64_ds sem64;
1436	int err;
1437
1438	ns = current->nsproxy->ipc_ns;
1439
1440	if (cmd < 0 || shmid < 0)
1441		return -EINVAL;
1442
1443	switch (cmd) {
1444	case IPC_INFO: {
1445		struct shminfo64 shminfo;
1446		err = shmctl_ipc_info(ns, &shminfo);
1447		if (err < 0)
1448			return err;
1449		if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
1450			err = -EFAULT;
1451		return err;
1452	}
1453	case SHM_INFO: {
1454		struct shm_info shm_info;
1455		err = shmctl_shm_info(ns, &shm_info);
1456		if (err < 0)
1457			return err;
1458		if (put_compat_shm_info(&shm_info, uptr))
1459			err = -EFAULT;
1460		return err;
1461	}
1462	case IPC_STAT:
1463	case SHM_STAT_ANY:
1464	case SHM_STAT:
1465		err = shmctl_stat(ns, shmid, cmd, &sem64);
1466		if (err < 0)
1467			return err;
1468		if (copy_compat_shmid_to_user(uptr, &sem64, version))
1469			err = -EFAULT;
1470		return err;
1471
1472	case IPC_SET:
1473		if (copy_compat_shmid_from_user(&sem64, uptr, version))
1474			return -EFAULT;
1475		fallthrough;
1476	case IPC_RMID:
1477		return shmctl_down(ns, shmid, cmd, &sem64);
1478	case SHM_LOCK:
1479	case SHM_UNLOCK:
1480		return shmctl_do_lock(ns, shmid, cmd);
1481	default:
1482		return -EINVAL;
1483	}
1484	return err;
1485}
1486
1487COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
1488{
1489	return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64);
1490}
1491
1492#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
1493long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr)
1494{
1495	int version = compat_ipc_parse_version(&cmd);
1496
1497	return compat_ksys_shmctl(shmid, cmd, uptr, version);
1498}
1499
1500COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr)
1501{
1502	return compat_ksys_old_shmctl(shmid, cmd, uptr);
1503}
1504#endif
1505#endif
1506
1507/*
1508 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1509 *
1510 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1511 * "raddr" thing points to kernel space, and there has to be a wrapper around
1512 * this.
1513 */
1514long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1515	      ulong *raddr, unsigned long shmlba)
1516{
1517	struct shmid_kernel *shp;
1518	unsigned long addr = (unsigned long)shmaddr;
1519	unsigned long size;
1520	struct file *file, *base;
1521	int    err;
1522	unsigned long flags = MAP_SHARED;
1523	unsigned long prot;
1524	int acc_mode;
1525	struct ipc_namespace *ns;
1526	struct shm_file_data *sfd;
1527	int f_flags;
1528	unsigned long populate = 0;
1529
1530	err = -EINVAL;
1531	if (shmid < 0)
1532		goto out;
1533
1534	if (addr) {
1535		if (addr & (shmlba - 1)) {
1536			if (shmflg & SHM_RND) {
1537				addr &= ~(shmlba - 1);  /* round down */
1538
 1539				/*
 1540				 * Ensure that the rounded-down address is
 1541				 * non-zero when remapping. This can happen
 1542				 * when addr < shmlba.
 1543				 */
1544				if (!addr && (shmflg & SHM_REMAP))
1545					goto out;
1546			} else
1547#ifndef __ARCH_FORCE_SHMLBA
1548				if (addr & ~PAGE_MASK)
1549#endif
1550					goto out;
1551		}
1552
1553		flags |= MAP_FIXED;
1554	} else if ((shmflg & SHM_REMAP))
1555		goto out;
1556
1557	if (shmflg & SHM_RDONLY) {
1558		prot = PROT_READ;
1559		acc_mode = S_IRUGO;
1560		f_flags = O_RDONLY;
1561	} else {
1562		prot = PROT_READ | PROT_WRITE;
1563		acc_mode = S_IRUGO | S_IWUGO;
1564		f_flags = O_RDWR;
1565	}
1566	if (shmflg & SHM_EXEC) {
1567		prot |= PROT_EXEC;
1568		acc_mode |= S_IXUGO;
1569	}
1570
1571	/*
1572	 * We cannot rely on the fs check since SYSV IPC does have an
1573	 * additional creator id...
1574	 */
1575	ns = current->nsproxy->ipc_ns;
1576	rcu_read_lock();
1577	shp = shm_obtain_object_check(ns, shmid);
1578	if (IS_ERR(shp)) {
1579		err = PTR_ERR(shp);
1580		goto out_unlock;
1581	}
1582
1583	err = -EACCES;
1584	if (ipcperms(ns, &shp->shm_perm, acc_mode))
1585		goto out_unlock;
1586
1587	err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
1588	if (err)
1589		goto out_unlock;
1590
1591	ipc_lock_object(&shp->shm_perm);
1592
1593	/* check if shm_destroy() is tearing down shp */
1594	if (!ipc_valid_object(&shp->shm_perm)) {
1595		ipc_unlock_object(&shp->shm_perm);
1596		err = -EIDRM;
1597		goto out_unlock;
1598	}
1599
1600	/*
1601	 * We need to take a reference to the real shm file to prevent the
1602	 * pointer from becoming stale in cases where the lifetime of the outer
1603	 * file extends beyond that of the shm segment.  It's not usually
1604	 * possible, but it can happen during remap_file_pages() emulation as
1605	 * that unmaps the memory, then does ->mmap() via file reference only.
1606	 * We'll deny the ->mmap() if the shm segment was since removed, but to
1607	 * detect shm ID reuse we need to compare the file pointers.
1608	 */
1609	base = get_file(shp->shm_file);
1610	shp->shm_nattch++;
1611	size = i_size_read(file_inode(base));
1612	ipc_unlock_object(&shp->shm_perm);
1613	rcu_read_unlock();
1614
1615	err = -ENOMEM;
1616	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1617	if (!sfd) {
1618		fput(base);
1619		goto out_nattch;
1620	}
1621
1622	file = alloc_file_clone(base, f_flags,
1623			  is_file_hugepages(base) ?
1624				&shm_file_operations_huge :
1625				&shm_file_operations);
1626	err = PTR_ERR(file);
1627	if (IS_ERR(file)) {
1628		kfree(sfd);
1629		fput(base);
1630		goto out_nattch;
1631	}
1632
1633	sfd->id = shp->shm_perm.id;
1634	sfd->ns = get_ipc_ns(ns);
1635	sfd->file = base;
1636	sfd->vm_ops = NULL;
1637	file->private_data = sfd;
1638
1639	err = security_mmap_file(file, prot, flags);
1640	if (err)
1641		goto out_fput;
1642
1643	if (mmap_write_lock_killable(current->mm)) {
1644		err = -EINTR;
1645		goto out_fput;
1646	}
1647
1648	if (addr && !(shmflg & SHM_REMAP)) {
1649		err = -EINVAL;
1650		if (addr + size < addr)
1651			goto invalid;
1652
1653		if (find_vma_intersection(current->mm, addr, addr + size))
1654			goto invalid;
1655	}
1656
1657	addr = do_mmap(file, addr, size, prot, flags, 0, 0, &populate, NULL);
1658	*raddr = addr;
1659	err = 0;
1660	if (IS_ERR_VALUE(addr))
1661		err = (long)addr;
1662invalid:
1663	mmap_write_unlock(current->mm);
1664	if (populate)
1665		mm_populate(addr, populate);
1666
1667out_fput:
1668	fput(file);
1669
1670out_nattch:
1671	down_write(&shm_ids(ns).rwsem);
1672	shp = shm_lock(ns, shmid);
1673	shp->shm_nattch--;
1674
1675	if (shm_may_destroy(shp))
1676		shm_destroy(ns, shp);
1677	else
1678		shm_unlock(shp);
1679	up_write(&shm_ids(ns).rwsem);
1680	return err;
1681
1682out_unlock:
1683	rcu_read_unlock();
1684out:
1685	return err;
1686}
1687
1688SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1689{
1690	unsigned long ret;
1691	long err;
1692
1693	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1694	if (err)
1695		return err;
1696	force_successful_syscall_return();
1697	return (long)ret;
1698}
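/*
 * Illustrative attach from userspace (glibc wrapper assumed; hint_addr
 * is a caller-chosen address hint): a NULL address lets the kernel pick
 * one, while a non-SHMLBA-aligned hint needs SHM_RND as handled in
 * do_shmat() above:
 *
 *	void *p = shmat(id, NULL, 0);
 *	void *q = shmat(id, hint_addr, SHM_RND | SHM_RDONLY);
 *	if (p == (void *)-1)
 *		perror("shmat");
 */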
1699
1700#ifdef CONFIG_COMPAT
1701
1702#ifndef COMPAT_SHMLBA
1703#define COMPAT_SHMLBA	SHMLBA
1704#endif
1705
1706COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
1707{
1708	unsigned long ret;
1709	long err;
1710
1711	err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
1712	if (err)
1713		return err;
1714	force_successful_syscall_return();
1715	return (long)ret;
1716}
1717#endif
1718
1719/*
1720 * detach and kill segment if marked destroyed.
1721 * The work is done in shm_close.
1722 */
1723long ksys_shmdt(char __user *shmaddr)
1724{
1725	struct mm_struct *mm = current->mm;
1726	struct vm_area_struct *vma;
1727	unsigned long addr = (unsigned long)shmaddr;
1728	int retval = -EINVAL;
1729#ifdef CONFIG_MMU
1730	loff_t size = 0;
1731	struct file *file;
1732	VMA_ITERATOR(vmi, mm, addr);
1733#endif
1734
1735	if (addr & ~PAGE_MASK)
1736		return retval;
1737
1738	if (mmap_write_lock_killable(mm))
1739		return -EINTR;
1740
1741	/*
1742	 * This function tries to be smart and unmap shm segments that
1743	 * were modified by partial mlock or munmap calls:
1744	 * - It first determines the size of the shm segment that should be
1745	 *   unmapped: It searches for a vma that is backed by shm and that
 1746	 *   started at address shmaddr. It records its size and then unmaps
1747	 *   it.
1748	 * - Then it unmaps all shm vmas that started at shmaddr and that
1749	 *   are within the initially determined size and that are from the
1750	 *   same shm segment from which we determined the size.
1751	 * Errors from do_munmap are ignored: the function only fails if
1752	 * it's called with invalid parameters or if it's called to unmap
1753	 * a part of a vma. Both calls in this function are for full vmas,
1754	 * the parameters are directly copied from the vma itself and always
1755	 * valid - therefore do_munmap cannot fail. (famous last words?)
1756	 */
1757	/*
1758	 * If it had been mremap()'d, the starting address would not
1759	 * match the usual checks anyway. So assume all vma's are
1760	 * above the starting address given.
1761	 */
1762
1763#ifdef CONFIG_MMU
1764	for_each_vma(vmi, vma) {
1765		/*
1766		 * Check if the starting address would match, i.e. it's
 1767		 * a fragment created by mprotect() and/or munmap(), or
 1768		 * otherwise it starts at this address with no hassles.
1769		 */
1770		if ((vma->vm_ops == &shm_vm_ops) &&
1771			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1772
1773			/*
1774			 * Record the file of the shm segment being
1775			 * unmapped.  With mremap(), someone could place
 1776			 * pages from another segment but with equal offsets
1777			 * in the range we are unmapping.
1778			 */
1779			file = vma->vm_file;
1780			size = i_size_read(file_inode(vma->vm_file));
1781			do_vmi_align_munmap(&vmi, vma, mm, vma->vm_start,
1782					    vma->vm_end, NULL, false);
1783			/*
1784			 * We discovered the size of the shm segment, so
1785			 * break out of here and fall through to the next
1786			 * loop that uses the size information to stop
1787			 * searching for matching vma's.
1788			 */
1789			retval = 0;
1790			vma = vma_next(&vmi);
1791			break;
1792		}
1793	}
1794
1795	/*
1796	 * We need look no further than the maximum address a fragment
1797	 * could possibly have landed at. Also cast things to loff_t to
1798	 * prevent overflows and make comparisons vs. equal-width types.
1799	 */
1800	size = PAGE_ALIGN(size);
1801	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1802		/* finding a matching vma now does not alter retval */
1803		if ((vma->vm_ops == &shm_vm_ops) &&
1804		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1805		    (vma->vm_file == file)) {
1806			do_vmi_align_munmap(&vmi, vma, mm, vma->vm_start,
1807					    vma->vm_end, NULL, false);
1808		}
1809
1810		vma = vma_next(&vmi);
1811	}
1812
1813#else	/* CONFIG_MMU */
1814	vma = vma_lookup(mm, addr);
1815	/* under NOMMU conditions, the exact address to be destroyed must be
1816	 * given
1817	 */
1818	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1819		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1820		retval = 0;
1821	}
1822
1823#endif
1824
1825	mmap_write_unlock(mm);
1826	return retval;
1827}
1828
1829SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1830{
1831	return ksys_shmdt(shmaddr);
1832}
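/*
 * The matching detach (glibc wrapper assumed); the address must be the
 * one returned by shmat(), which is what the vma matching in
 * ksys_shmdt() above relies on:
 *
 *	if (shmdt(p) != 0)
 *		perror("shmdt");
 */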
1833
1834#ifdef CONFIG_PROC_FS
1835static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1836{
1837	struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
1838	struct user_namespace *user_ns = seq_user_ns(s);
1839	struct kern_ipc_perm *ipcp = it;
1840	struct shmid_kernel *shp;
1841	unsigned long rss = 0, swp = 0;
1842
1843	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1844	shm_add_rss_swap(shp, &rss, &swp);
1845
1846#if BITS_PER_LONG <= 32
1847#define SIZE_SPEC "%10lu"
1848#else
1849#define SIZE_SPEC "%21lu"
1850#endif
1851
1852	seq_printf(s,
1853		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
1854		   "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
1855		   SIZE_SPEC " " SIZE_SPEC "\n",
1856		   shp->shm_perm.key,
1857		   shp->shm_perm.id,
1858		   shp->shm_perm.mode,
1859		   shp->shm_segsz,
1860		   pid_nr_ns(shp->shm_cprid, pid_ns),
1861		   pid_nr_ns(shp->shm_lprid, pid_ns),
1862		   shp->shm_nattch,
1863		   from_kuid_munged(user_ns, shp->shm_perm.uid),
1864		   from_kgid_munged(user_ns, shp->shm_perm.gid),
1865		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
1866		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
1867		   shp->shm_atim,
1868		   shp->shm_dtim,
1869		   shp->shm_ctim,
1870		   rss * PAGE_SIZE,
1871		   swp * PAGE_SIZE);
1872
1873	return 0;
1874}
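/*
 * One line per segment is emitted into /proc/sysvipc/shm in the field
 * order above (key, shmid, perms, size, cpid, lpid, nattch, uid, gid,
 * cuid, cgid, atime, dtime, ctime, rss, swap); the header row is
 * supplied when the proc interface is registered via
 * ipc_init_proc_interface().  Pids and ids are translated into the
 * reading task's namespaces, and sizes are in bytes.
 */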
1875#endif