v3.1
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch){
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init (void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}
/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	spin_lock(&ipcp->shm_perm.lock);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
						shp->mlock_user);
	fput (shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}
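/*
 * Editorial note (not in the original source): the second arm of this
 * predicate is driven from userspace, e.g.
 *
 *	sysctl -w kernel.shm_rmid_forced=1
 *
 * With the sysctl set, a segment is reclaimed as soon as its last
 * attachment goes away, even without an explicit IPC_RMID.
 */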

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file * file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_creator != current)
		return 0;

	/*
	 * Mark it as orphaned to destroy the segment when
	 * kernel.shm_rmid_forced is changed.
	 * It is noop if the following shm_may_destroy() returns true.
	 */
	shp->shm_creator = NULL;

	/*
	 * Don't even try to destroy it.  If shm_rmid_forced=0 and IPC_RMID
	 * is not set, it shouldn't be deleted here.
	 */
	if (!ns->shm_rmid_forced)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rw_mutex);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rw_mutex);
}


void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;

	if (shm_ids(ns).in_use == 0)
		return;

	/* Destroy all already created segments, but not mapped yet */
	down_write(&shm_ids(ns).rw_mutex);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
	up_write(&shm_ids(ns).rw_mutex);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};
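/*
 * Editorial note (not in the original source): shm_file_operations and
 * shm_vm_ops form a thin stacking layer.  The file created at attach
 * time carries a struct shm_file_data that remembers the underlying
 * shmem/hugetlbfs file and its vm_operations; shm_mmap(), shm_fault(),
 * shm_fsync() and friends simply forward to those after the SysV
 * bookkeeping (shm_nattch, shm_atim, ...) has been updated.
 */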

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */

static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf (name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, size, acctflag,
					&shp->mlock_user, HUGETLB_SHMFS_INODE);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if  ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
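/*
 * Illustrative userspace sketch (not part of this file): the call the
 * syscall above implements.  A minimal, hedged example; the key and
 * size are placeholders and error handling is trimmed.
 */
#if 0	/* example only, never compiled as part of shm.c */
#include <sys/ipc.h>
#include <sys/shm.h>

int example_create(void)
{
	/* Create (or find) a 1 MiB segment readable/writable by the owner. */
	int shmid = shmget((key_t)0x1234, 1024 * 1024, IPC_CREAT | 0600);
	return shmid;	/* -1 with errno set on failure */
}
#endif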

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if(in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
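/*
 * Editorial note (not in the original source): the three copy helpers
 * above implement the two ABI flavours of shmctl().  IPC_64 copies the
 * modern shmid64_ds/shminfo64 layouts verbatim, while IPC_OLD converts
 * to the legacy structures for old binaries.  Which flavour applies is
 * decoded from the command word by ipc_parse_version() in the shmctl
 * syscall below.
 */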

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = shp->shm_file->f_path.dentry->d_inode;

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
			       &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock;
	switch (cmd) {
	case IPC_RMID:
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&shmid64.shm_perm, ipcp);
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	shm_unlock(shp);
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if(copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if(err<0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if(copy_shmid_to_user (buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *uninitialized_var(shm_file);

		lru_add_drain_all();  /* drain pagevecs to lru lists */

		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		audit_ipc_obj(&(shp->shm_perm));

		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			uid_t euid = current_euid();
			err = -EPERM;
			if (euid != shp->shm_perm.uid &&
			    euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if(cmd==SHM_LOCK) {
			struct user_struct *user = current_user();
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	case IPC_SET:
		err = shmctl_down(ns, shmid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	shm_unlock(shp);
out:
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int    err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	if (!file)
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap (file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	path_put(&path);
	goto out_nattch;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}
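/*
 * Illustrative userspace sketch (not part of this file): the full
 * attach/detach lifecycle served by shmat() and shmdt() above.  A
 * hedged example; identifiers and sizes are placeholders.
 */
#if 0	/* example only, never compiled as part of shm.c */
#include <sys/ipc.h>
#include <sys/shm.h>
#include <string.h>

int example_roundtrip(int shmid)
{
	/* Attach at a kernel-chosen address, read/write. */
	void *p = shmat(shmid, NULL, 0);
	if (p == (void *)-1)
		return -1;
	memset(p, 0, 4096);		/* use the mapping */
	shmdt(p);			/* detach: ends up in shm_close() */
	/* Mark for destruction once the last attach goes away. */
	return shmctl(shmid, IPC_RMID, NULL);
}
#endif
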
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif

v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <uapi/linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
#include <linux/rhashtable.h>

#include <linux/uaccess.h>

#include "util.h"

struct shmid_kernel /* private to the kernel */
{
	struct kern_ipc_perm	shm_perm;
	struct file		*shm_file;
	unsigned long		shm_nattch;
	unsigned long		shm_segsz;
	time64_t		shm_atim;
	time64_t		shm_dtim;
	time64_t		shm_ctim;
	struct pid		*shm_cprid;
	struct pid		*shm_lprid;
	struct ucounts		*mlock_ucounts;

	/*
	 * The task created the shm object, for
	 * task_lock(shp->shm_creator)
	 */
	struct task_struct	*shm_creator;

	/*
	 * List by creator. task_lock(->shm_creator) required for read/write.
	 * If list_empty(), then the creator is dead already.
	 */
	struct list_head	shm_clist;
	struct ipc_namespace	*ns;
} __randomize_layout;

/* shm_mode upper byte flags */
#define SHM_DEST	01000	/* segment will be destroyed on last detach */
#define SHM_LOCKED	02000   /* segment will not be swapped */

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	WARN_ON(ns != shp->ns);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp;

	rcu_read_lock();
	ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
	if (IS_ERR(ipcp))
		goto err;

	ipc_lock_object(ipcp);
	/*
	 * ipc_rmid() may have already freed the ID while ipc_lock_object()
	 * was spinning: here verify that the structure is still valid.
	 * Upon races with RMID, return -EIDRM, thus indicating that
	 * the ID points to a removed identifier.
	 */
	if (ipc_valid_object(ipcp)) {
		/* return a locked ipc object upon success */
		return container_of(ipcp, struct shmid_kernel, shm_perm);
	}

	ipc_unlock_object(ipcp);
	ipcp = ERR_PTR(-EIDRM);
err:
	rcu_read_unlock();
	/*
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * object pointer and error out as appropriate.
	 */
	return ERR_CAST(ipcp);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
							rcu);
	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
							shm_perm);
	security_shm_free(&shp->shm_perm);
	kfree(shp);
}

/*
 * It has to be called with shp locked.
 * It must be called before ipc_rmid()
 */
static inline void shm_clist_rm(struct shmid_kernel *shp)
{
	struct task_struct *creator;

	/* ensure that shm_creator does not disappear */
	rcu_read_lock();

	/*
	 * A concurrent exit_shm may do a list_del_init() as well.
	 * Just do nothing if exit_shm already did the work
	 */
	if (!list_empty(&shp->shm_clist)) {
		/*
		 * shp->shm_creator is guaranteed to be valid *only*
		 * if shp->shm_clist is not empty.
		 */
		creator = shp->shm_creator;

		task_lock(creator);
		/*
		 * list_del_init() is a nop if the entry was already removed
		 * from the list.
		 */
		list_del_init(&shp->shm_clist);
		task_unlock(creator);
	}
	rcu_read_unlock();
}

static inline void shm_rmid(struct shmid_kernel *s)
{
	shm_clist_rm(s);
	ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
}


static int __shm_open(struct shm_file_data *sfd)
{
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);

	if (IS_ERR(shp))
		return PTR_ERR(shp);

	if (shp->shm_file != sfd->file) {
		/* ID was reused */
		shm_unlock(shp);
		return -EINVAL;
	}

	shp->shm_atim = ktime_get_real_seconds();
	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_nattch++;
	shm_unlock(shp);
	return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err;

	/* Always call underlying open if present */
	if (sfd->vm_ops->open)
		sfd->vm_ops->open(vma);

	err = __shm_open(sfd);
	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	WARN_ON_ONCE(err);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_ucounts);
	fput(shm_file);
	ipc_update_pid(&shp->shm_cprid, NULL);
	ipc_update_pid(&shp->shm_lprid, NULL);
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (shp->ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void __shm_close(struct shm_file_data *sfd)
{
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_dtim = ktime_get_real_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
}

static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	/* Always call underlying close if present */
	if (sfd->vm_ops->close)
		sfd->vm_ops->close(vma);

	__shm_close(sfd);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (!list_empty(&shp->shm_clist))
		return 0;

	if (shm_may_destroy(shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	for (;;) {
		struct shmid_kernel *shp;
		struct ipc_namespace *ns;

		task_lock(task);

		if (list_empty(&task->sysvshm.shm_clist)) {
			task_unlock(task);
			break;
		}

		shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
				shm_clist);

		/*
		 * 1) Get a pointer to the ipc namespace. This pointer is
		 * guaranteed to be valid because the shp lifetime is always
		 * shorter than the lifetime of the namespace in which shp
		 * lives. Since we hold task_lock, shp won't be freed.
		 */
		ns = shp->ns;

		/*
		 * 2) If kernel.shm_rmid_forced is not set then only keep track of
		 * which shmids are orphaned, so that a later set of the sysctl
		 * can clean them up.
		 */
		if (!ns->shm_rmid_forced)
			goto unlink_continue;

		/*
		 * 3) get a reference to the namespace.
		 *    The refcount could be already 0. If it is 0, then
		 *    the shm objects will be freed by free_ipc_work().
		 */
		ns = get_ipc_ns_not_zero(ns);
		if (!ns) {
unlink_continue:
			list_del_init(&shp->shm_clist);
			task_unlock(task);
			continue;
		}

		/*
		 * 4) get a reference to shp.
		 *   This cannot fail: shm_clist_rm() is called before
		 *   ipc_rmid(), thus the refcount cannot be 0.
		 */
		WARN_ON(!ipc_rcu_getref(&shp->shm_perm));

		/*
		 * 5) unlink the shm segment from the list of segments
		 *    created by current.
		 *    This must be done last. After unlinking,
		 *    only the refcounts obtained above prevent IPC_RMID
		 *    from destroying the segment or the namespace.
		 */
		list_del_init(&shp->shm_clist);

		task_unlock(task);

		/*
		 * 6) we have all references
		 *    Thus lock & if needed destroy shp.
		 */
		down_write(&shm_ids(ns).rwsem);
		shm_lock_by_ptr(shp);
		/*
		 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
		 * safe to call ipc_rcu_putref here
		 */
		ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);

		if (ipc_valid_object(&shp->shm_perm)) {
			if (shm_may_destroy(shp))
				shm_destroy(ns, shp);
			else
				shm_unlock(shp);
		} else {
			/*
			 * Someone else deleted the shp from namespace
			 * idr/kht while we have waited.
			 * Just unlock and continue.
			 */
			shm_unlock(shp);
		}

		up_write(&shm_ids(ns).rwsem);
		put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
	}
}

static vm_fault_t shm_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vmf);
}

static int shm_may_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	if (sfd->vm_ops->may_split)
		return sfd->vm_ops->may_split(vma, addr);

	return 0;
}

static unsigned long shm_pagesize(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	if (sfd->vm_ops->pagesize)
		return sfd->vm_ops->pagesize(vma);

	return PAGE_SIZE;
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct shm_file_data *sfd = shm_file_data(vma->vm_file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, mpol);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx)
{
	struct shm_file_data *sfd = shm_file_data(vma->vm_file);
	struct mempolicy *mpol = vma->vm_policy;

	if (sfd->vm_ops->get_policy)
		mpol = sfd->vm_ops->get_policy(vma, addr, ilx);
	return mpol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	/*
	 * In case of remap_file_pages() emulation, the file can represent an
	 * IPC ID that was removed, and possibly even reused by another shm
	 * segment already.  Propagate this case as an error to caller.
	 */
	ret = __shm_open(sfd);
	if (ret)
		return ret;

	ret = call_mmap(sfd->file, vma);
	if (ret) {
		__shm_close(sfd);
		return ret;
	}
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	return 0;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	fput(sfd->file);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
	.may_split = shm_may_split,
	.pagesize = shm_pagesize,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};
 693
 694/**
 695 * newseg - Create a new shared memory segment
 696 * @ns: namespace
 697 * @params: ptr to the structure that contains key, size and shmflg
 698 *
 699 * Called with shm_ids.rwsem held as a writer.
 700 */
 701static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 702{
 703	key_t key = params->key;
 704	int shmflg = params->flg;
 705	size_t size = params->u.size;
 706	int error;
 707	struct shmid_kernel *shp;
 708	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 709	struct file *file;
 710	char name[13];
 711	vm_flags_t acctflag = 0;
 712
 713	if (size < SHMMIN || size > ns->shm_ctlmax)
 714		return -EINVAL;
 715
 716	if (numpages << PAGE_SHIFT < size)
 717		return -ENOSPC;
 718
 719	if (ns->shm_tot + numpages < ns->shm_tot ||
 720			ns->shm_tot + numpages > ns->shm_ctlall)
 721		return -ENOSPC;
 722
 723	shp = kmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
 724	if (unlikely(!shp))
 725		return -ENOMEM;
 726
 727	shp->shm_perm.key = key;
 728	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
 729	shp->mlock_ucounts = NULL;
 730
 731	shp->shm_perm.security = NULL;
 732	error = security_shm_alloc(&shp->shm_perm);
 733	if (error) {
 734		kfree(shp);
 735		return error;
 736	}
 737
 738	sprintf(name, "SYSV%08x", key);
 739	if (shmflg & SHM_HUGETLB) {
 740		struct hstate *hs;
 741		size_t hugesize;
 742
 743		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 744		if (!hs) {
 745			error = -EINVAL;
 746			goto no_file;
 747		}
 748		hugesize = ALIGN(size, huge_page_size(hs));
 749
 750		/* hugetlb_file_setup applies strict accounting */
 751		if (shmflg & SHM_NORESERVE)
 752			acctflag = VM_NORESERVE;
 753		file = hugetlb_file_setup(name, hugesize, acctflag,
 754				HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 755	} else {
 756		/*
 757	 * Do not allow unaccounted (SHM_NORESERVE) memory under strict
 758	 * overcommit (OVERCOMMIT_NEVER), even if it's asked for.
 759		 */
 760	if ((shmflg & SHM_NORESERVE) &&
 761				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
 762			acctflag = VM_NORESERVE;
 763		file = shmem_kernel_file_setup(name, size, acctflag);
 764	}
 765	error = PTR_ERR(file);
 766	if (IS_ERR(file))
 767		goto no_file;
 768
 769	shp->shm_cprid = get_pid(task_tgid(current));
 770	shp->shm_lprid = NULL;
 771	shp->shm_atim = shp->shm_dtim = 0;
 772	shp->shm_ctim = ktime_get_real_seconds();
 773	shp->shm_segsz = size;
 774	shp->shm_nattch = 0;
 775	shp->shm_file = file;
 776	shp->shm_creator = current;
 777
 778	/* ipc_addid() locks shp upon success. */
 779	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
 780	if (error < 0)
 781		goto no_id;
 782
 783	shp->ns = ns;
 784
 785	task_lock(current);
 786	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
 787	task_unlock(current);
 788
 789	/*
 790	 * shmid gets reported as "inode#" in /proc/pid/maps.
 791	 * proc-ps tools use this. Changing this will break them.
 792	 */
 793	file_inode(file)->i_ino = shp->shm_perm.id;
 794
 795	ns->shm_tot += numpages;
 796	error = shp->shm_perm.id;
 797
 798	ipc_unlock_object(&shp->shm_perm);
 799	rcu_read_unlock();
 800	return error;
 801
 802no_id:
 803	ipc_update_pid(&shp->shm_cprid, NULL);
 804	ipc_update_pid(&shp->shm_lprid, NULL);
 805	fput(file);
 806	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
 807	return error;
 808no_file:
 809	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
 810	return error;
 811}
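/*
 * Worked example of the size checks in newseg() above, assuming a
 * 4 KiB PAGE_SIZE (PAGE_SHIFT == 12): a request of size = 10000 bytes
 * yields numpages = (10000 + 4095) >> 12 = 3, so the segment charges
 * three pages against ns->shm_ctlall. The "numpages << PAGE_SHIFT <
 * size" test rejects sizes so large that the page rounding itself
 * wrapped around, and the ns->shm_tot checks catch both quota
 * exhaustion and overflow of the running total.
 */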
 812
 813/*
 814 * Called with shm_ids.rwsem and ipcp locked.
 815 */
 816static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
 817{
 818	struct shmid_kernel *shp;
 819
 820	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 821	if (shp->shm_segsz < params->u.size)
 822		return -EINVAL;
 823
 824	return 0;
 825}
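/*
 * User-visible effect of shm_more_checks() (illustrative sketch, not
 * kernel code; the key value is arbitrary): asking for more bytes than
 * an existing segment provides fails, while asking for less succeeds:
 *
 *	int a = shmget(0x1234, 4096, IPC_CREAT | 0600);	// creates
 *	int b = shmget(0x1234, 8192, 0600);	// -1, errno == EINVAL
 *	int c = shmget(0x1234, 2048, 0600);	// same id as 'a'
 */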
 826
 827long ksys_shmget(key_t key, size_t size, int shmflg)
 828{
 829	struct ipc_namespace *ns;
 830	static const struct ipc_ops shm_ops = {
 831		.getnew = newseg,
 832		.associate = security_shm_associate,
 833		.more_checks = shm_more_checks,
 834	};
 835	struct ipc_params shm_params;
 836
 837	ns = current->nsproxy->ipc_ns;
 838
 839	shm_params.key = key;
 840	shm_params.flg = shmflg;
 841	shm_params.u.size = size;
 842
 843	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
 844}
 845
 846SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
 847{
 848	return ksys_shmget(key, size, shmflg);
 849}
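/*
 * Illustrative userspace sketch (not kernel code) of the full
 * lifecycle built on this syscall; error handling omitted:
 *
 *	#include <sys/shm.h>
 *	#include <string.h>
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	// attach at a kernel-chosen address
 *	strcpy(p, "hello");		// plain memory access
 *	shmdt(p);			// detach
 *	shmctl(id, IPC_RMID, NULL);	// mark for destruction
 */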
 850
 851static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
 852{
 853	switch (version) {
 854	case IPC_64:
 855		return copy_to_user(buf, in, sizeof(*in));
 856	case IPC_OLD:
 857	    {
 858		struct shmid_ds out;
 859
 860		memset(&out, 0, sizeof(out));
 861		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
 862		out.shm_segsz	= in->shm_segsz;
 863		out.shm_atime	= in->shm_atime;
 864		out.shm_dtime	= in->shm_dtime;
 865		out.shm_ctime	= in->shm_ctime;
 866		out.shm_cpid	= in->shm_cpid;
 867		out.shm_lpid	= in->shm_lpid;
 868		out.shm_nattch	= in->shm_nattch;
 869
 870		return copy_to_user(buf, &out, sizeof(out));
 871	    }
 872	default:
 873		return -EINVAL;
 874	}
 875}
 876
 877static inline unsigned long
 878copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
 879{
 880	switch (version) {
 881	case IPC_64:
 882		if (copy_from_user(out, buf, sizeof(*out)))
 883			return -EFAULT;
 884		return 0;
 885	case IPC_OLD:
 886	    {
 887		struct shmid_ds tbuf_old;
 888
 889		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 890			return -EFAULT;
 891
 892		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
 893		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
 894		out->shm_perm.mode	= tbuf_old.shm_perm.mode;
 895
 896		return 0;
 897	    }
 898	default:
 899		return -EINVAL;
 900	}
 901}
 902
 903static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
 904{
 905	switch (version) {
 906	case IPC_64:
 907		return copy_to_user(buf, in, sizeof(*in));
 908	case IPC_OLD:
 909	    {
 910		struct shminfo out;
 911
 912		if (in->shmmax > INT_MAX)
 913			out.shmmax = INT_MAX;
 914		else
 915			out.shmmax = (int)in->shmmax;
 916
 917		out.shmmin	= in->shmmin;
 918		out.shmmni	= in->shmmni;
 919		out.shmseg	= in->shmseg;
 920		out.shmall	= in->shmall;
 921
 922		return copy_to_user(buf, &out, sizeof(out));
 923	    }
 924	default:
 925		return -EINVAL;
 926	}
 927}
 928
 929/*
 930 * Calculate and add used RSS and swap pages of a shm.
 931 * Called with shm_ids.rwsem held as a reader
 932 */
 933static void shm_add_rss_swap(struct shmid_kernel *shp,
 934	unsigned long *rss_add, unsigned long *swp_add)
 935{
 936	struct inode *inode;
 937
 938	inode = file_inode(shp->shm_file);
 939
 940	if (is_file_hugepages(shp->shm_file)) {
 941		struct address_space *mapping = inode->i_mapping;
 942		struct hstate *h = hstate_file(shp->shm_file);
 943		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
 944	} else {
 945#ifdef CONFIG_SHMEM
 946		struct shmem_inode_info *info = SHMEM_I(inode);
 947
 948		spin_lock_irq(&info->lock);
 949		*rss_add += inode->i_mapping->nrpages;
 950		*swp_add += info->swapped;
 951		spin_unlock_irq(&info->lock);
 952#else
 953		*rss_add += inode->i_mapping->nrpages;
 954#endif
 955	}
 956}
 957
 958/*
 959 * Called with shm_ids.rwsem held as a reader
 960 */
 961static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
 962		unsigned long *swp)
 963{
 964	int next_id;
 965	int total, in_use;
 966
 967	*rss = 0;
 968	*swp = 0;
 969
 970	in_use = shm_ids(ns).in_use;
 971
 972	for (total = 0, next_id = 0; total < in_use; next_id++) {
 973		struct kern_ipc_perm *ipc;
 974		struct shmid_kernel *shp;
 975
 976		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
 977		if (ipc == NULL)
 978			continue;
 979		shp = container_of(ipc, struct shmid_kernel, shm_perm);
 980
 981		shm_add_rss_swap(shp, rss, swp);
 982
 983		total++;
 984	}
 985}
 986
 987/*
 988 * This function handles some shmctl commands which require the rwsem
 989 * to be held in write mode.
 990 * NOTE: the caller must hold no locks; the rwsem is taken inside this function.
 991 */
 992static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
 993		       struct shmid64_ds *shmid64)
 994{
 995	struct kern_ipc_perm *ipcp;
 996	struct shmid_kernel *shp;
 997	int err;
 998
 999	down_write(&shm_ids(ns).rwsem);
1000	rcu_read_lock();
1001
1002	ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd,
1003				      &shmid64->shm_perm, 0);
1004	if (IS_ERR(ipcp)) {
1005		err = PTR_ERR(ipcp);
1006		goto out_unlock1;
1007	}
1008
1009	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1010
1011	err = security_shm_shmctl(&shp->shm_perm, cmd);
1012	if (err)
1013		goto out_unlock1;
1014
1015	switch (cmd) {
1016	case IPC_RMID:
1017		ipc_lock_object(&shp->shm_perm);
1018		/* do_shm_rmid unlocks the ipc object and rcu */
1019		do_shm_rmid(ns, ipcp);
1020		goto out_up;
1021	case IPC_SET:
1022		ipc_lock_object(&shp->shm_perm);
1023		err = ipc_update_perm(&shmid64->shm_perm, ipcp);
1024		if (err)
1025			goto out_unlock0;
1026		shp->shm_ctim = ktime_get_real_seconds();
1027		break;
1028	default:
1029		err = -EINVAL;
1030		goto out_unlock1;
1031	}
1032
1033out_unlock0:
1034	ipc_unlock_object(&shp->shm_perm);
1035out_unlock1:
1036	rcu_read_unlock();
1037out_up:
1038	up_write(&shm_ids(ns).rwsem);
1039	return err;
1040}
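/*
 * User-visible semantics of the IPC_RMID path above (illustrative
 * sketch, not kernel code): removing a segment that is still attached
 * only marks it for destruction and hides its key from further
 * lookups; the memory stays valid until the last detach:
 *
 *	void *p = shmat(id, NULL, 0);
 *	shmctl(id, IPC_RMID, NULL);	// marked, still mapped
 *	// ... p remains usable here ...
 *	shmdt(p);			// last detach: segment destroyed
 */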
1041
1042static int shmctl_ipc_info(struct ipc_namespace *ns,
1043			   struct shminfo64 *shminfo)
1044{
1045	int err = security_shm_shmctl(NULL, IPC_INFO);
1046	if (!err) {
1047		memset(shminfo, 0, sizeof(*shminfo));
1048		shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
1049		shminfo->shmmax = ns->shm_ctlmax;
1050		shminfo->shmall = ns->shm_ctlall;
1051		shminfo->shmmin = SHMMIN;
1052		down_read(&shm_ids(ns).rwsem);
1053		err = ipc_get_maxidx(&shm_ids(ns));
1054		up_read(&shm_ids(ns).rwsem);
1055		if (err < 0)
1056			err = 0;
1057	}
1058	return err;
1059}
1060
1061static int shmctl_shm_info(struct ipc_namespace *ns,
1062			   struct shm_info *shm_info)
1063{
1064	int err = security_shm_shmctl(NULL, SHM_INFO);
1065	if (!err) {
1066		memset(shm_info, 0, sizeof(*shm_info));
1067		down_read(&shm_ids(ns).rwsem);
1068		shm_info->used_ids = shm_ids(ns).in_use;
1069		shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
1070		shm_info->shm_tot = ns->shm_tot;
1071		shm_info->swap_attempts = 0;
1072		shm_info->swap_successes = 0;
1073		err = ipc_get_maxidx(&shm_ids(ns));
1074		up_read(&shm_ids(ns).rwsem);
1075		if (err < 0)
1076			err = 0;
1077	}
1078	return err;
1079}
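/*
 * Userspace sketch of SHM_INFO as served by shmctl_shm_info() above
 * (illustrative, not kernel code): the buffer is a struct shm_info
 * passed through the shmid_ds pointer, and the syscall returns the
 * highest used index rather than 0:
 *
 *	struct shm_info info;
 *	int maxidx = shmctl(0, SHM_INFO, (struct shmid_ds *)&info);
 *	// info.used_ids, info.shm_tot, info.shm_rss, info.shm_swp ...
 */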
1080
1081static int shmctl_stat(struct ipc_namespace *ns, int shmid,
1082			int cmd, struct shmid64_ds *tbuf)
1083{
1084	struct shmid_kernel *shp;
1085	int err;
1086
1087	memset(tbuf, 0, sizeof(*tbuf));
1088
1089	rcu_read_lock();
1090	if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
1091		shp = shm_obtain_object(ns, shmid);
1092		if (IS_ERR(shp)) {
1093			err = PTR_ERR(shp);
1094			goto out_unlock;
1095		}
1096	} else { /* IPC_STAT */
1097		shp = shm_obtain_object_check(ns, shmid);
1098		if (IS_ERR(shp)) {
1099			err = PTR_ERR(shp);
1100			goto out_unlock;
1101		}
1102	}
1103
1104	/*
1105	 * Semantically, SHM_STAT_ANY ought to be identical to
1106	 * the functionality provided by the /proc/sysvipc/
1107	 * interface. As such, only audit these calls and
1108	 * do not do traditional S_IRUGO permission checks on
1109	 * the ipc object.
1110	 */
1111	if (cmd == SHM_STAT_ANY)
1112		audit_ipc_obj(&shp->shm_perm);
1113	else {
1114		err = -EACCES;
1115		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
1116			goto out_unlock;
1117	}
1118
1119	err = security_shm_shmctl(&shp->shm_perm, cmd);
1120	if (err)
1121		goto out_unlock;
1122
1123	ipc_lock_object(&shp->shm_perm);
1124
1125	if (!ipc_valid_object(&shp->shm_perm)) {
1126		ipc_unlock_object(&shp->shm_perm);
1127		err = -EIDRM;
1128		goto out_unlock;
1129	}
1130
1131	kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
1132	tbuf->shm_segsz	= shp->shm_segsz;
1133	tbuf->shm_atime	= shp->shm_atim;
1134	tbuf->shm_dtime	= shp->shm_dtim;
1135	tbuf->shm_ctime	= shp->shm_ctim;
1136#ifndef CONFIG_64BIT
1137	tbuf->shm_atime_high = shp->shm_atim >> 32;
1138	tbuf->shm_dtime_high = shp->shm_dtim >> 32;
1139	tbuf->shm_ctime_high = shp->shm_ctim >> 32;
1140#endif
1141	tbuf->shm_cpid	= pid_vnr(shp->shm_cprid);
1142	tbuf->shm_lpid	= pid_vnr(shp->shm_lprid);
1143	tbuf->shm_nattch = shp->shm_nattch;
1144
1145	if (cmd == IPC_STAT) {
1146		/*
1147		 * As defined in SUS:
1148		 * Return 0 on success
1149		 */
1150		err = 0;
1151	} else {
1152		/*
1153		 * SHM_STAT and SHM_STAT_ANY (both Linux specific)
1154		 * Return the full id, including the sequence number
1155		 */
1156		err = shp->shm_perm.id;
1157	}
1158
1159	ipc_unlock_object(&shp->shm_perm);
1160out_unlock:
1161	rcu_read_unlock();
1162	return err;
1163}
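/*
 * Illustrative userspace contrast of the return values computed in
 * shmctl_stat() (not kernel code): IPC_STAT takes a shmid and returns
 * 0, while SHM_STAT takes a kernel-internal index and returns the full
 * id, including the sequence number:
 *
 *	struct shmid_ds ds;
 *	int r   = shmctl(id, IPC_STAT, &ds);	// 0 on success
 *	int id0 = shmctl(0, SHM_STAT, &ds);	// id at index 0, or -1
 */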
1164
1165static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
1166{
1167	struct shmid_kernel *shp;
1168	struct file *shm_file;
1169	int err;
1170
1171	rcu_read_lock();
1172	shp = shm_obtain_object_check(ns, shmid);
1173	if (IS_ERR(shp)) {
1174		err = PTR_ERR(shp);
1175		goto out_unlock1;
1176	}
1177
1178	audit_ipc_obj(&(shp->shm_perm));
1179	err = security_shm_shmctl(&shp->shm_perm, cmd);
1180	if (err)
1181		goto out_unlock1;
1182
1183	ipc_lock_object(&shp->shm_perm);
1184
1185	/* check if shm_destroy() is tearing down shp */
1186	if (!ipc_valid_object(&shp->shm_perm)) {
1187		err = -EIDRM;
1188		goto out_unlock0;
1189	}
1190
1191	if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1192		kuid_t euid = current_euid();
1193
1194		if (!uid_eq(euid, shp->shm_perm.uid) &&
1195		    !uid_eq(euid, shp->shm_perm.cuid)) {
1196			err = -EPERM;
1197			goto out_unlock0;
1198		}
1199		if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1200			err = -EPERM;
1201			goto out_unlock0;
1202		}
1203	}
1204
1205	shm_file = shp->shm_file;
1206	if (is_file_hugepages(shm_file))
1207		goto out_unlock0;
1208
1209	if (cmd == SHM_LOCK) {
1210		struct ucounts *ucounts = current_ucounts();
1211
1212		err = shmem_lock(shm_file, 1, ucounts);
1213		if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1214			shp->shm_perm.mode |= SHM_LOCKED;
1215			shp->mlock_ucounts = ucounts;
1216		}
1217		goto out_unlock0;
1218	}
1219
1220	/* SHM_UNLOCK */
1221	if (!(shp->shm_perm.mode & SHM_LOCKED))
1222		goto out_unlock0;
1223	shmem_lock(shm_file, 0, shp->mlock_ucounts);
1224	shp->shm_perm.mode &= ~SHM_LOCKED;
1225	shp->mlock_ucounts = NULL;
1226	get_file(shm_file);
1227	ipc_unlock_object(&shp->shm_perm);
1228	rcu_read_unlock();
1229	shmem_unlock_mapping(shm_file->f_mapping);
1230
1231	fput(shm_file);
1232	return err;
1233
1234out_unlock0:
1235	ipc_unlock_object(&shp->shm_perm);
1236out_unlock1:
1237	rcu_read_unlock();
1238	return err;
1239}
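/*
 * Userspace sketch of the lock path above (illustrative, not kernel
 * code): SHM_LOCK keeps the segment's pages out of swap; a caller
 * without CAP_IPC_LOCK must own the segment and have a non-zero
 * RLIMIT_MEMLOCK, otherwise the EPERM branches above are taken:
 *
 *	if (shmctl(id, SHM_LOCK, NULL) == 0) {
 *		// ... segment pages will not be swapped out ...
 *		shmctl(id, SHM_UNLOCK, NULL);
 *	}
 */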
1240
1241static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version)
1242{
1243	int err;
1244	struct ipc_namespace *ns;
1245	struct shmid64_ds sem64;
1246
1247	if (cmd < 0 || shmid < 0)
1248		return -EINVAL;
1249
1250	ns = current->nsproxy->ipc_ns;
1251
1252	switch (cmd) {
1253	case IPC_INFO: {
1254		struct shminfo64 shminfo;
1255		err = shmctl_ipc_info(ns, &shminfo);
1256		if (err < 0)
1257			return err;
1258		if (copy_shminfo_to_user(buf, &shminfo, version))
1259			err = -EFAULT;
1260		return err;
1261	}
1262	case SHM_INFO: {
1263		struct shm_info shm_info;
1264		err = shmctl_shm_info(ns, &shm_info);
1265		if (err < 0)
1266			return err;
1267		if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
1268			err = -EFAULT;
1269		return err;
1270	}
1271	case SHM_STAT:
1272	case SHM_STAT_ANY:
1273	case IPC_STAT: {
1274		err = shmctl_stat(ns, shmid, cmd, &sem64);
1275		if (err < 0)
1276			return err;
1277		if (copy_shmid_to_user(buf, &sem64, version))
1278			err = -EFAULT;
1279		return err;
1280	}
1281	case IPC_SET:
1282		if (copy_shmid_from_user(&sem64, buf, version))
1283			return -EFAULT;
1284		fallthrough;
1285	case IPC_RMID:
1286		return shmctl_down(ns, shmid, cmd, &sem64);
1287	case SHM_LOCK:
1288	case SHM_UNLOCK:
1289		return shmctl_do_lock(ns, shmid, cmd);
1290	default:
1291		return -EINVAL;
1292	}
1293}
1294
1295SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1296{
1297	return ksys_shmctl(shmid, cmd, buf, IPC_64);
1298}
1299
1300#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
1301long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
1302{
1303	int version = ipc_parse_version(&cmd);
1304
1305	return ksys_shmctl(shmid, cmd, buf, version);
1306}
1307
1308SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1309{
1310	return ksys_old_shmctl(shmid, cmd, buf);
1311}
1312#endif
1313
1314#ifdef CONFIG_COMPAT
1315
1316struct compat_shmid_ds {
1317	struct compat_ipc_perm shm_perm;
1318	int shm_segsz;
1319	old_time32_t shm_atime;
1320	old_time32_t shm_dtime;
1321	old_time32_t shm_ctime;
1322	compat_ipc_pid_t shm_cpid;
1323	compat_ipc_pid_t shm_lpid;
1324	unsigned short shm_nattch;
1325	unsigned short shm_unused;
1326	compat_uptr_t shm_unused2;
1327	compat_uptr_t shm_unused3;
1328};
1329
1330struct compat_shminfo64 {
1331	compat_ulong_t shmmax;
1332	compat_ulong_t shmmin;
1333	compat_ulong_t shmmni;
1334	compat_ulong_t shmseg;
1335	compat_ulong_t shmall;
1336	compat_ulong_t __unused1;
1337	compat_ulong_t __unused2;
1338	compat_ulong_t __unused3;
1339	compat_ulong_t __unused4;
1340};
1341
1342struct compat_shm_info {
1343	compat_int_t used_ids;
1344	compat_ulong_t shm_tot, shm_rss, shm_swp;
1345	compat_ulong_t swap_attempts, swap_successes;
1346};
1347
1348static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
1349					int version)
1350{
1351	if (in->shmmax > INT_MAX)
1352		in->shmmax = INT_MAX;
1353	if (version == IPC_64) {
1354		struct compat_shminfo64 info;
1355		memset(&info, 0, sizeof(info));
1356		info.shmmax = in->shmmax;
1357		info.shmmin = in->shmmin;
1358		info.shmmni = in->shmmni;
1359		info.shmseg = in->shmseg;
1360		info.shmall = in->shmall;
1361		return copy_to_user(buf, &info, sizeof(info));
1362	} else {
1363		struct shminfo info;
1364		memset(&info, 0, sizeof(info));
1365		info.shmmax = in->shmmax;
1366		info.shmmin = in->shmmin;
1367		info.shmmni = in->shmmni;
1368		info.shmseg = in->shmseg;
1369		info.shmall = in->shmall;
1370		return copy_to_user(buf, &info, sizeof(info));
1371	}
1372}
1373
1374static int put_compat_shm_info(struct shm_info *ip,
1375				struct compat_shm_info __user *uip)
1376{
1377	struct compat_shm_info info;
1378
1379	memset(&info, 0, sizeof(info));
1380	info.used_ids = ip->used_ids;
1381	info.shm_tot = ip->shm_tot;
1382	info.shm_rss = ip->shm_rss;
1383	info.shm_swp = ip->shm_swp;
1384	info.swap_attempts = ip->swap_attempts;
1385	info.swap_successes = ip->swap_successes;
1386	return copy_to_user(uip, &info, sizeof(info));
1387}
1388
1389static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
1390					int version)
1391{
1392	if (version == IPC_64) {
1393		struct compat_shmid64_ds v;
1394		memset(&v, 0, sizeof(v));
1395		to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
1396		v.shm_atime	 = lower_32_bits(in->shm_atime);
1397		v.shm_atime_high = upper_32_bits(in->shm_atime);
1398		v.shm_dtime	 = lower_32_bits(in->shm_dtime);
1399		v.shm_dtime_high = upper_32_bits(in->shm_dtime);
1400		v.shm_ctime	 = lower_32_bits(in->shm_ctime);
1401		v.shm_ctime_high = upper_32_bits(in->shm_ctime);
1402		v.shm_segsz = in->shm_segsz;
1403		v.shm_nattch = in->shm_nattch;
1404		v.shm_cpid = in->shm_cpid;
1405		v.shm_lpid = in->shm_lpid;
1406		return copy_to_user(buf, &v, sizeof(v));
1407	} else {
1408		struct compat_shmid_ds v;
1409		memset(&v, 0, sizeof(v));
1410		to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
1411		v.shm_perm.key = in->shm_perm.key;
1412		v.shm_atime = in->shm_atime;
1413		v.shm_dtime = in->shm_dtime;
1414		v.shm_ctime = in->shm_ctime;
1415		v.shm_segsz = in->shm_segsz;
1416		v.shm_nattch = in->shm_nattch;
1417		v.shm_cpid = in->shm_cpid;
1418		v.shm_lpid = in->shm_lpid;
1419		return copy_to_user(buf, &v, sizeof(v));
1420	}
1421}
1422
1423static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
1424					int version)
1425{
1426	memset(out, 0, sizeof(*out));
1427	if (version == IPC_64) {
1428		struct compat_shmid64_ds __user *p = buf;
1429		return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
1430	} else {
1431		struct compat_shmid_ds __user *p = buf;
1432		return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
1433	}
1434}
1435
1436static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version)
1437{
1438	struct ipc_namespace *ns;
1439	struct shmid64_ds sem64;
1440	int err;
1441
1442	ns = current->nsproxy->ipc_ns;
1443
1444	if (cmd < 0 || shmid < 0)
1445		return -EINVAL;
1446
1447	switch (cmd) {
1448	case IPC_INFO: {
1449		struct shminfo64 shminfo;
1450		err = shmctl_ipc_info(ns, &shminfo);
1451		if (err < 0)
1452			return err;
1453		if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
1454			err = -EFAULT;
1455		return err;
1456	}
1457	case SHM_INFO: {
1458		struct shm_info shm_info;
1459		err = shmctl_shm_info(ns, &shm_info);
1460		if (err < 0)
1461			return err;
1462		if (put_compat_shm_info(&shm_info, uptr))
1463			err = -EFAULT;
1464		return err;
1465	}
1466	case IPC_STAT:
1467	case SHM_STAT_ANY:
1468	case SHM_STAT:
1469		err = shmctl_stat(ns, shmid, cmd, &sem64);
1470		if (err < 0)
1471			return err;
1472		if (copy_compat_shmid_to_user(uptr, &sem64, version))
1473			err = -EFAULT;
1474		return err;
1475
1476	case IPC_SET:
1477		if (copy_compat_shmid_from_user(&sem64, uptr, version))
1478			return -EFAULT;
1479		fallthrough;
1480	case IPC_RMID:
1481		return shmctl_down(ns, shmid, cmd, &sem64);
1482	case SHM_LOCK:
1483	case SHM_UNLOCK:
1484		return shmctl_do_lock(ns, shmid, cmd);
1485	default:
1486		return -EINVAL;
1487	}
1488	return err;
1489}
1490
1491COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
1492{
1493	return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64);
1494}
1495
1496#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
1497long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr)
1498{
1499	int version = compat_ipc_parse_version(&cmd);
1500
1501	return compat_ksys_shmctl(shmid, cmd, uptr, version);
1502}
1503
1504COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr)
1505{
1506	return compat_ksys_old_shmctl(shmid, cmd, uptr);
1507}
1508#endif
1509#endif
1510
1511/*
1512 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1513 *
1514 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1515 * "raddr" thing points to kernel space, and there has to be a wrapper around
1516 * this.
1517 */
1518long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1519	      ulong *raddr, unsigned long shmlba)
1520{
1521	struct shmid_kernel *shp;
1522	unsigned long addr = (unsigned long)shmaddr;
1523	unsigned long size;
1524	struct file *file, *base;
1525	int    err;
1526	unsigned long flags = MAP_SHARED;
1527	unsigned long prot;
1528	int acc_mode;
1529	struct ipc_namespace *ns;
1530	struct shm_file_data *sfd;
1531	int f_flags;
1532	unsigned long populate = 0;
1533
1534	err = -EINVAL;
1535	if (shmid < 0)
1536		goto out;
1537
1538	if (addr) {
1539		if (addr & (shmlba - 1)) {
1540			if (shmflg & SHM_RND) {
1541				addr &= ~(shmlba - 1);  /* round down */
1542
1543				/*
1544				 * Ensure that the round-down result is
1545				 * non-zero when remapping; a zero result
1546				 * can happen when addr < shmlba.
1547				 */
1548				if (!addr && (shmflg & SHM_REMAP))
1549					goto out;
1550			} else
1551#ifndef __ARCH_FORCE_SHMLBA
1552				if (addr & ~PAGE_MASK)
1553#endif
1554					goto out;
1555		}
1556
1557		flags |= MAP_FIXED;
1558	} else if ((shmflg & SHM_REMAP))
1559		goto out;
1560
1561	if (shmflg & SHM_RDONLY) {
1562		prot = PROT_READ;
1563		acc_mode = S_IRUGO;
1564		f_flags = O_RDONLY;
1565	} else {
1566		prot = PROT_READ | PROT_WRITE;
1567		acc_mode = S_IRUGO | S_IWUGO;
1568		f_flags = O_RDWR;
1569	}
1570	if (shmflg & SHM_EXEC) {
1571		prot |= PROT_EXEC;
1572		acc_mode |= S_IXUGO;
1573	}
1574
1575	/*
1576	 * We cannot rely on the fs check since SYSV IPC does have an
1577	 * additional creator id...
1578	 */
1579	ns = current->nsproxy->ipc_ns;
1580	rcu_read_lock();
1581	shp = shm_obtain_object_check(ns, shmid);
1582	if (IS_ERR(shp)) {
1583		err = PTR_ERR(shp);
1584		goto out_unlock;
1585	}
1586
1587	err = -EACCES;
1588	if (ipcperms(ns, &shp->shm_perm, acc_mode))
1589		goto out_unlock;
1590
1591	err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
1592	if (err)
1593		goto out_unlock;
1594
1595	ipc_lock_object(&shp->shm_perm);
1596
1597	/* check if shm_destroy() is tearing down shp */
1598	if (!ipc_valid_object(&shp->shm_perm)) {
1599		ipc_unlock_object(&shp->shm_perm);
1600		err = -EIDRM;
1601		goto out_unlock;
1602	}
1603
1604	/*
1605	 * We need to take a reference to the real shm file to prevent the
1606	 * pointer from becoming stale in cases where the lifetime of the outer
1607	 * file extends beyond that of the shm segment.  It's not usually
1608	 * possible, but it can happen during remap_file_pages() emulation as
1609	 * that unmaps the memory, then does ->mmap() via file reference only.
1610	 * We'll deny the ->mmap() if the shm segment was since removed, but to
1611	 * detect shm ID reuse we need to compare the file pointers.
1612	 */
1613	base = get_file(shp->shm_file);
1614	shp->shm_nattch++;
1615	size = i_size_read(file_inode(base));
1616	ipc_unlock_object(&shp->shm_perm);
1617	rcu_read_unlock();
1618
1619	err = -ENOMEM;
1620	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1621	if (!sfd) {
1622		fput(base);
1623		goto out_nattch;
1624	}
1625
1626	file = alloc_file_clone(base, f_flags,
1627			  is_file_hugepages(base) ?
1628				&shm_file_operations_huge :
1629				&shm_file_operations);
1630	err = PTR_ERR(file);
1631	if (IS_ERR(file)) {
1632		kfree(sfd);
1633		fput(base);
1634		goto out_nattch;
1635	}
1636
1637	sfd->id = shp->shm_perm.id;
1638	sfd->ns = get_ipc_ns(ns);
1639	sfd->file = base;
1640	sfd->vm_ops = NULL;
1641	file->private_data = sfd;
1642
1643	err = security_mmap_file(file, prot, flags);
1644	if (err)
1645		goto out_fput;
1646
1647	if (mmap_write_lock_killable(current->mm)) {
1648		err = -EINTR;
1649		goto out_fput;
1650	}
1651
1652	if (addr && !(shmflg & SHM_REMAP)) {
1653		err = -EINVAL;
1654		if (addr + size < addr)
1655			goto invalid;
1656
1657		if (find_vma_intersection(current->mm, addr, addr + size))
1658			goto invalid;
1659	}
1660
1661	addr = do_mmap(file, addr, size, prot, flags, 0, 0, &populate, NULL);
1662	*raddr = addr;
1663	err = 0;
1664	if (IS_ERR_VALUE(addr))
1665		err = (long)addr;
1666invalid:
1667	mmap_write_unlock(current->mm);
1668	if (populate)
1669		mm_populate(addr, populate);
1670
1671out_fput:
1672	fput(file);
1673
1674out_nattch:
1675	down_write(&shm_ids(ns).rwsem);
1676	shp = shm_lock(ns, shmid);
1677	shp->shm_nattch--;
1678
1679	if (shm_may_destroy(shp))
1680		shm_destroy(ns, shp);
1681	else
1682		shm_unlock(shp);
1683	up_write(&shm_ids(ns).rwsem);
1684	return err;
1685
1686out_unlock:
1687	rcu_read_unlock();
1688out:
1689	return err;
1690}
1691
1692SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1693{
1694	unsigned long ret;
1695	long err;
1696
1697	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1698	if (err)
1699		return err;
1700	force_successful_syscall_return();
1701	return (long)ret;
1702}
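/*
 * Illustrative example of the SHM_RND rounding done in do_shmat()
 * (userspace sketch; assumes SHMLBA == 4096 on this architecture,
 * which is not universally true):
 *
 *	// 0x40001234 is not SHMLBA-aligned: without SHM_RND this fails
 *	// with EINVAL; with SHM_RND the address is rounded down:
 *	void *p = shmat(id, (void *)0x40001234, SHM_RND);
 *	// on success, p == (void *)0x40001000
 */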
1703
1704#ifdef CONFIG_COMPAT
1705
1706#ifndef COMPAT_SHMLBA
1707#define COMPAT_SHMLBA	SHMLBA
1708#endif
1709
1710COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
1711{
1712	unsigned long ret;
1713	long err;
1714
1715	err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
1716	if (err)
1717		return err;
1718	force_successful_syscall_return();
1719	return (long)ret;
1720}
1721#endif
1722
1723/*
1724 * detach and kill segment if marked destroyed.
1725 * The work is done in shm_close.
1726 */
1727long ksys_shmdt(char __user *shmaddr)
1728{
1729	struct mm_struct *mm = current->mm;
1730	struct vm_area_struct *vma;
1731	unsigned long addr = (unsigned long)shmaddr;
1732	int retval = -EINVAL;
1733#ifdef CONFIG_MMU
1734	loff_t size = 0;
1735	struct file *file;
1736	VMA_ITERATOR(vmi, mm, addr);
1737#endif
1738
1739	if (addr & ~PAGE_MASK)
1740		return retval;
1741
1742	if (mmap_write_lock_killable(mm))
1743		return -EINTR;
1744
1745	/*
1746	 * This function tries to be smart and unmap shm segments that
1747	 * were modified by partial mlock or munmap calls:
1748	 * - It first determines the size of the shm segment that should be
1749	 *   unmapped: It searches for a vma that is backed by shm and that
1750	 *   started at address shmaddr. It records its size and then unmaps
1751	 *   it.
1752	 * - Then it unmaps all shm vmas that started at shmaddr and that
1753	 *   are within the initially determined size and that are from the
1754	 *   same shm segment from which we determined the size.
1755	 * Errors from do_munmap are ignored: the function only fails if
1756	 * it's called with invalid parameters or if it's called to unmap
1757	 * a part of a vma. Both calls in this function are for full vmas,
1758	 * the parameters are directly copied from the vma itself and always
1759	 * valid - therefore do_munmap cannot fail. (famous last words?)
1760	 */
1761	/*
1762	 * If it had been mremap()'d, the starting address would not
1763	 * match the usual checks anyway. So assume all vma's are
1764	 * above the starting address given.
1765	 */
1766
1767#ifdef CONFIG_MMU
1768	for_each_vma(vmi, vma) {
1769		/*
1770		 * Check if the starting address would match, i.e. it's
1771		 * a fragment created by mprotect() and/or munmap(), or
1772		 * otherwise it starts at this address with no hassles.
1773		 */
1774		if ((vma->vm_ops == &shm_vm_ops) &&
1775			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1776
1777			/*
1778			 * Record the file of the shm segment being
1779			 * unmapped.  With mremap(), someone could place
1780			 * pages from another segment but with equal offsets
1781			 * in the range we are unmapping.
1782			 */
1783			file = vma->vm_file;
1784			size = i_size_read(file_inode(vma->vm_file));
1785			do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
1786				      NULL, false);
1787			/*
1788			 * We discovered the size of the shm segment, so
1789			 * break out of here and fall through to the next
1790			 * loop that uses the size information to stop
1791			 * searching for matching vma's.
1792			 */
1793			retval = 0;
1794			vma = vma_next(&vmi);
1795			break;
1796		}
1797	}
1798
1799	/*
1800	 * We need look no further than the maximum address a fragment
1801	 * could possibly have landed at. Also cast things to loff_t to
1802	 * prevent overflows and make comparisons vs. equal-width types.
1803	 */
1804	size = PAGE_ALIGN(size);
1805	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1806		/* finding a matching vma now does not alter retval */
1807		if ((vma->vm_ops == &shm_vm_ops) &&
1808		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1809		    (vma->vm_file == file)) {
1810			do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
1811				      NULL, false);
1812		}
1813
1814		vma = vma_next(&vmi);
1815	}
1816
1817#else	/* CONFIG_MMU */
1818	vma = vma_lookup(mm, addr);
1819	/* Under NOMMU conditions, the exact address to be
1820	 * destroyed must be given.
1821	 */
1822	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1823		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1824		retval = 0;
1825	}
1826
1827#endif
1828
1829	mmap_write_unlock(mm);
1830	return retval;
1831}
1832
1833SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1834{
1835	return ksys_shmdt(shmaddr);
1836}
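/*
 * Usage note for the shmdt() path above (illustrative userspace
 * sketch, not kernel code): the address is matched against
 * vm_start/vm_pgoff, so it must be the page-aligned value returned by
 * shmat(), not a pointer into the middle of the segment:
 *
 *	char *p = shmat(id, NULL, 0);
 *	shmdt(p + 16);	// -1, EINVAL: not the attach address
 *	shmdt(p);	// 0 on success
 */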
1837
1838#ifdef CONFIG_PROC_FS
1839static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1840{
1841	struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
1842	struct user_namespace *user_ns = seq_user_ns(s);
1843	struct kern_ipc_perm *ipcp = it;
1844	struct shmid_kernel *shp;
1845	unsigned long rss = 0, swp = 0;
1846
1847	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1848	shm_add_rss_swap(shp, &rss, &swp);
1849
1850#if BITS_PER_LONG <= 32
1851#define SIZE_SPEC "%10lu"
1852#else
1853#define SIZE_SPEC "%21lu"
1854#endif
1855
1856	seq_printf(s,
1857		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
1858		   "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
1859		   SIZE_SPEC " " SIZE_SPEC "\n",
1860		   shp->shm_perm.key,
1861		   shp->shm_perm.id,
1862		   shp->shm_perm.mode,
1863		   shp->shm_segsz,
1864		   pid_nr_ns(shp->shm_cprid, pid_ns),
1865		   pid_nr_ns(shp->shm_lprid, pid_ns),
1866		   shp->shm_nattch,
1867		   from_kuid_munged(user_ns, shp->shm_perm.uid),
1868		   from_kgid_munged(user_ns, shp->shm_perm.gid),
1869		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
1870		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
1871		   shp->shm_atim,
1872		   shp->shm_dtim,
1873		   shp->shm_ctim,
1874		   rss * PAGE_SIZE,
1875		   swp * PAGE_SIZE);
1876
1877	return 0;
1878}
1879#endif