v3.1
   1/*
   2 * linux/ipc/shm.c
   3 * Copyright (C) 1992, 1993 Krishna Balasubramanian
   4 *	 Many improvements/fixes by Bruno Haible.
   5 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
   6 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
   7 *
   8 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
   9 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
  10 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
  11 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
  12 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
  13 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
  14 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
  15 *
  16 * support for audit of ipc object properties and permission changes
  17 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  18 *
  19 * namespaces support
  20 * OpenVZ, SWsoft Inc.
  21 * Pavel Emelianov <xemul@openvz.org>
  22 */
  23
  24#include <linux/slab.h>
  25#include <linux/mm.h>
  26#include <linux/hugetlb.h>
  27#include <linux/shm.h>
  28#include <linux/init.h>
  29#include <linux/file.h>
  30#include <linux/mman.h>
  31#include <linux/shmem_fs.h>
  32#include <linux/security.h>
  33#include <linux/syscalls.h>
  34#include <linux/audit.h>
  35#include <linux/capability.h>
  36#include <linux/ptrace.h>
  37#include <linux/seq_file.h>
  38#include <linux/rwsem.h>
  39#include <linux/nsproxy.h>
  40#include <linux/mount.h>
  41#include <linux/ipc_namespace.h>
  42
  43#include <asm/uaccess.h>
  44
  45#include "util.h"
  46
  47struct shm_file_data {
  48	int id;
  49	struct ipc_namespace *ns;
  50	struct file *file;
  51	const struct vm_operations_struct *vm_ops;
  52};
  53
  54#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
  55
  56static const struct file_operations shm_file_operations;
  57static const struct vm_operations_struct shm_vm_ops;
  58
  59#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])
  60
  61#define shm_unlock(shp)			\
  62	ipc_unlock(&(shp)->shm_perm)
  63
  64static int newseg(struct ipc_namespace *, struct ipc_params *);
  65static void shm_open(struct vm_area_struct *vma);
  66static void shm_close(struct vm_area_struct *vma);
  67static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
  68#ifdef CONFIG_PROC_FS
  69static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
  70#endif
  71
  72void shm_init_ns(struct ipc_namespace *ns)
  73{
  74	ns->shm_ctlmax = SHMMAX;
  75	ns->shm_ctlall = SHMALL;
  76	ns->shm_ctlmni = SHMMNI;
  77	ns->shm_rmid_forced = 0;
  78	ns->shm_tot = 0;
  79	ipc_init_ids(&shm_ids(ns));
  80}
  81
  82/*
  83 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
  84 * Only shm_ids.rw_mutex remains locked on exit.
  85 */
  86static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
  87{
  88	struct shmid_kernel *shp;
  89	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
  90
  91	if (shp->shm_nattch){
  92		shp->shm_perm.mode |= SHM_DEST;
  93		/* Do not find it any more */
  94		shp->shm_perm.key = IPC_PRIVATE;
  95		shm_unlock(shp);
  96	} else
  97		shm_destroy(ns, shp);
  98}
  99
 100#ifdef CONFIG_IPC_NS
 101void shm_exit_ns(struct ipc_namespace *ns)
 102{
 103	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
 104	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
 105}
 106#endif
 107
 108static int __init ipc_ns_init(void)
 109{
 110	shm_init_ns(&init_ipc_ns);
 111	return 0;
 112}
 113
 114pure_initcall(ipc_ns_init);
 115
 116void __init shm_init (void)
 117{
 118	ipc_init_proc_interface("sysvipc/shm",
 119#if BITS_PER_LONG <= 32
 120				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
 121#else
 122				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
 123#endif
 124				IPC_SHM_IDS, sysvipc_shm_proc_show);
 125}
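The interface registered above appears as a plain-text table under /proc/sysvipc/shm. A minimal userspace sketch (illustrative only, not part of the kernel source) that simply streams that table:

#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/sysvipc/shm", "r");	/* path registered by shm_init() */

	if (!f) {
		perror("/proc/sysvipc/shm");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}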
 126
 127/*
 128 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 129 * is not necessarily held.
 130 */
 131static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 132{
 133	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
 134
 135	if (IS_ERR(ipcp))
 136		return (struct shmid_kernel *)ipcp;
 137
 138	return container_of(ipcp, struct shmid_kernel, shm_perm);
 139}
 140
 141static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
 142{
 143	rcu_read_lock();
 144	spin_lock(&ipcp->shm_perm.lock);
 145}
 146
 147static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
 148						int id)
 149{
 150	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);
 151
 152	if (IS_ERR(ipcp))
 153		return (struct shmid_kernel *)ipcp;
 154
 155	return container_of(ipcp, struct shmid_kernel, shm_perm);
 156}
 157
 158static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 159{
 160	ipc_rmid(&shm_ids(ns), &s->shm_perm);
 161}
 162
 163
 164/* This is called by fork, once for every shm attach. */
 165static void shm_open(struct vm_area_struct *vma)
 166{
 167	struct file *file = vma->vm_file;
 168	struct shm_file_data *sfd = shm_file_data(file);
 169	struct shmid_kernel *shp;
 170
 171	shp = shm_lock(sfd->ns, sfd->id);
 172	BUG_ON(IS_ERR(shp));
 173	shp->shm_atim = get_seconds();
 174	shp->shm_lprid = task_tgid_vnr(current);
 175	shp->shm_nattch++;
 176	shm_unlock(shp);
 177}
 178
 179/*
 180 * shm_destroy - free the struct shmid_kernel
 181 *
 182 * @ns: namespace
 183 * @shp: struct to free
 184 *
 185 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 186 * but returns with shp unlocked and freed.
 187 */
 188static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 189{
 190	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
 191	shm_rmid(ns, shp);
 192	shm_unlock(shp);
 193	if (!is_file_hugepages(shp->shm_file))
 194		shmem_lock(shp->shm_file, 0, shp->mlock_user);
 195	else if (shp->mlock_user)
 196		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
 197						shp->mlock_user);
 198	fput (shp->shm_file);
 199	security_shm_free(shp);
 200	ipc_rcu_putref(shp);
 201}
 202
 203/*
 204 * shm_may_destroy - identifies whether shm segment should be destroyed now
 205 *
 206 * Returns true if and only if there are no active users of the segment and
 207 * one of the following is true:
 208 *
 209 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 210 *
 211 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 212 */
 213static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 214{
 215	return (shp->shm_nattch == 0) &&
 216	       (ns->shm_rmid_forced ||
 217		(shp->shm_perm.mode & SHM_DEST));
 218}
 219
 220/*
 221 * remove the attach descriptor vma.
 222 * free memory for segment if it is marked destroyed.
 223 * The descriptor has already been removed from the current->mm->mmap list
 224 * and will later be kfree()d.
 225 */
 226static void shm_close(struct vm_area_struct *vma)
 227{
 228	struct file * file = vma->vm_file;
 229	struct shm_file_data *sfd = shm_file_data(file);
 230	struct shmid_kernel *shp;
 231	struct ipc_namespace *ns = sfd->ns;
 232
 233	down_write(&shm_ids(ns).rw_mutex);
 234	/* remove from the list of attaches of the shm segment */
 235	shp = shm_lock(ns, sfd->id);
 236	BUG_ON(IS_ERR(shp));
 237	shp->shm_lprid = task_tgid_vnr(current);
 238	shp->shm_dtim = get_seconds();
 239	shp->shm_nattch--;
 240	if (shm_may_destroy(ns, shp))
 241		shm_destroy(ns, shp);
 242	else
 243		shm_unlock(shp);
 244	up_write(&shm_ids(ns).rw_mutex);
 245}
 246
 247/* Called with ns->shm_ids(ns).rw_mutex locked */
 248static int shm_try_destroy_current(int id, void *p, void *data)
 249{
 250	struct ipc_namespace *ns = data;
 251	struct kern_ipc_perm *ipcp = p;
 252	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 253
 254	if (shp->shm_creator != current)
 255		return 0;
 256
 257	/*
 258	 * Mark it as orphaned to destroy the segment when
 259	 * kernel.shm_rmid_forced is changed.
  260	 * It is a no-op if the following shm_may_destroy() returns true.
 261	 */
 262	shp->shm_creator = NULL;
 263
 264	/*
 265	 * Don't even try to destroy it.  If shm_rmid_forced=0 and IPC_RMID
 266	 * is not set, it shouldn't be deleted here.
 267	 */
 268	if (!ns->shm_rmid_forced)
 269		return 0;
 270
 271	if (shm_may_destroy(ns, shp)) {
 272		shm_lock_by_ptr(shp);
 273		shm_destroy(ns, shp);
 274	}
 275	return 0;
 276}
 277
 278/* Called with ns->shm_ids(ns).rw_mutex locked */
 279static int shm_try_destroy_orphaned(int id, void *p, void *data)
 280{
 281	struct ipc_namespace *ns = data;
 282	struct kern_ipc_perm *ipcp = p;
 283	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 284
 285	/*
 286	 * We want to destroy segments without users and with already
 287	 * exit'ed originating process.
 288	 *
 289	 * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
 290	 */
 291	if (shp->shm_creator != NULL)
 292		return 0;
 293
 294	if (shm_may_destroy(ns, shp)) {
 295		shm_lock_by_ptr(shp);
 296		shm_destroy(ns, shp);
 297	}
 298	return 0;
 299}
 300
 301void shm_destroy_orphaned(struct ipc_namespace *ns)
 302{
 303	down_write(&shm_ids(ns).rw_mutex);
 304	if (shm_ids(ns).in_use)
 305		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
 306	up_write(&shm_ids(ns).rw_mutex);
 307}
 308
 309
 310void exit_shm(struct task_struct *task)
 311{
 312	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
 313
 314	if (shm_ids(ns).in_use == 0)
 315		return;
 316
 317	/* Destroy all already created segments, but not mapped yet */
 318	down_write(&shm_ids(ns).rw_mutex);
 319	if (shm_ids(ns).in_use)
 320		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
 321	up_write(&shm_ids(ns).rw_mutex);
 322}
 323
 324static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 325{
 326	struct file *file = vma->vm_file;
 327	struct shm_file_data *sfd = shm_file_data(file);
 328
 329	return sfd->vm_ops->fault(vma, vmf);
 330}
 331
 332#ifdef CONFIG_NUMA
 333static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 334{
 335	struct file *file = vma->vm_file;
 336	struct shm_file_data *sfd = shm_file_data(file);
 337	int err = 0;
 338	if (sfd->vm_ops->set_policy)
 339		err = sfd->vm_ops->set_policy(vma, new);
 340	return err;
 341}
 342
 343static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
 344					unsigned long addr)
 345{
 346	struct file *file = vma->vm_file;
 347	struct shm_file_data *sfd = shm_file_data(file);
 348	struct mempolicy *pol = NULL;
 349
 350	if (sfd->vm_ops->get_policy)
 351		pol = sfd->vm_ops->get_policy(vma, addr);
 352	else if (vma->vm_policy)
 353		pol = vma->vm_policy;
 354
 355	return pol;
 356}
 357#endif
 358
 359static int shm_mmap(struct file * file, struct vm_area_struct * vma)
 360{
 361	struct shm_file_data *sfd = shm_file_data(file);
 362	int ret;
 363
 364	ret = sfd->file->f_op->mmap(sfd->file, vma);
 365	if (ret != 0)
 366		return ret;
 367	sfd->vm_ops = vma->vm_ops;
 368#ifdef CONFIG_MMU
 369	BUG_ON(!sfd->vm_ops->fault);
 370#endif
 371	vma->vm_ops = &shm_vm_ops;
 372	shm_open(vma);
 373
 374	return ret;
 375}
 376
 377static int shm_release(struct inode *ino, struct file *file)
 378{
 379	struct shm_file_data *sfd = shm_file_data(file);
 380
 381	put_ipc_ns(sfd->ns);
 382	shm_file_data(file) = NULL;
 383	kfree(sfd);
 384	return 0;
 385}
 386
 387static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 388{
 389	struct shm_file_data *sfd = shm_file_data(file);
 390
 391	if (!sfd->file->f_op->fsync)
 392		return -EINVAL;
 393	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
 394}
 395
 396static unsigned long shm_get_unmapped_area(struct file *file,
 397	unsigned long addr, unsigned long len, unsigned long pgoff,
 398	unsigned long flags)
 399{
 400	struct shm_file_data *sfd = shm_file_data(file);
 401	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
 402						pgoff, flags);
 403}
 404
 405static const struct file_operations shm_file_operations = {
 406	.mmap		= shm_mmap,
 407	.fsync		= shm_fsync,
 408	.release	= shm_release,
 409#ifndef CONFIG_MMU
 410	.get_unmapped_area	= shm_get_unmapped_area,
 411#endif
 412	.llseek		= noop_llseek,
 413};
 414
 415static const struct file_operations shm_file_operations_huge = {
 416	.mmap		= shm_mmap,
 417	.fsync		= shm_fsync,
 418	.release	= shm_release,
 419	.get_unmapped_area	= shm_get_unmapped_area,
 420	.llseek		= noop_llseek,
 421};
 422
 423int is_file_shm_hugepages(struct file *file)
 424{
 425	return file->f_op == &shm_file_operations_huge;
 426}
 427
 428static const struct vm_operations_struct shm_vm_ops = {
 429	.open	= shm_open,	/* callback for a new vm-area open */
 430	.close	= shm_close,	/* callback for when the vm-area is released */
 431	.fault	= shm_fault,
 432#if defined(CONFIG_NUMA)
 433	.set_policy = shm_set_policy,
 434	.get_policy = shm_get_policy,
 435#endif
 436};
 437
 438/**
 439 * newseg - Create a new shared memory segment
 440 * @ns: namespace
 441 * @params: ptr to the structure that contains key, size and shmflg
 442 *
 443 * Called with shm_ids.rw_mutex held as a writer.
 444 */
 445
 446static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 447{
 448	key_t key = params->key;
 449	int shmflg = params->flg;
 450	size_t size = params->u.size;
 451	int error;
 452	struct shmid_kernel *shp;
 453	int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
 454	struct file * file;
 455	char name[13];
 456	int id;
 457	vm_flags_t acctflag = 0;
 458
 459	if (size < SHMMIN || size > ns->shm_ctlmax)
 460		return -EINVAL;
 461
 462	if (ns->shm_tot + numpages > ns->shm_ctlall)
 463		return -ENOSPC;
 464
 465	shp = ipc_rcu_alloc(sizeof(*shp));
 466	if (!shp)
 467		return -ENOMEM;
 468
 469	shp->shm_perm.key = key;
 470	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
 471	shp->mlock_user = NULL;
 472
 473	shp->shm_perm.security = NULL;
 474	error = security_shm_alloc(shp);
 475	if (error) {
 476		ipc_rcu_putref(shp);
 477		return error;
 478	}
 479
 480	sprintf (name, "SYSV%08x", key);
 481	if (shmflg & SHM_HUGETLB) {
 482		/* hugetlb_file_setup applies strict accounting */
 483		if (shmflg & SHM_NORESERVE)
 484			acctflag = VM_NORESERVE;
 485		file = hugetlb_file_setup(name, size, acctflag,
 486					&shp->mlock_user, HUGETLB_SHMFS_INODE);
 487	} else {
 488		/*
 489		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
 490	 	 * if it's asked for.
 491		 */
 492		if  ((shmflg & SHM_NORESERVE) &&
 493				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
 494			acctflag = VM_NORESERVE;
 495		file = shmem_file_setup(name, size, acctflag);
 496	}
 497	error = PTR_ERR(file);
 498	if (IS_ERR(file))
 499		goto no_file;
 500
 501	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
 502	if (id < 0) {
 503		error = id;
 504		goto no_id;
 505	}
 506
 507	shp->shm_cprid = task_tgid_vnr(current);
 508	shp->shm_lprid = 0;
 509	shp->shm_atim = shp->shm_dtim = 0;
 510	shp->shm_ctim = get_seconds();
 511	shp->shm_segsz = size;
 512	shp->shm_nattch = 0;
 513	shp->shm_file = file;
 514	shp->shm_creator = current;
 515	/*
 516	 * shmid gets reported as "inode#" in /proc/pid/maps.
 517	 * proc-ps tools use this. Changing this will break them.
 518	 */
 519	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;
 520
 521	ns->shm_tot += numpages;
 522	error = shp->shm_perm.id;
 523	shm_unlock(shp);
 524	return error;
 525
 526no_id:
 527	if (is_file_hugepages(file) && shp->mlock_user)
 528		user_shm_unlock(size, shp->mlock_user);
 529	fput(file);
 530no_file:
 531	security_shm_free(shp);
 532	ipc_rcu_putref(shp);
 533	return error;
 534}
 535
 536/*
 537 * Called with shm_ids.rw_mutex and ipcp locked.
 538 */
 539static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
 540{
 541	struct shmid_kernel *shp;
 542
 543	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 544	return security_shm_associate(shp, shmflg);
 545}
 546
 547/*
 548 * Called with shm_ids.rw_mutex and ipcp locked.
 549 */
 550static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
 551				struct ipc_params *params)
 552{
 553	struct shmid_kernel *shp;
 554
 555	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 556	if (shp->shm_segsz < params->u.size)
 557		return -EINVAL;
 558
 559	return 0;
 560}
 561
 562SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
 563{
 564	struct ipc_namespace *ns;
 565	struct ipc_ops shm_ops;
 566	struct ipc_params shm_params;
 567
 568	ns = current->nsproxy->ipc_ns;
 569
 570	shm_ops.getnew = newseg;
 571	shm_ops.associate = shm_security;
 572	shm_ops.more_checks = shm_more_checks;
 573
 574	shm_params.key = key;
 575	shm_params.flg = shmflg;
 576	shm_params.u.size = size;
 577
 578	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
 579}
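For context, a hedged userspace sketch of the syscall above (not part of this file): newseg() rounds the requested size up to whole pages and charges them against shm_ctlall, so a one-page request is the smallest useful case.

#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>

int main(void)
{
	/* private one-page segment; IPC_PRIVATE always creates a new id */
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

	if (id < 0) {
		perror("shmget");
		return 1;
	}
	printf("shmid = %d\n", id);
	shmctl(id, IPC_RMID, NULL);	/* destroyed once the last attach is gone */
	return 0;
}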
 580
 581static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
 582{
 583	switch(version) {
 584	case IPC_64:
 585		return copy_to_user(buf, in, sizeof(*in));
 586	case IPC_OLD:
 587	    {
 588		struct shmid_ds out;
 589
 590		memset(&out, 0, sizeof(out));
 591		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
 592		out.shm_segsz	= in->shm_segsz;
 593		out.shm_atime	= in->shm_atime;
 594		out.shm_dtime	= in->shm_dtime;
 595		out.shm_ctime	= in->shm_ctime;
 596		out.shm_cpid	= in->shm_cpid;
 597		out.shm_lpid	= in->shm_lpid;
 598		out.shm_nattch	= in->shm_nattch;
 599
 600		return copy_to_user(buf, &out, sizeof(out));
 601	    }
 602	default:
 603		return -EINVAL;
 604	}
 605}
 606
 607static inline unsigned long
 608copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
 609{
 610	switch(version) {
 611	case IPC_64:
 612		if (copy_from_user(out, buf, sizeof(*out)))
 613			return -EFAULT;
 614		return 0;
 615	case IPC_OLD:
 616	    {
 617		struct shmid_ds tbuf_old;
 618
 619		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 620			return -EFAULT;
 621
 622		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
 623		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
 624		out->shm_perm.mode	= tbuf_old.shm_perm.mode;
 625
 626		return 0;
 627	    }
 628	default:
 629		return -EINVAL;
 630	}
 631}
 632
 633static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
 634{
 635	switch(version) {
 636	case IPC_64:
 637		return copy_to_user(buf, in, sizeof(*in));
 638	case IPC_OLD:
 639	    {
 640		struct shminfo out;
 641
 642		if(in->shmmax > INT_MAX)
 643			out.shmmax = INT_MAX;
 644		else
 645			out.shmmax = (int)in->shmmax;
 646
 647		out.shmmin	= in->shmmin;
 648		out.shmmni	= in->shmmni;
 649		out.shmseg	= in->shmseg;
 650		out.shmall	= in->shmall; 
 651
 652		return copy_to_user(buf, &out, sizeof(out));
 653	    }
 654	default:
 655		return -EINVAL;
 656	}
 657}
 658
 659/*
 660 * Calculate and add used RSS and swap pages of a shm.
 661 * Called with shm_ids.rw_mutex held as a reader
 662 */
 663static void shm_add_rss_swap(struct shmid_kernel *shp,
 664	unsigned long *rss_add, unsigned long *swp_add)
 665{
 666	struct inode *inode;
 667
 668	inode = shp->shm_file->f_path.dentry->d_inode;
 669
 670	if (is_file_hugepages(shp->shm_file)) {
 671		struct address_space *mapping = inode->i_mapping;
 672		struct hstate *h = hstate_file(shp->shm_file);
 673		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
 674	} else {
 675#ifdef CONFIG_SHMEM
 676		struct shmem_inode_info *info = SHMEM_I(inode);
 677		spin_lock(&info->lock);
 678		*rss_add += inode->i_mapping->nrpages;
 679		*swp_add += info->swapped;
 680		spin_unlock(&info->lock);
 681#else
 682		*rss_add += inode->i_mapping->nrpages;
 683#endif
 684	}
 685}
 686
 687/*
 688 * Called with shm_ids.rw_mutex held as a reader
 689 */
 690static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
 691		unsigned long *swp)
 692{
 693	int next_id;
 694	int total, in_use;
 695
 696	*rss = 0;
 697	*swp = 0;
 698
 699	in_use = shm_ids(ns).in_use;
 700
 701	for (total = 0, next_id = 0; total < in_use; next_id++) {
 702		struct kern_ipc_perm *ipc;
 703		struct shmid_kernel *shp;
 704
 705		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
 706		if (ipc == NULL)
 707			continue;
 708		shp = container_of(ipc, struct shmid_kernel, shm_perm);
 709
 710		shm_add_rss_swap(shp, rss, swp);
 711
 712		total++;
 713	}
 714}
 715
 716/*
 717 * This function handles some shmctl commands which require the rw_mutex
 718 * to be held in write mode.
 719 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 720 */
 721static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
 722		       struct shmid_ds __user *buf, int version)
 723{
 724	struct kern_ipc_perm *ipcp;
 725	struct shmid64_ds shmid64;
 726	struct shmid_kernel *shp;
 727	int err;
 728
 729	if (cmd == IPC_SET) {
 730		if (copy_shmid_from_user(&shmid64, buf, version))
 731			return -EFAULT;
 732	}
 733
 734	ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
 735			       &shmid64.shm_perm, 0);
 736	if (IS_ERR(ipcp))
 737		return PTR_ERR(ipcp);
 738
 739	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 740
 741	err = security_shm_shmctl(shp, cmd);
 742	if (err)
 743		goto out_unlock;
 744	switch (cmd) {
 745	case IPC_RMID:
 746		do_shm_rmid(ns, ipcp);
 747		goto out_up;
 748	case IPC_SET:
 749		ipc_update_perm(&shmid64.shm_perm, ipcp);
 750		shp->shm_ctim = get_seconds();
 751		break;
 752	default:
 753		err = -EINVAL;
 754	}
 755out_unlock:
 756	shm_unlock(shp);
 757out_up:
 758	up_write(&shm_ids(ns).rw_mutex);
 759	return err;
 760}
 761
 762SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 763{
 764	struct shmid_kernel *shp;
 765	int err, version;
 766	struct ipc_namespace *ns;
 767
 768	if (cmd < 0 || shmid < 0) {
 769		err = -EINVAL;
 770		goto out;
 771	}
 772
 773	version = ipc_parse_version(&cmd);
 774	ns = current->nsproxy->ipc_ns;
 775
 776	switch (cmd) { /* replace with proc interface ? */
 777	case IPC_INFO:
 778	{
 779		struct shminfo64 shminfo;
 780
 781		err = security_shm_shmctl(NULL, cmd);
 782		if (err)
 783			return err;
 784
 785		memset(&shminfo, 0, sizeof(shminfo));
 786		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
 787		shminfo.shmmax = ns->shm_ctlmax;
 788		shminfo.shmall = ns->shm_ctlall;
 789
 790		shminfo.shmmin = SHMMIN;
 791		if(copy_shminfo_to_user (buf, &shminfo, version))
 792			return -EFAULT;
 793
 794		down_read(&shm_ids(ns).rw_mutex);
 795		err = ipc_get_maxid(&shm_ids(ns));
 796		up_read(&shm_ids(ns).rw_mutex);
 797
 798		if(err<0)
 799			err = 0;
 800		goto out;
 801	}
 802	case SHM_INFO:
 803	{
 804		struct shm_info shm_info;
 805
 806		err = security_shm_shmctl(NULL, cmd);
 807		if (err)
 808			return err;
 809
 810		memset(&shm_info, 0, sizeof(shm_info));
 811		down_read(&shm_ids(ns).rw_mutex);
 812		shm_info.used_ids = shm_ids(ns).in_use;
 813		shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
 814		shm_info.shm_tot = ns->shm_tot;
 815		shm_info.swap_attempts = 0;
 816		shm_info.swap_successes = 0;
 817		err = ipc_get_maxid(&shm_ids(ns));
 818		up_read(&shm_ids(ns).rw_mutex);
 819		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
 820			err = -EFAULT;
 821			goto out;
 822		}
 823
 824		err = err < 0 ? 0 : err;
 825		goto out;
 826	}
 827	case SHM_STAT:
 828	case IPC_STAT:
 829	{
 830		struct shmid64_ds tbuf;
 831		int result;
 832
 833		if (cmd == SHM_STAT) {
 834			shp = shm_lock(ns, shmid);
 835			if (IS_ERR(shp)) {
 836				err = PTR_ERR(shp);
 837				goto out;
 838			}
 839			result = shp->shm_perm.id;
 840		} else {
 841			shp = shm_lock_check(ns, shmid);
 842			if (IS_ERR(shp)) {
 843				err = PTR_ERR(shp);
 844				goto out;
 845			}
 846			result = 0;
 847		}
 848		err = -EACCES;
 849		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
 850			goto out_unlock;
 851		err = security_shm_shmctl(shp, cmd);
 852		if (err)
 853			goto out_unlock;
 854		memset(&tbuf, 0, sizeof(tbuf));
 855		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
 856		tbuf.shm_segsz	= shp->shm_segsz;
 857		tbuf.shm_atime	= shp->shm_atim;
 858		tbuf.shm_dtime	= shp->shm_dtim;
 859		tbuf.shm_ctime	= shp->shm_ctim;
 860		tbuf.shm_cpid	= shp->shm_cprid;
 861		tbuf.shm_lpid	= shp->shm_lprid;
 862		tbuf.shm_nattch	= shp->shm_nattch;
 863		shm_unlock(shp);
 864		if(copy_shmid_to_user (buf, &tbuf, version))
 865			err = -EFAULT;
 866		else
 867			err = result;
 868		goto out;
 869	}
 870	case SHM_LOCK:
 871	case SHM_UNLOCK:
 872	{
 873		struct file *uninitialized_var(shm_file);
 874
 875		lru_add_drain_all();  /* drain pagevecs to lru lists */
 876
 877		shp = shm_lock_check(ns, shmid);
 878		if (IS_ERR(shp)) {
 879			err = PTR_ERR(shp);
 880			goto out;
 881		}
 882
 883		audit_ipc_obj(&(shp->shm_perm));
 884
 885		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
 886			uid_t euid = current_euid();
 887			err = -EPERM;
 888			if (euid != shp->shm_perm.uid &&
 889			    euid != shp->shm_perm.cuid)
 890				goto out_unlock;
 891			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
 892				goto out_unlock;
 893		}
 894
 895		err = security_shm_shmctl(shp, cmd);
 896		if (err)
 897			goto out_unlock;
 898		
 899		if(cmd==SHM_LOCK) {
 900			struct user_struct *user = current_user();
 901			if (!is_file_hugepages(shp->shm_file)) {
 902				err = shmem_lock(shp->shm_file, 1, user);
 903				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
 904					shp->shm_perm.mode |= SHM_LOCKED;
 905					shp->mlock_user = user;
 906				}
 907			}
 908		} else if (!is_file_hugepages(shp->shm_file)) {
 909			shmem_lock(shp->shm_file, 0, shp->mlock_user);
 910			shp->shm_perm.mode &= ~SHM_LOCKED;
 911			shp->mlock_user = NULL;
 912		}
 913		shm_unlock(shp);
 914		goto out;
 915	}
 916	case IPC_RMID:
 917	case IPC_SET:
 918		err = shmctl_down(ns, shmid, cmd, buf, version);
 919		return err;
 920	default:
 921		return -EINVAL;
 922	}
 923
 924out_unlock:
 925	shm_unlock(shp);
 926out:
 927	return err;
 928}
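A small userspace sketch of the IPC_STAT branch handled above (assumes `id` came from an earlier shmget(); the kernel fills the structure via copy_shmid_to_user()):

#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>

/* query one segment; 'id' would come from a prior shmget() call */
static int show_segment(int id)
{
	struct shmid_ds ds;

	if (shmctl(id, IPC_STAT, &ds) < 0) {
		perror("shmctl(IPC_STAT)");
		return -1;
	}
	printf("size=%zu nattch=%lu mode=%o\n",
	       ds.shm_segsz, (unsigned long)ds.shm_nattch,
	       (unsigned int)ds.shm_perm.mode);
	return 0;
}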
 929
 930/*
 931 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 932 *
 933 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 934 * "raddr" thing points to kernel space, and there has to be a wrapper around
 935 * this.
 936 */
 937long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
 938{
 939	struct shmid_kernel *shp;
 940	unsigned long addr;
 941	unsigned long size;
 942	struct file * file;
 943	int    err;
 944	unsigned long flags;
 945	unsigned long prot;
 946	int acc_mode;
 947	unsigned long user_addr;
 948	struct ipc_namespace *ns;
 949	struct shm_file_data *sfd;
 950	struct path path;
 951	fmode_t f_mode;
 952
 953	err = -EINVAL;
 954	if (shmid < 0)
 955		goto out;
 956	else if ((addr = (ulong)shmaddr)) {
 957		if (addr & (SHMLBA-1)) {
 958			if (shmflg & SHM_RND)
 959				addr &= ~(SHMLBA-1);	   /* round down */
 960			else
 961#ifndef __ARCH_FORCE_SHMLBA
 962				if (addr & ~PAGE_MASK)
 963#endif
 964					goto out;
 965		}
 966		flags = MAP_SHARED | MAP_FIXED;
 967	} else {
 968		if ((shmflg & SHM_REMAP))
 969			goto out;
 970
 971		flags = MAP_SHARED;
 972	}
 973
 974	if (shmflg & SHM_RDONLY) {
 975		prot = PROT_READ;
 976		acc_mode = S_IRUGO;
 977		f_mode = FMODE_READ;
 978	} else {
 979		prot = PROT_READ | PROT_WRITE;
 980		acc_mode = S_IRUGO | S_IWUGO;
 981		f_mode = FMODE_READ | FMODE_WRITE;
 982	}
 983	if (shmflg & SHM_EXEC) {
 984		prot |= PROT_EXEC;
 985		acc_mode |= S_IXUGO;
 986	}
 987
 988	/*
 989	 * We cannot rely on the fs check since SYSV IPC does have an
 990	 * additional creator id...
 991	 */
 992	ns = current->nsproxy->ipc_ns;
 993	shp = shm_lock_check(ns, shmid);
 994	if (IS_ERR(shp)) {
 995		err = PTR_ERR(shp);
 996		goto out;
 997	}
 998
 999	err = -EACCES;
1000	if (ipcperms(ns, &shp->shm_perm, acc_mode))
1001		goto out_unlock;
1002
1003	err = security_shm_shmat(shp, shmaddr, shmflg);
1004	if (err)
1005		goto out_unlock;
1006
1007	path = shp->shm_file->f_path;
1008	path_get(&path);
1009	shp->shm_nattch++;
1010	size = i_size_read(path.dentry->d_inode);
1011	shm_unlock(shp);
1012
1013	err = -ENOMEM;
1014	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1015	if (!sfd)
1016		goto out_put_dentry;
1017
1018	file = alloc_file(&path, f_mode,
1019			  is_file_hugepages(shp->shm_file) ?
1020				&shm_file_operations_huge :
1021				&shm_file_operations);
1022	if (!file)
1023		goto out_free;
1024
1025	file->private_data = sfd;
1026	file->f_mapping = shp->shm_file->f_mapping;
1027	sfd->id = shp->shm_perm.id;
1028	sfd->ns = get_ipc_ns(ns);
1029	sfd->file = shp->shm_file;
1030	sfd->vm_ops = NULL;
1031
1032	down_write(&current->mm->mmap_sem);
1033	if (addr && !(shmflg & SHM_REMAP)) {
1034		err = -EINVAL;
1035		if (find_vma_intersection(current->mm, addr, addr + size))
1036			goto invalid;
1037		/*
1038		 * If shm segment goes below stack, make sure there is some
1039		 * space left for the stack to grow (at least 4 pages).
1040		 */
1041		if (addr < current->mm->start_stack &&
1042		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
1043			goto invalid;
1044	}
1045		
1046	user_addr = do_mmap (file, addr, size, prot, flags, 0);
1047	*raddr = user_addr;
1048	err = 0;
1049	if (IS_ERR_VALUE(user_addr))
1050		err = (long)user_addr;
1051invalid:
1052	up_write(&current->mm->mmap_sem);
1053
1054	fput(file);
1055
1056out_nattch:
1057	down_write(&shm_ids(ns).rw_mutex);
1058	shp = shm_lock(ns, shmid);
1059	BUG_ON(IS_ERR(shp));
1060	shp->shm_nattch--;
1061	if (shm_may_destroy(ns, shp))
1062		shm_destroy(ns, shp);
1063	else
1064		shm_unlock(shp);
1065	up_write(&shm_ids(ns).rw_mutex);
1066
1067out:
1068	return err;
1069
1070out_unlock:
1071	shm_unlock(shp);
1072	goto out;
1073
1074out_free:
1075	kfree(sfd);
1076out_put_dentry:
1077	path_put(&path);
1078	goto out_nattch;
1079}
1080
1081SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1082{
1083	unsigned long ret;
1084	long err;
1085
1086	err = do_shmat(shmid, shmaddr, shmflg, &ret);
1087	if (err)
1088		return err;
1089	force_successful_syscall_return();
1090	return (long)ret;
1091}
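And a minimal attach/detach round trip through the two entry points above (illustrative only; a NULL address lets the kernel pick the mapping, mirroring the non-fixed branch in do_shmat()):

#include <sys/ipc.h>
#include <sys/shm.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	char *p;

	if (id < 0)
		return 1;
	p = shmat(id, NULL, 0);		/* kernel chooses the address */
	if (p == (void *)-1) {
		perror("shmat");
		return 1;
	}
	strcpy(p, "hello");
	printf("%s mapped at %p\n", p, (void *)p);
	shmdt(p);			/* drops shm_nattch, see shm_close() */
	shmctl(id, IPC_RMID, NULL);	/* freed once nattch reaches zero */
	return 0;
}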
1092
1093/*
1094 * detach and kill segment if marked destroyed.
1095 * The work is done in shm_close.
1096 */
1097SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1098{
1099	struct mm_struct *mm = current->mm;
1100	struct vm_area_struct *vma;
1101	unsigned long addr = (unsigned long)shmaddr;
1102	int retval = -EINVAL;
1103#ifdef CONFIG_MMU
1104	loff_t size = 0;
1105	struct vm_area_struct *next;
1106#endif
1107
1108	if (addr & ~PAGE_MASK)
1109		return retval;
1110
1111	down_write(&mm->mmap_sem);
1112
1113	/*
1114	 * This function tries to be smart and unmap shm segments that
1115	 * were modified by partial mlock or munmap calls:
1116	 * - It first determines the size of the shm segment that should be
1117	 *   unmapped: It searches for a vma that is backed by shm and that
 1118	 *   started at address shmaddr. It records its size and then unmaps
1119	 *   it.
1120	 * - Then it unmaps all shm vmas that started at shmaddr and that
1121	 *   are within the initially determined size.
1122	 * Errors from do_munmap are ignored: the function only fails if
1123	 * it's called with invalid parameters or if it's called to unmap
1124	 * a part of a vma. Both calls in this function are for full vmas,
1125	 * the parameters are directly copied from the vma itself and always
1126	 * valid - therefore do_munmap cannot fail. (famous last words?)
1127	 */
1128	/*
1129	 * If it had been mremap()'d, the starting address would not
1130	 * match the usual checks anyway. So assume all vma's are
1131	 * above the starting address given.
1132	 */
1133	vma = find_vma(mm, addr);
1134
1135#ifdef CONFIG_MMU
1136	while (vma) {
1137		next = vma->vm_next;
1138
1139		/*
1140		 * Check if the starting address would match, i.e. it's
1141		 * a fragment created by mprotect() and/or munmap(), or it
 1142		 * otherwise starts at this address with no hassles.
1143		 */
1144		if ((vma->vm_ops == &shm_vm_ops) &&
1145			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1146
1147
1148			size = vma->vm_file->f_path.dentry->d_inode->i_size;
1149			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1150			/*
1151			 * We discovered the size of the shm segment, so
1152			 * break out of here and fall through to the next
1153			 * loop that uses the size information to stop
1154			 * searching for matching vma's.
1155			 */
1156			retval = 0;
1157			vma = next;
1158			break;
1159		}
1160		vma = next;
1161	}
1162
1163	/*
1164	 * We need look no further than the maximum address a fragment
1165	 * could possibly have landed at. Also cast things to loff_t to
1166	 * prevent overflows and make comparisons vs. equal-width types.
1167	 */
1168	size = PAGE_ALIGN(size);
1169	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1170		next = vma->vm_next;
1171
1172		/* finding a matching vma now does not alter retval */
1173		if ((vma->vm_ops == &shm_vm_ops) &&
1174			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
1175
1176			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1177		vma = next;
1178	}
1179
1180#else /* CONFIG_MMU */
1181	/* under NOMMU conditions, the exact address to be destroyed must be
1182	 * given */
1183	retval = -EINVAL;
1184	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1185		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1186		retval = 0;
1187	}
1188
1189#endif
1190
1191	up_write(&mm->mmap_sem);
1192	return retval;
1193}
1194
1195#ifdef CONFIG_PROC_FS
1196static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1197{
1198	struct shmid_kernel *shp = it;
1199	unsigned long rss = 0, swp = 0;
1200
1201	shm_add_rss_swap(shp, &rss, &swp);
1202
1203#if BITS_PER_LONG <= 32
1204#define SIZE_SPEC "%10lu"
1205#else
1206#define SIZE_SPEC "%21lu"
1207#endif
1208
1209	return seq_printf(s,
1210			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
1211			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
1212			  SIZE_SPEC " " SIZE_SPEC "\n",
1213			  shp->shm_perm.key,
1214			  shp->shm_perm.id,
1215			  shp->shm_perm.mode,
1216			  shp->shm_segsz,
1217			  shp->shm_cprid,
1218			  shp->shm_lprid,
1219			  shp->shm_nattch,
1220			  shp->shm_perm.uid,
1221			  shp->shm_perm.gid,
1222			  shp->shm_perm.cuid,
1223			  shp->shm_perm.cgid,
1224			  shp->shm_atim,
1225			  shp->shm_dtim,
1226			  shp->shm_ctim,
1227			  rss * PAGE_SIZE,
1228			  swp * PAGE_SIZE);
1229}
1230#endif
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/ipc/shm.c
   4 * Copyright (C) 1992, 1993 Krishna Balasubramanian
   5 *	 Many improvements/fixes by Bruno Haible.
   6 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
   7 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
   8 *
   9 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
  10 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
  11 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
  12 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
  13 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
  14 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
  15 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
  16 *
  17 * support for audit of ipc object properties and permission changes
  18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  19 *
  20 * namespaces support
  21 * OpenVZ, SWsoft Inc.
  22 * Pavel Emelianov <xemul@openvz.org>
  23 *
  24 * Better ipc lock (kern_ipc_perm.lock) handling
  25 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
  26 */
  27
  28#include <linux/slab.h>
  29#include <linux/mm.h>
  30#include <linux/hugetlb.h>
  31#include <linux/shm.h>
  32#include <linux/init.h>
  33#include <linux/file.h>
  34#include <linux/mman.h>
  35#include <linux/shmem_fs.h>
  36#include <linux/security.h>
  37#include <linux/syscalls.h>
  38#include <linux/audit.h>
  39#include <linux/capability.h>
  40#include <linux/ptrace.h>
  41#include <linux/seq_file.h>
  42#include <linux/rwsem.h>
  43#include <linux/nsproxy.h>
  44#include <linux/mount.h>
  45#include <linux/ipc_namespace.h>
  46#include <linux/rhashtable.h>
  47
  48#include <linux/uaccess.h>
  49
  50#include "util.h"
  51
  52struct shmid_kernel /* private to the kernel */
  53{
  54	struct kern_ipc_perm	shm_perm;
  55	struct file		*shm_file;
  56	unsigned long		shm_nattch;
  57	unsigned long		shm_segsz;
  58	time64_t		shm_atim;
  59	time64_t		shm_dtim;
  60	time64_t		shm_ctim;
  61	struct pid		*shm_cprid;
  62	struct pid		*shm_lprid;
  63	struct ucounts		*mlock_ucounts;
  64
  65	/*
  66	 * The task created the shm object, for
  67	 * task_lock(shp->shm_creator)
  68	 */
  69	struct task_struct	*shm_creator;
  70
  71	/*
  72	 * List by creator. task_lock(->shm_creator) required for read/write.
  73	 * If list_empty(), then the creator is dead already.
  74	 */
  75	struct list_head	shm_clist;
  76	struct ipc_namespace	*ns;
  77} __randomize_layout;
  78
  79/* shm_mode upper byte flags */
  80#define SHM_DEST	01000	/* segment will be destroyed on last detach */
  81#define SHM_LOCKED	02000   /* segment will not be swapped */
  82
  83struct shm_file_data {
  84	int id;
  85	struct ipc_namespace *ns;
  86	struct file *file;
  87	const struct vm_operations_struct *vm_ops;
  88};
  89
  90#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
  91
  92static const struct file_operations shm_file_operations;
  93static const struct vm_operations_struct shm_vm_ops;
  94
  95#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])
  96
  97#define shm_unlock(shp)			\
  98	ipc_unlock(&(shp)->shm_perm)
  99
 100static int newseg(struct ipc_namespace *, struct ipc_params *);
 101static void shm_open(struct vm_area_struct *vma);
 102static void shm_close(struct vm_area_struct *vma);
 103static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
 104#ifdef CONFIG_PROC_FS
 105static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
 106#endif
 107
 108void shm_init_ns(struct ipc_namespace *ns)
 109{
 110	ns->shm_ctlmax = SHMMAX;
 111	ns->shm_ctlall = SHMALL;
 112	ns->shm_ctlmni = SHMMNI;
 113	ns->shm_rmid_forced = 0;
 114	ns->shm_tot = 0;
 115	ipc_init_ids(&shm_ids(ns));
 116}
 117
 118/*
 119 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 120 * Only shm_ids.rwsem remains locked on exit.
 121 */
 122static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 123{
 124	struct shmid_kernel *shp;
 125
 126	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 127	WARN_ON(ns != shp->ns);
 128
 129	if (shp->shm_nattch) {
 130		shp->shm_perm.mode |= SHM_DEST;
 131		/* Do not find it any more */
 132		ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
 133		shm_unlock(shp);
 134	} else
 135		shm_destroy(ns, shp);
 136}
 137
 138#ifdef CONFIG_IPC_NS
 139void shm_exit_ns(struct ipc_namespace *ns)
 140{
 141	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
 142	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
 143	rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
 144}
 145#endif
 146
 147static int __init ipc_ns_init(void)
 148{
 149	shm_init_ns(&init_ipc_ns);
 150	return 0;
 151}
 152
 153pure_initcall(ipc_ns_init);
 154
 155void __init shm_init(void)
 156{
 157	ipc_init_proc_interface("sysvipc/shm",
 158#if BITS_PER_LONG <= 32
 159				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
 160#else
 161				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
 162#endif
 163				IPC_SHM_IDS, sysvipc_shm_proc_show);
 164}
 165
 166static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
 167{
 168	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
 169
 170	if (IS_ERR(ipcp))
 171		return ERR_CAST(ipcp);
 172
 173	return container_of(ipcp, struct shmid_kernel, shm_perm);
 174}
 175
 176static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
 177{
 178	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
 179
 180	if (IS_ERR(ipcp))
 181		return ERR_CAST(ipcp);
 182
 183	return container_of(ipcp, struct shmid_kernel, shm_perm);
 184}
 185
 186/*
 187 * shm_lock_(check_) routines are called in the paths where the rwsem
 188 * is not necessarily held.
 189 */
 190static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 191{
 192	struct kern_ipc_perm *ipcp;
 193
 194	rcu_read_lock();
 195	ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
 196	if (IS_ERR(ipcp))
 197		goto err;
 198
 199	ipc_lock_object(ipcp);
 200	/*
 201	 * ipc_rmid() may have already freed the ID while ipc_lock_object()
 202	 * was spinning: here verify that the structure is still valid.
 203	 * Upon races with RMID, return -EIDRM, thus indicating that
 204	 * the ID points to a removed identifier.
 205	 */
 206	if (ipc_valid_object(ipcp)) {
 207		/* return a locked ipc object upon success */
 208		return container_of(ipcp, struct shmid_kernel, shm_perm);
 209	}
 210
 211	ipc_unlock_object(ipcp);
 212	ipcp = ERR_PTR(-EIDRM);
 213err:
 214	rcu_read_unlock();
 215	/*
 216	 * Callers of shm_lock() must validate the status of the returned ipc
 217	 * object pointer and error out as appropriate.
 218	 */
 219	return ERR_CAST(ipcp);
 220}
 221
 222static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
 223{
 224	rcu_read_lock();
 225	ipc_lock_object(&ipcp->shm_perm);
 226}
 227
 228static void shm_rcu_free(struct rcu_head *head)
 229{
 230	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
 231							rcu);
 232	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
 233							shm_perm);
 234	security_shm_free(&shp->shm_perm);
 235	kfree(shp);
 236}
 237
 238/*
 239 * It has to be called with shp locked.
 240 * It must be called before ipc_rmid()
 241 */
 242static inline void shm_clist_rm(struct shmid_kernel *shp)
 243{
 244	struct task_struct *creator;
 245
 246	/* ensure that shm_creator does not disappear */
 247	rcu_read_lock();
 248
 249	/*
 250	 * A concurrent exit_shm may do a list_del_init() as well.
 251	 * Just do nothing if exit_shm already did the work
 252	 */
 253	if (!list_empty(&shp->shm_clist)) {
 254		/*
 255		 * shp->shm_creator is guaranteed to be valid *only*
 256		 * if shp->shm_clist is not empty.
 257		 */
 258		creator = shp->shm_creator;
 259
 260		task_lock(creator);
 261		/*
 262		 * list_del_init() is a nop if the entry was already removed
 263		 * from the list.
 264		 */
 265		list_del_init(&shp->shm_clist);
 266		task_unlock(creator);
 267	}
 268	rcu_read_unlock();
 269}
 270
 271static inline void shm_rmid(struct shmid_kernel *s)
 272{
 273	shm_clist_rm(s);
 274	ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
 275}
 276
 277
 278static int __shm_open(struct shm_file_data *sfd)
 279{
 280	struct shmid_kernel *shp;
 281
 282	shp = shm_lock(sfd->ns, sfd->id);
 283
 284	if (IS_ERR(shp))
 285		return PTR_ERR(shp);
 286
 287	if (shp->shm_file != sfd->file) {
 288		/* ID was reused */
 289		shm_unlock(shp);
 290		return -EINVAL;
 291	}
 292
 293	shp->shm_atim = ktime_get_real_seconds();
 294	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
 295	shp->shm_nattch++;
 296	shm_unlock(shp);
 297	return 0;
 298}
 299
 300/* This is called by fork, once for every shm attach. */
 301static void shm_open(struct vm_area_struct *vma)
 302{
 303	struct file *file = vma->vm_file;
 304	struct shm_file_data *sfd = shm_file_data(file);
 305	int err;
 306
 307	/* Always call underlying open if present */
 308	if (sfd->vm_ops->open)
 309		sfd->vm_ops->open(vma);
 310
 311	err = __shm_open(sfd);
 312	/*
 313	 * We raced in the idr lookup or with shm_destroy().
 314	 * Either way, the ID is busted.
 315	 */
 316	WARN_ON_ONCE(err);
 317}
 318
 319/*
 320 * shm_destroy - free the struct shmid_kernel
 321 *
 322 * @ns: namespace
 323 * @shp: struct to free
 324 *
 325 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 326 * but returns with shp unlocked and freed.
 327 */
 328static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 329{
 330	struct file *shm_file;
 331
 332	shm_file = shp->shm_file;
 333	shp->shm_file = NULL;
 334	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
 335	shm_rmid(shp);
 336	shm_unlock(shp);
 337	if (!is_file_hugepages(shm_file))
 338		shmem_lock(shm_file, 0, shp->mlock_ucounts);
 339	fput(shm_file);
 340	ipc_update_pid(&shp->shm_cprid, NULL);
 341	ipc_update_pid(&shp->shm_lprid, NULL);
 342	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
 343}
 344
 345/*
 346 * shm_may_destroy - identifies whether shm segment should be destroyed now
 347 *
 348 * Returns true if and only if there are no active users of the segment and
 349 * one of the following is true:
 350 *
 351 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 352 *
 353 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 354 */
 355static bool shm_may_destroy(struct shmid_kernel *shp)
 356{
 357	return (shp->shm_nattch == 0) &&
 358	       (shp->ns->shm_rmid_forced ||
 359		(shp->shm_perm.mode & SHM_DEST));
 360}
 361
 362/*
 363 * remove the attach descriptor vma.
 364 * free memory for segment if it is marked destroyed.
 365 * The descriptor has already been removed from the current->mm->mmap list
 366 * and will later be kfree()d.
 367 */
 368static void __shm_close(struct shm_file_data *sfd)
 369{
 370	struct shmid_kernel *shp;
 371	struct ipc_namespace *ns = sfd->ns;
 372
 373	down_write(&shm_ids(ns).rwsem);
 374	/* remove from the list of attaches of the shm segment */
 375	shp = shm_lock(ns, sfd->id);
 376
 377	/*
 378	 * We raced in the idr lookup or with shm_destroy().
 379	 * Either way, the ID is busted.
 380	 */
 381	if (WARN_ON_ONCE(IS_ERR(shp)))
 382		goto done; /* no-op */
 383
 384	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
 385	shp->shm_dtim = ktime_get_real_seconds();
 386	shp->shm_nattch--;
 387	if (shm_may_destroy(shp))
 388		shm_destroy(ns, shp);
 389	else
 390		shm_unlock(shp);
 391done:
 392	up_write(&shm_ids(ns).rwsem);
 393}
 394
 395static void shm_close(struct vm_area_struct *vma)
 396{
 397	struct file *file = vma->vm_file;
 398	struct shm_file_data *sfd = shm_file_data(file);
 399
 400	/* Always call underlying close if present */
 401	if (sfd->vm_ops->close)
 402		sfd->vm_ops->close(vma);
 403
 404	__shm_close(sfd);
 405}
 406
 407/* Called with ns->shm_ids(ns).rwsem locked */
 408static int shm_try_destroy_orphaned(int id, void *p, void *data)
 409{
 410	struct ipc_namespace *ns = data;
 411	struct kern_ipc_perm *ipcp = p;
 412	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 413
 414	/*
 415	 * We want to destroy segments without users and with already
 416	 * exit'ed originating process.
 417	 *
 418	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
 419	 */
 420	if (!list_empty(&shp->shm_clist))
 421		return 0;
 422
 423	if (shm_may_destroy(shp)) {
 424		shm_lock_by_ptr(shp);
 425		shm_destroy(ns, shp);
 426	}
 427	return 0;
 428}
 429
 430void shm_destroy_orphaned(struct ipc_namespace *ns)
 431{
 432	down_write(&shm_ids(ns).rwsem);
 433	if (shm_ids(ns).in_use)
 434		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
 435	up_write(&shm_ids(ns).rwsem);
 436}
 437
 438/* Locking assumes this will only be called with task == current */
 439void exit_shm(struct task_struct *task)
 440{
 441	for (;;) {
 442		struct shmid_kernel *shp;
 443		struct ipc_namespace *ns;
 444
 445		task_lock(task);
 446
 447		if (list_empty(&task->sysvshm.shm_clist)) {
 448			task_unlock(task);
 449			break;
 450		}
 451
 452		shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
 453				shm_clist);
 454
 455		/*
  456		 * 1) Get a pointer to the ipc namespace. It is worth saying
  457		 * that this pointer is guaranteed to be valid because
  458		 * the shp lifetime is always shorter than the lifetime of the
  459		 * namespace in which shp lives.
  460		 * Since we hold task_lock, shp won't be freed.
 461		 */
 462		ns = shp->ns;
 463
 464		/*
 465		 * 2) If kernel.shm_rmid_forced is not set then only keep track of
 466		 * which shmids are orphaned, so that a later set of the sysctl
 467		 * can clean them up.
 468		 */
 469		if (!ns->shm_rmid_forced)
 470			goto unlink_continue;
 471
 472		/*
 473		 * 3) get a reference to the namespace.
 474		 *    The refcount could be already 0. If it is 0, then
 475		 *    the shm objects will be free by free_ipc_work().
 476		 */
 477		ns = get_ipc_ns_not_zero(ns);
 478		if (!ns) {
 479unlink_continue:
 480			list_del_init(&shp->shm_clist);
 481			task_unlock(task);
 482			continue;
 483		}
 484
 485		/*
 486		 * 4) get a reference to shp.
 487		 *   This cannot fail: shm_clist_rm() is called before
 488		 *   ipc_rmid(), thus the refcount cannot be 0.
 489		 */
 490		WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
 491
 492		/*
 493		 * 5) unlink the shm segment from the list of segments
 494		 *    created by current.
 495		 *    This must be done last. After unlinking,
 496		 *    only the refcounts obtained above prevent IPC_RMID
 497		 *    from destroying the segment or the namespace.
 498		 */
 499		list_del_init(&shp->shm_clist);
 500
 501		task_unlock(task);
 502
 503		/*
 504		 * 6) we have all references
 505		 *    Thus lock & if needed destroy shp.
 506		 */
 507		down_write(&shm_ids(ns).rwsem);
 508		shm_lock_by_ptr(shp);
 509		/*
 510		 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
 511		 * safe to call ipc_rcu_putref here
 512		 */
 513		ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
 514
 515		if (ipc_valid_object(&shp->shm_perm)) {
 516			if (shm_may_destroy(shp))
 517				shm_destroy(ns, shp);
 518			else
 519				shm_unlock(shp);
 520		} else {
 521			/*
 522			 * Someone else deleted the shp from namespace
 523			 * idr/kht while we have waited.
 524			 * Just unlock and continue.
 525			 */
 526			shm_unlock(shp);
 527		}
 528
 529		up_write(&shm_ids(ns).rwsem);
 530		put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
 531	}
 532}
 533
 534static vm_fault_t shm_fault(struct vm_fault *vmf)
 535{
 536	struct file *file = vmf->vma->vm_file;
 537	struct shm_file_data *sfd = shm_file_data(file);
 538
 539	return sfd->vm_ops->fault(vmf);
 540}
 541
 542static int shm_may_split(struct vm_area_struct *vma, unsigned long addr)
 543{
 544	struct file *file = vma->vm_file;
 545	struct shm_file_data *sfd = shm_file_data(file);
 546
 547	if (sfd->vm_ops->may_split)
 548		return sfd->vm_ops->may_split(vma, addr);
 549
 550	return 0;
 551}
 552
 553static unsigned long shm_pagesize(struct vm_area_struct *vma)
 554{
 555	struct file *file = vma->vm_file;
 556	struct shm_file_data *sfd = shm_file_data(file);
 557
 558	if (sfd->vm_ops->pagesize)
 559		return sfd->vm_ops->pagesize(vma);
 560
 561	return PAGE_SIZE;
 562}
 563
 564#ifdef CONFIG_NUMA
 565static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 566{
 567	struct file *file = vma->vm_file;
 568	struct shm_file_data *sfd = shm_file_data(file);
 569	int err = 0;
 570
 571	if (sfd->vm_ops->set_policy)
 572		err = sfd->vm_ops->set_policy(vma, new);
 573	return err;
 574}
 575
 576static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
 577					unsigned long addr)
 578{
 579	struct file *file = vma->vm_file;
 580	struct shm_file_data *sfd = shm_file_data(file);
 581	struct mempolicy *pol = NULL;
 582
 583	if (sfd->vm_ops->get_policy)
 584		pol = sfd->vm_ops->get_policy(vma, addr);
 585	else if (vma->vm_policy)
 586		pol = vma->vm_policy;
 587
 588	return pol;
 589}
 590#endif
 591
 592static int shm_mmap(struct file *file, struct vm_area_struct *vma)
 593{
 594	struct shm_file_data *sfd = shm_file_data(file);
 595	int ret;
 596
 597	/*
 598	 * In case of remap_file_pages() emulation, the file can represent an
 599	 * IPC ID that was removed, and possibly even reused by another shm
 600	 * segment already.  Propagate this case as an error to caller.
 601	 */
 602	ret = __shm_open(sfd);
 603	if (ret)
 604		return ret;
 605
 606	ret = call_mmap(sfd->file, vma);
 607	if (ret) {
 608		__shm_close(sfd);
 609		return ret;
 610	}
 611	sfd->vm_ops = vma->vm_ops;
 612#ifdef CONFIG_MMU
 613	WARN_ON(!sfd->vm_ops->fault);
 614#endif
 615	vma->vm_ops = &shm_vm_ops;
 616	return 0;
 617}
 618
 619static int shm_release(struct inode *ino, struct file *file)
 620{
 621	struct shm_file_data *sfd = shm_file_data(file);
 622
 623	put_ipc_ns(sfd->ns);
 624	fput(sfd->file);
 625	shm_file_data(file) = NULL;
 626	kfree(sfd);
 627	return 0;
 628}
 629
 630static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 631{
 632	struct shm_file_data *sfd = shm_file_data(file);
 633
 634	if (!sfd->file->f_op->fsync)
 635		return -EINVAL;
 636	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
 637}
 638
 639static long shm_fallocate(struct file *file, int mode, loff_t offset,
 640			  loff_t len)
 641{
 642	struct shm_file_data *sfd = shm_file_data(file);
 643
 644	if (!sfd->file->f_op->fallocate)
 645		return -EOPNOTSUPP;
 646	return sfd->file->f_op->fallocate(file, mode, offset, len);
 647}
 648
 649static unsigned long shm_get_unmapped_area(struct file *file,
 650	unsigned long addr, unsigned long len, unsigned long pgoff,
 651	unsigned long flags)
 652{
 653	struct shm_file_data *sfd = shm_file_data(file);
 654
 655	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
 656						pgoff, flags);
 657}
 658
 659static const struct file_operations shm_file_operations = {
 660	.mmap		= shm_mmap,
 661	.fsync		= shm_fsync,
 662	.release	= shm_release,
 663	.get_unmapped_area	= shm_get_unmapped_area,
 664	.llseek		= noop_llseek,
 665	.fallocate	= shm_fallocate,
 666};
 667
 668/*
 669 * shm_file_operations_huge is now identical to shm_file_operations,
 670 * but we keep it distinct for the sake of is_file_shm_hugepages().
 671 */
 672static const struct file_operations shm_file_operations_huge = {
 673	.mmap		= shm_mmap,
 674	.fsync		= shm_fsync,
 675	.release	= shm_release,
 676	.get_unmapped_area	= shm_get_unmapped_area,
 677	.llseek		= noop_llseek,
 678	.fallocate	= shm_fallocate,
 679};
 680
 681bool is_file_shm_hugepages(struct file *file)
 682{
 683	return file->f_op == &shm_file_operations_huge;
 684}
 685
 686static const struct vm_operations_struct shm_vm_ops = {
 687	.open	= shm_open,	/* callback for a new vm-area open */
 688	.close	= shm_close,	/* callback for when the vm-area is released */
 689	.fault	= shm_fault,
 690	.may_split = shm_may_split,
 691	.pagesize = shm_pagesize,
 692#if defined(CONFIG_NUMA)
 693	.set_policy = shm_set_policy,
 694	.get_policy = shm_get_policy,
 695#endif
 696};
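
/*
 * Note: shm_vm_ops wraps the vm_ops of the underlying shmem or hugetlbfs
 * file.  shm_mmap() above saves the backing file's operations in
 * sfd->vm_ops and installs shm_vm_ops on the vma, so the IPC layer sees
 * every vm-area open/close for attach bookkeeping while fault and the NUMA
 * policy hooks are forwarded to the backing implementation.
 */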
 697
 698/**
 699 * newseg - Create a new shared memory segment
 700 * @ns: namespace
 701 * @params: ptr to the structure that contains key, size and shmflg
 702 *
 703 * Called with shm_ids.rwsem held as a writer.
 704 */
 705static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 706{
 707	key_t key = params->key;
 708	int shmflg = params->flg;
 709	size_t size = params->u.size;
 710	int error;
 711	struct shmid_kernel *shp;
 712	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 713	struct file *file;
 714	char name[13];
 715	vm_flags_t acctflag = 0;
 716
 717	if (size < SHMMIN || size > ns->shm_ctlmax)
 718		return -EINVAL;
 719
 720	if (numpages << PAGE_SHIFT < size)
 721		return -ENOSPC;
 722
 723	if (ns->shm_tot + numpages < ns->shm_tot ||
 724			ns->shm_tot + numpages > ns->shm_ctlall)
 725		return -ENOSPC;
 726
 727	shp = kmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
 728	if (unlikely(!shp))
 729		return -ENOMEM;
 730
 731	shp->shm_perm.key = key;
 732	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
 733	shp->mlock_ucounts = NULL;
 734
 735	shp->shm_perm.security = NULL;
 736	error = security_shm_alloc(&shp->shm_perm);
 737	if (error) {
 738		kfree(shp);
 739		return error;
 740	}
 741
 742	sprintf(name, "SYSV%08x", key);
 743	if (shmflg & SHM_HUGETLB) {
 744		struct hstate *hs;
 745		size_t hugesize;
 746
 747		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 748		if (!hs) {
 749			error = -EINVAL;
 750			goto no_file;
 751		}
 752		hugesize = ALIGN(size, huge_page_size(hs));
 753
 754		/* hugetlb_file_setup applies strict accounting */
 755		if (shmflg & SHM_NORESERVE)
 756			acctflag = VM_NORESERVE;
 757		file = hugetlb_file_setup(name, hugesize, acctflag,
 758				HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 759	} else {
 760		/*
 761		 * Do not allow unaccounted (SHM_NORESERVE) segments when the
 762		 * overcommit policy is OVERCOMMIT_NEVER, even if it's asked for.
 763		 */
 764		if  ((shmflg & SHM_NORESERVE) &&
 765				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
 766			acctflag = VM_NORESERVE;
 767		file = shmem_kernel_file_setup(name, size, acctflag);
 768	}
 769	error = PTR_ERR(file);
 770	if (IS_ERR(file))
 771		goto no_file;
 772
 773	shp->shm_cprid = get_pid(task_tgid(current));
 774	shp->shm_lprid = NULL;
 775	shp->shm_atim = shp->shm_dtim = 0;
 776	shp->shm_ctim = ktime_get_real_seconds();
 777	shp->shm_segsz = size;
 778	shp->shm_nattch = 0;
 779	shp->shm_file = file;
 780	shp->shm_creator = current;
 781
 782	/* ipc_addid() locks shp upon success. */
 783	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
 784	if (error < 0)
 785		goto no_id;
 786
 787	shp->ns = ns;
 788
 789	task_lock(current);
 790	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
 791	task_unlock(current);
 792
 793	/*
 794	 * shmid gets reported as "inode#" in /proc/pid/maps.
 795	 * proc-ps tools use this. Changing this will break them.
 796	 */
 797	file_inode(file)->i_ino = shp->shm_perm.id;
 798
 799	ns->shm_tot += numpages;
 800	error = shp->shm_perm.id;
 801
 802	ipc_unlock_object(&shp->shm_perm);
 803	rcu_read_unlock();
 804	return error;
 805
 806no_id:
 807	ipc_update_pid(&shp->shm_cprid, NULL);
 808	ipc_update_pid(&shp->shm_lprid, NULL);
 809	fput(file);
 810	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
 811	return error;
 812no_file:
 813	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
 814	return error;
 815}
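
/*
 * For illustration, a minimal userspace sketch (not kernel code) of the
 * SHM_HUGETLB path above: the log2 of the requested page size is encoded
 * at SHM_HUGE_SHIFT and decoded by hstate_sizelog() in newseg(), and the
 * size is rounded up to whole huge pages.  Assumes the SHM_HUGETLB and
 * SHM_HUGE_SHIFT definitions from the uapi <linux/shm.h> and a configured
 * hugetlb pool; 21 below is log2(2 MiB).
 *
 *	int flg = IPC_CREAT | SHM_HUGETLB | (21 << SHM_HUGE_SHIFT) | 0600;
 *	int id  = shmget(IPC_PRIVATE, 4UL << 20, flg);
 *	if (id < 0)
 *		perror("shmget");
 */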
 816
 817/*
 818 * Called with shm_ids.rwsem and ipcp locked.
 819 */
 820static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
 821{
 822	struct shmid_kernel *shp;
 823
 824	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 825	if (shp->shm_segsz < params->u.size)
 826		return -EINVAL;
 827
 828	return 0;
 829}
 830
 831long ksys_shmget(key_t key, size_t size, int shmflg)
 832{
 833	struct ipc_namespace *ns;
 834	static const struct ipc_ops shm_ops = {
 835		.getnew = newseg,
 836		.associate = security_shm_associate,
 837		.more_checks = shm_more_checks,
 838	};
 839	struct ipc_params shm_params;
 840
 841	ns = current->nsproxy->ipc_ns;
 842
 843	shm_params.key = key;
 844	shm_params.flg = shmflg;
 845	shm_params.u.size = size;
 846
 847	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
 848}
 849
 850SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
 851{
 852	return ksys_shmget(key, size, shmflg);
 853}
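
/*
 * For illustration, a minimal userspace sketch (not kernel code) of the
 * common path into newseg() via ipcget(): the requested size is rounded up
 * to whole pages and checked against SHMMIN and the shmmax/shmall limits.
 * The ftok() key source and mode below are just an example.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	key_t key = ftok("/some/existing/path", 'S');
 *	int id = shmget(key, 65536, IPC_CREAT | IPC_EXCL | 0600);
 *	if (id < 0)
 *		perror("shmget");
 */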
 854
 855static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
 856{
 857	switch (version) {
 858	case IPC_64:
 859		return copy_to_user(buf, in, sizeof(*in));
 860	case IPC_OLD:
 861	    {
 862		struct shmid_ds out;
 863
 864		memset(&out, 0, sizeof(out));
 865		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
 866		out.shm_segsz	= in->shm_segsz;
 867		out.shm_atime	= in->shm_atime;
 868		out.shm_dtime	= in->shm_dtime;
 869		out.shm_ctime	= in->shm_ctime;
 870		out.shm_cpid	= in->shm_cpid;
 871		out.shm_lpid	= in->shm_lpid;
 872		out.shm_nattch	= in->shm_nattch;
 873
 874		return copy_to_user(buf, &out, sizeof(out));
 875	    }
 876	default:
 877		return -EINVAL;
 878	}
 879}
 880
 881static inline unsigned long
 882copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
 883{
 884	switch (version) {
 885	case IPC_64:
 886		if (copy_from_user(out, buf, sizeof(*out)))
 887			return -EFAULT;
 888		return 0;
 889	case IPC_OLD:
 890	    {
 891		struct shmid_ds tbuf_old;
 892
 893		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 894			return -EFAULT;
 895
 896		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
 897		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
 898		out->shm_perm.mode	= tbuf_old.shm_perm.mode;
 899
 900		return 0;
 901	    }
 902	default:
 903		return -EINVAL;
 904	}
 905}
 906
 907static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
 908{
 909	switch (version) {
 910	case IPC_64:
 911		return copy_to_user(buf, in, sizeof(*in));
 912	case IPC_OLD:
 913	    {
 914		struct shminfo out;
 915
 916		if (in->shmmax > INT_MAX)
 917			out.shmmax = INT_MAX;
 918		else
 919			out.shmmax = (int)in->shmmax;
 920
 921		out.shmmin	= in->shmmin;
 922		out.shmmni	= in->shmmni;
 923		out.shmseg	= in->shmseg;
 924		out.shmall	= in->shmall;
 925
 926		return copy_to_user(buf, &out, sizeof(out));
 927	    }
 928	default:
 929		return -EINVAL;
 930	}
 931}
 932
 933/*
 934 * Calculate and add used RSS and swap pages of a shm.
 935 * Called with shm_ids.rwsem held as a reader
 936 */
 937static void shm_add_rss_swap(struct shmid_kernel *shp,
 938	unsigned long *rss_add, unsigned long *swp_add)
 939{
 940	struct inode *inode;
 941
 942	inode = file_inode(shp->shm_file);
 943
 944	if (is_file_hugepages(shp->shm_file)) {
 945		struct address_space *mapping = inode->i_mapping;
 946		struct hstate *h = hstate_file(shp->shm_file);
 947		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
 948	} else {
 949#ifdef CONFIG_SHMEM
 950		struct shmem_inode_info *info = SHMEM_I(inode);
 951
 952		spin_lock_irq(&info->lock);
 953		*rss_add += inode->i_mapping->nrpages;
 954		*swp_add += info->swapped;
 955		spin_unlock_irq(&info->lock);
 956#else
 957		*rss_add += inode->i_mapping->nrpages;
 958#endif
 959	}
 960}
 961
 962/*
 963 * Called with shm_ids.rwsem held as a reader
 964 */
 965static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
 966		unsigned long *swp)
 967{
 968	int next_id;
 969	int total, in_use;
 970
 971	*rss = 0;
 972	*swp = 0;
 973
 974	in_use = shm_ids(ns).in_use;
 975
 976	for (total = 0, next_id = 0; total < in_use; next_id++) {
 977		struct kern_ipc_perm *ipc;
 978		struct shmid_kernel *shp;
 979
 980		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
 981		if (ipc == NULL)
 982			continue;
 983		shp = container_of(ipc, struct shmid_kernel, shm_perm);
 984
 985		shm_add_rss_swap(shp, rss, swp);
 986
 987		total++;
 988	}
 989}
 990
 991/*
 992 * This function handles some shmctl commands which require the rwsem
 993 * to be held in write mode.
 994 * NOTE: no locks must be held, the rwsem is taken inside this function.
 995 */
 996static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
 997		       struct shmid64_ds *shmid64)
 998{
 999	struct kern_ipc_perm *ipcp;
1000	struct shmid_kernel *shp;
1001	int err;
1002
1003	down_write(&shm_ids(ns).rwsem);
1004	rcu_read_lock();
1005
1006	ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd,
1007				      &shmid64->shm_perm, 0);
1008	if (IS_ERR(ipcp)) {
1009		err = PTR_ERR(ipcp);
1010		goto out_unlock1;
1011	}
1012
1013	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1014
1015	err = security_shm_shmctl(&shp->shm_perm, cmd);
1016	if (err)
1017		goto out_unlock1;
1018
1019	switch (cmd) {
1020	case IPC_RMID:
1021		ipc_lock_object(&shp->shm_perm);
1022		/* do_shm_rmid unlocks the ipc object and rcu */
1023		do_shm_rmid(ns, ipcp);
1024		goto out_up;
1025	case IPC_SET:
1026		ipc_lock_object(&shp->shm_perm);
1027		err = ipc_update_perm(&shmid64->shm_perm, ipcp);
1028		if (err)
1029			goto out_unlock0;
1030		shp->shm_ctim = ktime_get_real_seconds();
1031		break;
1032	default:
1033		err = -EINVAL;
1034		goto out_unlock1;
1035	}
1036
1037out_unlock0:
1038	ipc_unlock_object(&shp->shm_perm);
1039out_unlock1:
1040	rcu_read_unlock();
1041out_up:
1042	up_write(&shm_ids(ns).rwsem);
1043	return err;
1044}
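
/*
 * For illustration, a minimal userspace sketch (not kernel code) of the
 * IPC_SET branch above: only the owner uid/gid and the low permission bits
 * of the passed shmid_ds are applied (via ipc_update_perm()), and shm_ctime
 * is refreshed.
 *
 *	struct shmid_ds ds;
 *
 *	if (shmctl(id, IPC_STAT, &ds) == 0) {
 *		ds.shm_perm.mode = 0640;
 *		if (shmctl(id, IPC_SET, &ds) < 0)
 *			perror("shmctl(IPC_SET)");
 *	}
 */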
1045
1046static int shmctl_ipc_info(struct ipc_namespace *ns,
1047			   struct shminfo64 *shminfo)
1048{
1049	int err = security_shm_shmctl(NULL, IPC_INFO);
1050	if (!err) {
1051		memset(shminfo, 0, sizeof(*shminfo));
1052		shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
1053		shminfo->shmmax = ns->shm_ctlmax;
1054		shminfo->shmall = ns->shm_ctlall;
1055		shminfo->shmmin = SHMMIN;
1056		down_read(&shm_ids(ns).rwsem);
1057		err = ipc_get_maxidx(&shm_ids(ns));
1058		up_read(&shm_ids(ns).rwsem);
1059		if (err < 0)
1060			err = 0;
1061	}
1062	return err;
1063}
1064
1065static int shmctl_shm_info(struct ipc_namespace *ns,
1066			   struct shm_info *shm_info)
1067{
1068	int err = security_shm_shmctl(NULL, SHM_INFO);
1069	if (!err) {
1070		memset(shm_info, 0, sizeof(*shm_info));
1071		down_read(&shm_ids(ns).rwsem);
1072		shm_info->used_ids = shm_ids(ns).in_use;
1073		shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
1074		shm_info->shm_tot = ns->shm_tot;
1075		shm_info->swap_attempts = 0;
1076		shm_info->swap_successes = 0;
1077		err = ipc_get_maxidx(&shm_ids(ns));
1078		up_read(&shm_ids(ns).rwsem);
1079		if (err < 0)
1080			err = 0;
1081	}
1082	return err;
1083}
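
/*
 * For illustration, a minimal userspace sketch (not kernel code) of
 * SHM_INFO as filled in above: a non-negative return value is the highest
 * in-use index (an upper bound for a SHM_STAT walk), and shm_tot, shm_rss
 * and shm_swp are counted in pages.  Assumes struct shm_info is visible to
 * userspace (glibc typically exposes it via <sys/shm.h>).
 *
 *	struct shm_info si;
 *	int maxidx = shmctl(0, SHM_INFO, (struct shmid_ds *)&si);
 *
 *	if (maxidx >= 0)
 *		printf("%d segments, %lu pages\n", si.used_ids, si.shm_tot);
 */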
1084
1085static int shmctl_stat(struct ipc_namespace *ns, int shmid,
1086			int cmd, struct shmid64_ds *tbuf)
1087{
1088	struct shmid_kernel *shp;
1089	int err;
1090
1091	memset(tbuf, 0, sizeof(*tbuf));
1092
1093	rcu_read_lock();
1094	if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
1095		shp = shm_obtain_object(ns, shmid);
1096		if (IS_ERR(shp)) {
1097			err = PTR_ERR(shp);
1098			goto out_unlock;
1099		}
1100	} else { /* IPC_STAT */
1101		shp = shm_obtain_object_check(ns, shmid);
1102		if (IS_ERR(shp)) {
1103			err = PTR_ERR(shp);
1104			goto out_unlock;
1105		}
1106	}
1107
1108	/*
1109	 * Semantically SHM_STAT_ANY ought to be identical to
1110	 * that functionality provided by the /proc/sysvipc/
1111	 * interface. As such, only audit these calls and
1112	 * do not do traditional S_IRUGO permission checks on
1113	 * the ipc object.
1114	 */
1115	if (cmd == SHM_STAT_ANY)
1116		audit_ipc_obj(&shp->shm_perm);
1117	else {
1118		err = -EACCES;
1119		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
1120			goto out_unlock;
1121	}
1122
1123	err = security_shm_shmctl(&shp->shm_perm, cmd);
1124	if (err)
1125		goto out_unlock;
1126
1127	ipc_lock_object(&shp->shm_perm);
1128
1129	if (!ipc_valid_object(&shp->shm_perm)) {
1130		ipc_unlock_object(&shp->shm_perm);
1131		err = -EIDRM;
1132		goto out_unlock;
1133	}
1134
1135	kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
1136	tbuf->shm_segsz	= shp->shm_segsz;
1137	tbuf->shm_atime	= shp->shm_atim;
1138	tbuf->shm_dtime	= shp->shm_dtim;
1139	tbuf->shm_ctime	= shp->shm_ctim;
1140#ifndef CONFIG_64BIT
1141	tbuf->shm_atime_high = shp->shm_atim >> 32;
1142	tbuf->shm_dtime_high = shp->shm_dtim >> 32;
1143	tbuf->shm_ctime_high = shp->shm_ctim >> 32;
1144#endif
1145	tbuf->shm_cpid	= pid_vnr(shp->shm_cprid);
1146	tbuf->shm_lpid	= pid_vnr(shp->shm_lprid);
1147	tbuf->shm_nattch = shp->shm_nattch;
1148
1149	if (cmd == IPC_STAT) {
1150		/*
1151		 * As defined in SUS:
1152		 * Return 0 on success
1153		 */
1154		err = 0;
1155	} else {
1156		/*
1157		 * SHM_STAT and SHM_STAT_ANY (both Linux specific)
1158		 * Return the full id, including the sequence number
1159		 */
1160		err = shp->shm_perm.id;
1161	}
1162
1163	ipc_unlock_object(&shp->shm_perm);
1164out_unlock:
1165	rcu_read_unlock();
1166	return err;
1167}
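
/*
 * For illustration, a minimal userspace sketch (not kernel code) of the
 * return-value convention implemented above: IPC_STAT takes a segment id
 * and returns 0 on success, while SHM_STAT/SHM_STAT_ANY take a table index
 * and return the full id including the sequence number, which is how
 * proc-ps style tools enumerate segments.  idx below stands for such an
 * index, e.g. 0 up to the value returned by SHM_INFO.
 *
 *	struct shmid_ds ds;
 *	int id = shmctl(idx, SHM_STAT, &ds);
 *
 *	if (id >= 0)
 *		printf("id %d: %zu bytes, %lu attaches\n",
 *		       id, ds.shm_segsz, (unsigned long)ds.shm_nattch);
 */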
1168
1169static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
1170{
1171	struct shmid_kernel *shp;
1172	struct file *shm_file;
1173	int err;
1174
1175	rcu_read_lock();
1176	shp = shm_obtain_object_check(ns, shmid);
1177	if (IS_ERR(shp)) {
1178		err = PTR_ERR(shp);
1179		goto out_unlock1;
1180	}
1181
1182	audit_ipc_obj(&(shp->shm_perm));
1183	err = security_shm_shmctl(&shp->shm_perm, cmd);
1184	if (err)
1185		goto out_unlock1;
1186
1187	ipc_lock_object(&shp->shm_perm);
1188
1189	/* check if shm_destroy() is tearing down shp */
1190	if (!ipc_valid_object(&shp->shm_perm)) {
1191		err = -EIDRM;
1192		goto out_unlock0;
1193	}
1194
1195	if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1196		kuid_t euid = current_euid();
1197
1198		if (!uid_eq(euid, shp->shm_perm.uid) &&
1199		    !uid_eq(euid, shp->shm_perm.cuid)) {
1200			err = -EPERM;
1201			goto out_unlock0;
1202		}
1203		if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1204			err = -EPERM;
1205			goto out_unlock0;
1206		}
1207	}
1208
1209	shm_file = shp->shm_file;
1210	if (is_file_hugepages(shm_file))
1211		goto out_unlock0;
1212
1213	if (cmd == SHM_LOCK) {
1214		struct ucounts *ucounts = current_ucounts();
1215
1216		err = shmem_lock(shm_file, 1, ucounts);
1217		if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1218			shp->shm_perm.mode |= SHM_LOCKED;
1219			shp->mlock_ucounts = ucounts;
1220		}
1221		goto out_unlock0;
1222	}
1223
1224	/* SHM_UNLOCK */
1225	if (!(shp->shm_perm.mode & SHM_LOCKED))
1226		goto out_unlock0;
1227	shmem_lock(shm_file, 0, shp->mlock_ucounts);
1228	shp->shm_perm.mode &= ~SHM_LOCKED;
1229	shp->mlock_ucounts = NULL;
1230	get_file(shm_file);
1231	ipc_unlock_object(&shp->shm_perm);
1232	rcu_read_unlock();
1233	shmem_unlock_mapping(shm_file->f_mapping);
1234
1235	fput(shm_file);
1236	return err;
1237
1238out_unlock0:
1239	ipc_unlock_object(&shp->shm_perm);
1240out_unlock1:
1241	rcu_read_unlock();
1242	return err;
1243}
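
/*
 * For illustration, a minimal userspace sketch (not kernel code) of
 * SHM_LOCK/SHM_UNLOCK as handled above: without CAP_IPC_LOCK the caller
 * must own the segment and have a non-zero RLIMIT_MEMLOCK, and a locked
 * segment's pages are kept from being swapped out until SHM_UNLOCK.
 *
 *	if (shmctl(id, SHM_LOCK, NULL) < 0)
 *		perror("shmctl(SHM_LOCK)");
 *	else
 *		shmctl(id, SHM_UNLOCK, NULL);
 */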
1244
1245static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version)
1246{
1247	int err;
1248	struct ipc_namespace *ns;
1249	struct shmid64_ds sem64;
1250
1251	if (cmd < 0 || shmid < 0)
1252		return -EINVAL;
1253
1254	ns = current->nsproxy->ipc_ns;
1255
1256	switch (cmd) {
1257	case IPC_INFO: {
1258		struct shminfo64 shminfo;
1259		err = shmctl_ipc_info(ns, &shminfo);
1260		if (err < 0)
1261			return err;
1262		if (copy_shminfo_to_user(buf, &shminfo, version))
1263			err = -EFAULT;
1264		return err;
1265	}
1266	case SHM_INFO: {
1267		struct shm_info shm_info;
1268		err = shmctl_shm_info(ns, &shm_info);
1269		if (err < 0)
1270			return err;
1271		if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
1272			err = -EFAULT;
1273		return err;
1274	}
1275	case SHM_STAT:
1276	case SHM_STAT_ANY:
1277	case IPC_STAT: {
1278		err = shmctl_stat(ns, shmid, cmd, &sem64);
1279		if (err < 0)
1280			return err;
1281		if (copy_shmid_to_user(buf, &sem64, version))
1282			err = -EFAULT;
1283		return err;
1284	}
1285	case IPC_SET:
1286		if (copy_shmid_from_user(&sem64, buf, version))
1287			return -EFAULT;
1288		fallthrough;
1289	case IPC_RMID:
1290		return shmctl_down(ns, shmid, cmd, &sem64);
1291	case SHM_LOCK:
1292	case SHM_UNLOCK:
1293		return shmctl_do_lock(ns, shmid, cmd);
1294	default:
1295		return -EINVAL;
1296	}
1297}
1298
1299SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1300{
1301	return ksys_shmctl(shmid, cmd, buf, IPC_64);
1302}
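
/*
 * For illustration, a minimal userspace sketch (not kernel code) of typical
 * teardown via IPC_RMID: removal is deferred while attachments exist, so
 * the segment is marked for destruction and only freed once the last
 * attach goes away.
 *
 *	if (shmctl(id, IPC_RMID, NULL) < 0)
 *		perror("shmctl(IPC_RMID)");
 */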
1303
1304#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
1305long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
1306{
1307	int version = ipc_parse_version(&cmd);
1308
1309	return ksys_shmctl(shmid, cmd, buf, version);
1310}
1311
1312SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1313{
1314	return ksys_old_shmctl(shmid, cmd, buf);
1315}
1316#endif
1317
1318#ifdef CONFIG_COMPAT
1319
1320struct compat_shmid_ds {
1321	struct compat_ipc_perm shm_perm;
1322	int shm_segsz;
1323	old_time32_t shm_atime;
1324	old_time32_t shm_dtime;
1325	old_time32_t shm_ctime;
1326	compat_ipc_pid_t shm_cpid;
1327	compat_ipc_pid_t shm_lpid;
1328	unsigned short shm_nattch;
1329	unsigned short shm_unused;
1330	compat_uptr_t shm_unused2;
1331	compat_uptr_t shm_unused3;
1332};
1333
1334struct compat_shminfo64 {
1335	compat_ulong_t shmmax;
1336	compat_ulong_t shmmin;
1337	compat_ulong_t shmmni;
1338	compat_ulong_t shmseg;
1339	compat_ulong_t shmall;
1340	compat_ulong_t __unused1;
1341	compat_ulong_t __unused2;
1342	compat_ulong_t __unused3;
1343	compat_ulong_t __unused4;
1344};
1345
1346struct compat_shm_info {
1347	compat_int_t used_ids;
1348	compat_ulong_t shm_tot, shm_rss, shm_swp;
1349	compat_ulong_t swap_attempts, swap_successes;
1350};
1351
1352static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
1353					int version)
1354{
1355	if (in->shmmax > INT_MAX)
1356		in->shmmax = INT_MAX;
1357	if (version == IPC_64) {
1358		struct compat_shminfo64 info;
1359		memset(&info, 0, sizeof(info));
1360		info.shmmax = in->shmmax;
1361		info.shmmin = in->shmmin;
1362		info.shmmni = in->shmmni;
1363		info.shmseg = in->shmseg;
1364		info.shmall = in->shmall;
1365		return copy_to_user(buf, &info, sizeof(info));
1366	} else {
1367		struct shminfo info;
1368		memset(&info, 0, sizeof(info));
1369		info.shmmax = in->shmmax;
1370		info.shmmin = in->shmmin;
1371		info.shmmni = in->shmmni;
1372		info.shmseg = in->shmseg;
1373		info.shmall = in->shmall;
1374		return copy_to_user(buf, &info, sizeof(info));
1375	}
1376}
1377
1378static int put_compat_shm_info(struct shm_info *ip,
1379				struct compat_shm_info __user *uip)
1380{
1381	struct compat_shm_info info;
1382
1383	memset(&info, 0, sizeof(info));
1384	info.used_ids = ip->used_ids;
1385	info.shm_tot = ip->shm_tot;
1386	info.shm_rss = ip->shm_rss;
1387	info.shm_swp = ip->shm_swp;
1388	info.swap_attempts = ip->swap_attempts;
1389	info.swap_successes = ip->swap_successes;
1390	return copy_to_user(uip, &info, sizeof(info));
1391}
1392
1393static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
1394					int version)
1395{
1396	if (version == IPC_64) {
1397		struct compat_shmid64_ds v;
1398		memset(&v, 0, sizeof(v));
1399		to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
1400		v.shm_atime	 = lower_32_bits(in->shm_atime);
1401		v.shm_atime_high = upper_32_bits(in->shm_atime);
1402		v.shm_dtime	 = lower_32_bits(in->shm_dtime);
1403		v.shm_dtime_high = upper_32_bits(in->shm_dtime);
1404		v.shm_ctime	 = lower_32_bits(in->shm_ctime);
1405		v.shm_ctime_high = upper_32_bits(in->shm_ctime);
1406		v.shm_segsz = in->shm_segsz;
1407		v.shm_nattch = in->shm_nattch;
1408		v.shm_cpid = in->shm_cpid;
1409		v.shm_lpid = in->shm_lpid;
1410		return copy_to_user(buf, &v, sizeof(v));
1411	} else {
1412		struct compat_shmid_ds v;
1413		memset(&v, 0, sizeof(v));
1414		to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
1415		v.shm_perm.key = in->shm_perm.key;
1416		v.shm_atime = in->shm_atime;
1417		v.shm_dtime = in->shm_dtime;
1418		v.shm_ctime = in->shm_ctime;
1419		v.shm_segsz = in->shm_segsz;
1420		v.shm_nattch = in->shm_nattch;
1421		v.shm_cpid = in->shm_cpid;
1422		v.shm_lpid = in->shm_lpid;
1423		return copy_to_user(buf, &v, sizeof(v));
1424	}
1425}
1426
1427static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
1428					int version)
1429{
1430	memset(out, 0, sizeof(*out));
1431	if (version == IPC_64) {
1432		struct compat_shmid64_ds __user *p = buf;
1433		return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
1434	} else {
1435		struct compat_shmid_ds __user *p = buf;
1436		return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
1437	}
1438}
1439
1440static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version)
1441{
1442	struct ipc_namespace *ns;
1443	struct shmid64_ds sem64;
1444	int err;
1445
1446	ns = current->nsproxy->ipc_ns;
1447
1448	if (cmd < 0 || shmid < 0)
1449		return -EINVAL;
1450
1451	switch (cmd) {
1452	case IPC_INFO: {
1453		struct shminfo64 shminfo;
1454		err = shmctl_ipc_info(ns, &shminfo);
1455		if (err < 0)
1456			return err;
1457		if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
1458			err = -EFAULT;
1459		return err;
1460	}
1461	case SHM_INFO: {
1462		struct shm_info shm_info;
1463		err = shmctl_shm_info(ns, &shm_info);
1464		if (err < 0)
1465			return err;
1466		if (put_compat_shm_info(&shm_info, uptr))
1467			err = -EFAULT;
1468		return err;
1469	}
1470	case IPC_STAT:
1471	case SHM_STAT_ANY:
1472	case SHM_STAT:
1473		err = shmctl_stat(ns, shmid, cmd, &sem64);
1474		if (err < 0)
1475			return err;
1476		if (copy_compat_shmid_to_user(uptr, &sem64, version))
1477			err = -EFAULT;
1478		return err;
1479
1480	case IPC_SET:
1481		if (copy_compat_shmid_from_user(&sem64, uptr, version))
1482			return -EFAULT;
1483		fallthrough;
1484	case IPC_RMID:
1485		return shmctl_down(ns, shmid, cmd, &sem64);
1486	case SHM_LOCK:
1487	case SHM_UNLOCK:
1488		return shmctl_do_lock(ns, shmid, cmd);
1489	default:
1490		return -EINVAL;
1491	}
 
 
 
 
1492	return err;
1493}
1494
1495COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
1496{
1497	return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64);
1498}
1499
1500#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
1501long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr)
1502{
1503	int version = compat_ipc_parse_version(&cmd);
1504
1505	return compat_ksys_shmctl(shmid, cmd, uptr, version);
1506}
1507
1508COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr)
1509{
1510	return compat_ksys_old_shmctl(shmid, cmd, uptr);
1511}
1512#endif
1513#endif
1514
1515/*
1516 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1517 *
1518 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1519 * "raddr" thing points to kernel space, and there has to be a wrapper around
1520 * this.
1521 */
1522long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1523	      ulong *raddr, unsigned long shmlba)
1524{
1525	struct shmid_kernel *shp;
1526	unsigned long addr = (unsigned long)shmaddr;
1527	unsigned long size;
1528	struct file *file, *base;
1529	int    err;
1530	unsigned long flags = MAP_SHARED;
1531	unsigned long prot;
1532	int acc_mode;
1533	struct ipc_namespace *ns;
1534	struct shm_file_data *sfd;
1535	int f_flags;
1536	unsigned long populate = 0;
1537
1538	err = -EINVAL;
1539	if (shmid < 0)
1540		goto out;
1541
1542	if (addr) {
1543		if (addr & (shmlba - 1)) {
1544			if (shmflg & SHM_RND) {
1545				addr &= ~(shmlba - 1);  /* round down */
1546
1547				/*
1548				 * Ensure that the round-down is non-zero
1549				 * when remapping. This can happen for
1550				 * cases when addr < shmlba.
1551				 */
1552				if (!addr && (shmflg & SHM_REMAP))
1553					goto out;
1554			} else
1555#ifndef __ARCH_FORCE_SHMLBA
1556				if (addr & ~PAGE_MASK)
1557#endif
1558					goto out;
1559		}
1560
1561		flags |= MAP_FIXED;
1562	} else if ((shmflg & SHM_REMAP))
1563		goto out;
1564
1565	if (shmflg & SHM_RDONLY) {
1566		prot = PROT_READ;
1567		acc_mode = S_IRUGO;
1568		f_flags = O_RDONLY;
1569	} else {
1570		prot = PROT_READ | PROT_WRITE;
1571		acc_mode = S_IRUGO | S_IWUGO;
1572		f_flags = O_RDWR;
1573	}
1574	if (shmflg & SHM_EXEC) {
1575		prot |= PROT_EXEC;
1576		acc_mode |= S_IXUGO;
1577	}
1578
1579	/*
1580	 * We cannot rely on the fs check since SYSV IPC does have an
1581	 * additional creator id...
1582	 */
1583	ns = current->nsproxy->ipc_ns;
1584	rcu_read_lock();
1585	shp = shm_obtain_object_check(ns, shmid);
1586	if (IS_ERR(shp)) {
1587		err = PTR_ERR(shp);
1588		goto out_unlock;
1589	}
1590
1591	err = -EACCES;
1592	if (ipcperms(ns, &shp->shm_perm, acc_mode))
1593		goto out_unlock;
1594
1595	err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
1596	if (err)
1597		goto out_unlock;
1598
1599	ipc_lock_object(&shp->shm_perm);
1600
1601	/* check if shm_destroy() is tearing down shp */
1602	if (!ipc_valid_object(&shp->shm_perm)) {
1603		ipc_unlock_object(&shp->shm_perm);
1604		err = -EIDRM;
1605		goto out_unlock;
1606	}
1607
1608	/*
1609	 * We need to take a reference to the real shm file to prevent the
1610	 * pointer from becoming stale in cases where the lifetime of the outer
1611	 * file extends beyond that of the shm segment.  It's not usually
1612	 * possible, but it can happen during remap_file_pages() emulation as
1613	 * that unmaps the memory, then does ->mmap() via file reference only.
1614	 * We'll deny the ->mmap() if the shm segment was since removed, but to
1615	 * detect shm ID reuse we need to compare the file pointers.
1616	 */
1617	base = get_file(shp->shm_file);
1618	shp->shm_nattch++;
1619	size = i_size_read(file_inode(base));
1620	ipc_unlock_object(&shp->shm_perm);
1621	rcu_read_unlock();
1622
1623	err = -ENOMEM;
1624	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1625	if (!sfd) {
1626		fput(base);
1627		goto out_nattch;
1628	}
1629
1630	file = alloc_file_clone(base, f_flags,
1631			  is_file_hugepages(base) ?
1632				&shm_file_operations_huge :
1633				&shm_file_operations);
1634	err = PTR_ERR(file);
1635	if (IS_ERR(file)) {
1636		kfree(sfd);
1637		fput(base);
1638		goto out_nattch;
1639	}
1640
1641	sfd->id = shp->shm_perm.id;
1642	sfd->ns = get_ipc_ns(ns);
1643	sfd->file = base;
1644	sfd->vm_ops = NULL;
1645	file->private_data = sfd;
1646
1647	err = security_mmap_file(file, prot, flags);
1648	if (err)
1649		goto out_fput;
1650
1651	if (mmap_write_lock_killable(current->mm)) {
1652		err = -EINTR;
1653		goto out_fput;
1654	}
1655
1656	if (addr && !(shmflg & SHM_REMAP)) {
1657		err = -EINVAL;
1658		if (addr + size < addr)
1659			goto invalid;
1660
1661		if (find_vma_intersection(current->mm, addr, addr + size))
1662			goto invalid;
1663	}
1664
1665	addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL);
1666	*raddr = addr;
1667	err = 0;
1668	if (IS_ERR_VALUE(addr))
1669		err = (long)addr;
1670invalid:
1671	mmap_write_unlock(current->mm);
1672	if (populate)
1673		mm_populate(addr, populate);
1674
1675out_fput:
1676	fput(file);
1677
1678out_nattch:
1679	down_write(&shm_ids(ns).rwsem);
1680	shp = shm_lock(ns, shmid);
1681	shp->shm_nattch--;
1682
1683	if (shm_may_destroy(shp))
1684		shm_destroy(ns, shp);
1685	else
1686		shm_unlock(shp);
1687	up_write(&shm_ids(ns).rwsem);
1688	return err;
1689
1690out_unlock:
1691	rcu_read_unlock();
1692out:
1693	return err;
1694}
1695
1696SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1697{
1698	unsigned long ret;
1699	long err;
1700
1701	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1702	if (err)
1703		return err;
1704	force_successful_syscall_return();
1705	return (long)ret;
1706}
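
/*
 * For illustration, a minimal userspace sketch (not kernel code) of the
 * attach modes handled by do_shmat(): a NULL address lets the kernel pick
 * one, an explicit address must be SHMLBA-aligned unless SHM_RND rounds it
 * down, and SHM_RDONLY maps the segment without write permission.
 *
 *	void *p = shmat(id, NULL, 0);
 *	void *r = shmat(id, NULL, SHM_RDONLY);
 *
 *	if (p == (void *)-1 || r == (void *)-1)
 *		perror("shmat");
 */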
1707
1708#ifdef CONFIG_COMPAT
1709
1710#ifndef COMPAT_SHMLBA
1711#define COMPAT_SHMLBA	SHMLBA
1712#endif
1713
1714COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
1715{
1716	unsigned long ret;
1717	long err;
1718
1719	err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
1720	if (err)
1721		return err;
1722	force_successful_syscall_return();
1723	return (long)ret;
1724}
1725#endif
1726
1727/*
1728 * detach and kill segment if marked destroyed.
1729 * The work is done in shm_close.
1730 */
1731long ksys_shmdt(char __user *shmaddr)
1732{
1733	struct mm_struct *mm = current->mm;
1734	struct vm_area_struct *vma;
1735	unsigned long addr = (unsigned long)shmaddr;
1736	int retval = -EINVAL;
1737#ifdef CONFIG_MMU
1738	loff_t size = 0;
1739	struct file *file;
1740	VMA_ITERATOR(vmi, mm, addr);
1741#endif
1742
1743	if (addr & ~PAGE_MASK)
1744		return retval;
1745
1746	if (mmap_write_lock_killable(mm))
1747		return -EINTR;
1748
1749	/*
1750	 * This function tries to be smart and unmap shm segments that
1751	 * were modified by partial mlock or munmap calls:
1752	 * - It first determines the size of the shm segment that should be
1753	 *   unmapped: It searches for a vma that is backed by shm and that
1754	 *   started at address shmaddr. It records its size and then unmaps
1755	 *   it.
1756	 * - Then it unmaps all shm vmas that started at shmaddr and that
1757	 *   are within the initially determined size and that are from the
1758	 *   same shm segment from which we determined the size.
1759	 * Errors from do_munmap are ignored: the function only fails if
1760	 * it's called with invalid parameters or if it's called to unmap
1761	 * a part of a vma. Both calls in this function are for full vmas,
1762	 * the parameters are directly copied from the vma itself and always
1763	 * valid - therefore do_munmap cannot fail. (famous last words?)
1764	 */
1765	/*
1766	 * If it had been mremap()'d, the starting address would not
1767	 * match the usual checks anyway. So assume all vma's are
1768	 * above the starting address given.
1769	 */
1770
1771#ifdef CONFIG_MMU
1772	for_each_vma(vmi, vma) {
1773		/*
1774		 * Check if the starting address would match, i.e. it's
1775		 * a fragment created by mprotect() and/or munmap(), or it
1776		 * otherwise starts at this address with no hassles.
1777		 */
1778		if ((vma->vm_ops == &shm_vm_ops) &&
1779			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1780
1781			/*
1782			 * Record the file of the shm segment being
1783			 * unmapped.  With mremap(), someone could place
1784			 * a page from another segment but with equal offsets
1785			 * in the range we are unmapping.
1786			 */
1787			file = vma->vm_file;
1788			size = i_size_read(file_inode(vma->vm_file));
1789			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1790			mas_pause(&vmi.mas);
1791			/*
1792			 * We discovered the size of the shm segment, so
1793			 * break out of here and fall through to the next
1794			 * loop that uses the size information to stop
1795			 * searching for matching vma's.
1796			 */
1797			retval = 0;
1798			vma = vma_next(&vmi);
1799			break;
1800		}
1801	}
1802
1803	/*
1804	 * We need look no further than the maximum address a fragment
1805	 * could possibly have landed at. Also cast things to loff_t to
1806	 * prevent overflows and make comparisons vs. equal-width types.
1807	 */
1808	size = PAGE_ALIGN(size);
1809	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1810		/* finding a matching vma now does not alter retval */
1811		if ((vma->vm_ops == &shm_vm_ops) &&
1812		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1813		    (vma->vm_file == file)) {
1814			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1815			mas_pause(&vmi.mas);
1816		}
1817
1818		vma = vma_next(&vmi);
1819	}
1820
1821#else	/* CONFIG_MMU */
1822	vma = vma_lookup(mm, addr);
1823	/* under NOMMU conditions, the exact address to be destroyed must be
1824	 * given
1825	 */
1826	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1827		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1828		retval = 0;
1829	}
1830
1831#endif
1832
1833	mmap_write_unlock(mm);
1834	return retval;
1835}
1836
1837SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1838{
1839	return ksys_shmdt(shmaddr);
1840}
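
/*
 * For illustration, a minimal userspace sketch (not kernel code): shmdt()
 * must be passed the page-aligned address returned by shmat(); ksys_shmdt()
 * then unmaps every vma fragment belonging to that attachment.
 *
 *	if (shmdt(p) < 0)
 *		perror("shmdt");
 */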
1841
1842#ifdef CONFIG_PROC_FS
1843static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1844{
1845	struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
1846	struct user_namespace *user_ns = seq_user_ns(s);
1847	struct kern_ipc_perm *ipcp = it;
1848	struct shmid_kernel *shp;
1849	unsigned long rss = 0, swp = 0;
1850
1851	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1852	shm_add_rss_swap(shp, &rss, &swp);
1853
1854#if BITS_PER_LONG <= 32
1855#define SIZE_SPEC "%10lu"
1856#else
1857#define SIZE_SPEC "%21lu"
1858#endif
1859
1860	seq_printf(s,
1861		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
1862		   "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
1863		   SIZE_SPEC " " SIZE_SPEC "\n",
1864		   shp->shm_perm.key,
1865		   shp->shm_perm.id,
1866		   shp->shm_perm.mode,
1867		   shp->shm_segsz,
1868		   pid_nr_ns(shp->shm_cprid, pid_ns),
1869		   pid_nr_ns(shp->shm_lprid, pid_ns),
1870		   shp->shm_nattch,
1871		   from_kuid_munged(user_ns, shp->shm_perm.uid),
1872		   from_kgid_munged(user_ns, shp->shm_perm.gid),
1873		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
1874		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
1875		   shp->shm_atim,
1876		   shp->shm_dtim,
1877		   shp->shm_ctim,
1878		   rss * PAGE_SIZE,
1879		   swp * PAGE_SIZE);
1880
1881	return 0;
1882}
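
/*
 * The seq_printf() above emits one line per segment in /proc/sysvipc/shm;
 * the columns are, in order: key, shmid, perms, size, cpid, lpid, nattch,
 * uid, gid, cuid, cgid, atime, dtime, ctime, rss and swap (rss and swap
 * in bytes).
 */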
1883#endif