1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/ipc/shm.c
4 * Copyright (C) 1992, 1993 Krishna Balasubramanian
5 * Many improvements/fixes by Bruno Haible.
6 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
7 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
8 *
9 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
10 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
11 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
12 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
13 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
14 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
15 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
16 *
17 * support for audit of ipc object properties and permission changes
18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
19 *
20 * namespaces support
21 * OpenVZ, SWsoft Inc.
22 * Pavel Emelianov <xemul@openvz.org>
23 *
24 * Better ipc lock (kern_ipc_perm.lock) handling
25 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
26 */
27
28#include <linux/slab.h>
29#include <linux/mm.h>
30#include <linux/hugetlb.h>
31#include <linux/shm.h>
32#include <uapi/linux/shm.h>
33#include <linux/init.h>
34#include <linux/file.h>
35#include <linux/mman.h>
36#include <linux/shmem_fs.h>
37#include <linux/security.h>
38#include <linux/syscalls.h>
39#include <linux/audit.h>
40#include <linux/capability.h>
41#include <linux/ptrace.h>
42#include <linux/seq_file.h>
43#include <linux/rwsem.h>
44#include <linux/nsproxy.h>
45#include <linux/mount.h>
46#include <linux/ipc_namespace.h>
47#include <linux/rhashtable.h>
48
49#include <linux/uaccess.h>
50
51#include "util.h"
52
53struct shmid_kernel /* private to the kernel */
54{
55 struct kern_ipc_perm shm_perm;
56 struct file *shm_file;
57 unsigned long shm_nattch;
58 unsigned long shm_segsz;
59 time64_t shm_atim;
60 time64_t shm_dtim;
61 time64_t shm_ctim;
62 struct pid *shm_cprid;
63 struct pid *shm_lprid;
64 struct ucounts *mlock_ucounts;
65
66 /*
67	 * The task that created the shm object; used for
68	 * task_lock(shp->shm_creator).
69 */
70 struct task_struct *shm_creator;
71
72 /*
73 * List by creator. task_lock(->shm_creator) required for read/write.
74 * If list_empty(), then the creator is dead already.
75 */
76 struct list_head shm_clist;
77 struct ipc_namespace *ns;
78} __randomize_layout;
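
/*
 * Lifetime/locking summary (as implemented below): a shmid_kernel is looked
 * up by id through shm_ids(ns), is protected by shm_ids(ns).rwsem plus the
 * per-object shm_perm lock (see shm_lock()), and is freed through RCU by
 * shm_rcu_free() once the last reference is dropped.
 */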
79
80/* shm_mode upper byte flags */
81#define SHM_DEST 01000 /* segment will be destroyed on last detach */
82#define SHM_LOCKED 02000 /* segment will not be swapped */
83
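/*
 * Per-attach state: do_shmat() allocates a shm_file_data, stores it in the
 * ->private_data of the file it creates with alloc_file_clone(), and points
 * sfd->file at the real shmem/hugetlbfs file backing the segment, so that
 * the shm_file_operations and shm_vm_ops below can recover the segment id
 * and namespace from the file/vma they are handed.
 */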
84struct shm_file_data {
85 int id;
86 struct ipc_namespace *ns;
87 struct file *file;
88 const struct vm_operations_struct *vm_ops;
89};
90
91#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
92
93static const struct file_operations shm_file_operations;
94static const struct vm_operations_struct shm_vm_ops;
95
96#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
97
98#define shm_unlock(shp) \
99 ipc_unlock(&(shp)->shm_perm)
100
101static int newseg(struct ipc_namespace *, struct ipc_params *);
102static void shm_open(struct vm_area_struct *vma);
103static void shm_close(struct vm_area_struct *vma);
104static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
105#ifdef CONFIG_PROC_FS
106static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
107#endif
108
109void shm_init_ns(struct ipc_namespace *ns)
110{
111 ns->shm_ctlmax = SHMMAX;
112 ns->shm_ctlall = SHMALL;
113 ns->shm_ctlmni = SHMMNI;
114 ns->shm_rmid_forced = 0;
115 ns->shm_tot = 0;
116 ipc_init_ids(&shm_ids(ns));
117}
118
119/*
120 * Called with shm_ids.rwsem (writer) and the shp structure locked.
121 * Only shm_ids.rwsem remains locked on exit.
122 */
123static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
124{
125 struct shmid_kernel *shp;
126
127 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
128 WARN_ON(ns != shp->ns);
129
130 if (shp->shm_nattch) {
131 shp->shm_perm.mode |= SHM_DEST;
132 /* Do not find it any more */
133 ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
134 shm_unlock(shp);
135 } else
136 shm_destroy(ns, shp);
137}
138
139#ifdef CONFIG_IPC_NS
140void shm_exit_ns(struct ipc_namespace *ns)
141{
142 free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
143 idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
144 rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
145}
146#endif
147
148static int __init ipc_ns_init(void)
149{
150 shm_init_ns(&init_ipc_ns);
151 return 0;
152}
153
154pure_initcall(ipc_ns_init);
155
156void __init shm_init(void)
157{
158 ipc_init_proc_interface("sysvipc/shm",
159#if BITS_PER_LONG <= 32
160 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
161#else
162 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
163#endif
164 IPC_SHM_IDS, sysvipc_shm_proc_show);
165}
166
167static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
168{
169 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
170
171 if (IS_ERR(ipcp))
172 return ERR_CAST(ipcp);
173
174 return container_of(ipcp, struct shmid_kernel, shm_perm);
175}
176
177static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
178{
179 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
180
181 if (IS_ERR(ipcp))
182 return ERR_CAST(ipcp);
183
184 return container_of(ipcp, struct shmid_kernel, shm_perm);
185}
186
187/*
188 * shm_lock_(check_) routines are called in the paths where the rwsem
189 * is not necessarily held.
190 */
191static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
192{
193 struct kern_ipc_perm *ipcp;
194
195 rcu_read_lock();
196 ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
197 if (IS_ERR(ipcp))
198 goto err;
199
200 ipc_lock_object(ipcp);
201 /*
202 * ipc_rmid() may have already freed the ID while ipc_lock_object()
203 * was spinning: here verify that the structure is still valid.
204 * Upon races with RMID, return -EIDRM, thus indicating that
205 * the ID points to a removed identifier.
206 */
207 if (ipc_valid_object(ipcp)) {
208 /* return a locked ipc object upon success */
209 return container_of(ipcp, struct shmid_kernel, shm_perm);
210 }
211
212 ipc_unlock_object(ipcp);
213 ipcp = ERR_PTR(-EIDRM);
214err:
215 rcu_read_unlock();
216 /*
217 * Callers of shm_lock() must validate the status of the returned ipc
218 * object pointer and error out as appropriate.
219 */
220 return ERR_CAST(ipcp);
221}
222
223static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
224{
225 rcu_read_lock();
226 ipc_lock_object(&ipcp->shm_perm);
227}
228
229static void shm_rcu_free(struct rcu_head *head)
230{
231 struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
232 rcu);
233 struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
234 shm_perm);
235 security_shm_free(&shp->shm_perm);
236 kfree(shp);
237}
238
239/*
240 * It has to be called with shp locked.
241 * It must be called before ipc_rmid()
242 */
243static inline void shm_clist_rm(struct shmid_kernel *shp)
244{
245 struct task_struct *creator;
246
247 /* ensure that shm_creator does not disappear */
248 rcu_read_lock();
249
250 /*
251 * A concurrent exit_shm may do a list_del_init() as well.
252 * Just do nothing if exit_shm already did the work
253 */
254 if (!list_empty(&shp->shm_clist)) {
255 /*
256 * shp->shm_creator is guaranteed to be valid *only*
257 * if shp->shm_clist is not empty.
258 */
259 creator = shp->shm_creator;
260
261 task_lock(creator);
262 /*
263 * list_del_init() is a nop if the entry was already removed
264 * from the list.
265 */
266 list_del_init(&shp->shm_clist);
267 task_unlock(creator);
268 }
269 rcu_read_unlock();
270}
271
272static inline void shm_rmid(struct shmid_kernel *s)
273{
274 shm_clist_rm(s);
275 ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
276}
277
278
279static int __shm_open(struct shm_file_data *sfd)
280{
281 struct shmid_kernel *shp;
282
283 shp = shm_lock(sfd->ns, sfd->id);
284
285 if (IS_ERR(shp))
286 return PTR_ERR(shp);
287
288 if (shp->shm_file != sfd->file) {
289 /* ID was reused */
290 shm_unlock(shp);
291 return -EINVAL;
292 }
293
294 shp->shm_atim = ktime_get_real_seconds();
295 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
296 shp->shm_nattch++;
297 shm_unlock(shp);
298 return 0;
299}
300
301/* This is called by fork, once for every shm attach. */
302static void shm_open(struct vm_area_struct *vma)
303{
304 struct file *file = vma->vm_file;
305 struct shm_file_data *sfd = shm_file_data(file);
306 int err;
307
308 /* Always call underlying open if present */
309 if (sfd->vm_ops->open)
310 sfd->vm_ops->open(vma);
311
312 err = __shm_open(sfd);
313 /*
314 * We raced in the idr lookup or with shm_destroy().
315 * Either way, the ID is busted.
316 */
317 WARN_ON_ONCE(err);
318}
319
320/*
321 * shm_destroy - free the struct shmid_kernel
322 *
323 * @ns: namespace
324 * @shp: struct to free
325 *
326 * It has to be called with shp and shm_ids.rwsem (writer) locked,
327 * but returns with shp unlocked and freed.
328 */
329static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
330{
331 struct file *shm_file;
332
333 shm_file = shp->shm_file;
334 shp->shm_file = NULL;
335 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
336 shm_rmid(shp);
337 shm_unlock(shp);
338 if (!is_file_hugepages(shm_file))
339 shmem_lock(shm_file, 0, shp->mlock_ucounts);
340 fput(shm_file);
341 ipc_update_pid(&shp->shm_cprid, NULL);
342 ipc_update_pid(&shp->shm_lprid, NULL);
343 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
344}
345
346/*
347 * shm_may_destroy - identifies whether shm segment should be destroyed now
348 *
349 * Returns true if and only if there are no active users of the segment and
350 * one of the following is true:
351 *
352 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
353 *
354 * 2) sysctl kernel.shm_rmid_forced is set to 1.
355 */
356static bool shm_may_destroy(struct shmid_kernel *shp)
357{
358 return (shp->shm_nattch == 0) &&
359 (shp->ns->shm_rmid_forced ||
360 (shp->shm_perm.mode & SHM_DEST));
361}
362
363/*
364 * remove the attach descriptor vma.
365 * free memory for segment if it is marked destroyed.
366 * The descriptor has already been removed from the current->mm->mmap list
367 * and will later be kfree()d.
368 */
369static void __shm_close(struct shm_file_data *sfd)
370{
371 struct shmid_kernel *shp;
372 struct ipc_namespace *ns = sfd->ns;
373
374 down_write(&shm_ids(ns).rwsem);
375 /* remove from the list of attaches of the shm segment */
376 shp = shm_lock(ns, sfd->id);
377
378 /*
379 * We raced in the idr lookup or with shm_destroy().
380 * Either way, the ID is busted.
381 */
382 if (WARN_ON_ONCE(IS_ERR(shp)))
383 goto done; /* no-op */
384
385 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
386 shp->shm_dtim = ktime_get_real_seconds();
387 shp->shm_nattch--;
388 if (shm_may_destroy(shp))
389 shm_destroy(ns, shp);
390 else
391 shm_unlock(shp);
392done:
393 up_write(&shm_ids(ns).rwsem);
394}
395
396static void shm_close(struct vm_area_struct *vma)
397{
398 struct file *file = vma->vm_file;
399 struct shm_file_data *sfd = shm_file_data(file);
400
401 /* Always call underlying close if present */
402 if (sfd->vm_ops->close)
403 sfd->vm_ops->close(vma);
404
405 __shm_close(sfd);
406}
407
408/* Called with ns->shm_ids(ns).rwsem locked */
409static int shm_try_destroy_orphaned(int id, void *p, void *data)
410{
411 struct ipc_namespace *ns = data;
412 struct kern_ipc_perm *ipcp = p;
413 struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
414
415 /*
416	 * We want to destroy segments that have no users and whose
417	 * originating process has already exited.
418 *
419 * As shp->* are changed under rwsem, it's safe to skip shp locking.
420 */
421 if (!list_empty(&shp->shm_clist))
422 return 0;
423
424 if (shm_may_destroy(shp)) {
425 shm_lock_by_ptr(shp);
426 shm_destroy(ns, shp);
427 }
428 return 0;
429}
430
431void shm_destroy_orphaned(struct ipc_namespace *ns)
432{
433 down_write(&shm_ids(ns).rwsem);
434 if (shm_ids(ns).in_use)
435 idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
436 up_write(&shm_ids(ns).rwsem);
437}
438
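/*
 * exit_shm() walks task->sysvshm.shm_clist, unlinking every segment this
 * task created; when kernel.shm_rmid_forced is set it also destroys any
 * segment that is no longer attached. The numbered comments in the loop
 * below spell out the ordering that keeps this safe against a concurrent
 * IPC_RMID.
 */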
439/* Locking assumes this will only be called with task == current */
440void exit_shm(struct task_struct *task)
441{
442 for (;;) {
443 struct shmid_kernel *shp;
444 struct ipc_namespace *ns;
445
446 task_lock(task);
447
448 if (list_empty(&task->sysvshm.shm_clist)) {
449 task_unlock(task);
450 break;
451 }
452
453 shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
454 shm_clist);
455
456 /*
457	 * 1) Get a pointer to the ipc namespace. This pointer is
458	 *    guaranteed to be valid because the lifetime of shp is
459	 *    always shorter than the lifetime of the namespace in
460	 *    which shp lives.
461	 *    Since we hold task_lock(), shp cannot be freed under us.
462 */
463 ns = shp->ns;
464
465 /*
466 * 2) If kernel.shm_rmid_forced is not set then only keep track of
467 * which shmids are orphaned, so that a later set of the sysctl
468 * can clean them up.
469 */
470 if (!ns->shm_rmid_forced)
471 goto unlink_continue;
472
473 /*
474 * 3) get a reference to the namespace.
475	 *    The refcount could already be 0. If it is 0, then
476	 *    the shm objects will be freed by free_ipc_work().
477 */
478 ns = get_ipc_ns_not_zero(ns);
479 if (!ns) {
480unlink_continue:
481 list_del_init(&shp->shm_clist);
482 task_unlock(task);
483 continue;
484 }
485
486 /*
487 * 4) get a reference to shp.
488 * This cannot fail: shm_clist_rm() is called before
489 * ipc_rmid(), thus the refcount cannot be 0.
490 */
491 WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
492
493 /*
494 * 5) unlink the shm segment from the list of segments
495 * created by current.
496 * This must be done last. After unlinking,
497 * only the refcounts obtained above prevent IPC_RMID
498 * from destroying the segment or the namespace.
499 */
500 list_del_init(&shp->shm_clist);
501
502 task_unlock(task);
503
504 /*
505	 * 6) We have all references.
506	 *    Thus lock shp and, if needed, destroy it.
507 */
508 down_write(&shm_ids(ns).rwsem);
509 shm_lock_by_ptr(shp);
510 /*
511 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
512 * safe to call ipc_rcu_putref here
513 */
514 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
515
516 if (ipc_valid_object(&shp->shm_perm)) {
517 if (shm_may_destroy(shp))
518 shm_destroy(ns, shp);
519 else
520 shm_unlock(shp);
521 } else {
522 /*
523 * Someone else deleted the shp from namespace
524 * idr/kht while we have waited.
525 * Just unlock and continue.
526 */
527 shm_unlock(shp);
528 }
529
530 up_write(&shm_ids(ns).rwsem);
531 put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
532 }
533}
534
535static vm_fault_t shm_fault(struct vm_fault *vmf)
536{
537 struct file *file = vmf->vma->vm_file;
538 struct shm_file_data *sfd = shm_file_data(file);
539
540 return sfd->vm_ops->fault(vmf);
541}
542
543static int shm_may_split(struct vm_area_struct *vma, unsigned long addr)
544{
545 struct file *file = vma->vm_file;
546 struct shm_file_data *sfd = shm_file_data(file);
547
548 if (sfd->vm_ops->may_split)
549 return sfd->vm_ops->may_split(vma, addr);
550
551 return 0;
552}
553
554static unsigned long shm_pagesize(struct vm_area_struct *vma)
555{
556 struct file *file = vma->vm_file;
557 struct shm_file_data *sfd = shm_file_data(file);
558
559 if (sfd->vm_ops->pagesize)
560 return sfd->vm_ops->pagesize(vma);
561
562 return PAGE_SIZE;
563}
564
565#ifdef CONFIG_NUMA
566static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
567{
568 struct shm_file_data *sfd = shm_file_data(vma->vm_file);
569 int err = 0;
570
571 if (sfd->vm_ops->set_policy)
572 err = sfd->vm_ops->set_policy(vma, mpol);
573 return err;
574}
575
576static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
577 unsigned long addr, pgoff_t *ilx)
578{
579 struct shm_file_data *sfd = shm_file_data(vma->vm_file);
580 struct mempolicy *mpol = vma->vm_policy;
581
582 if (sfd->vm_ops->get_policy)
583 mpol = sfd->vm_ops->get_policy(vma, addr, ilx);
584 return mpol;
585}
586#endif
587
588static int shm_mmap(struct file *file, struct vm_area_struct *vma)
589{
590 struct shm_file_data *sfd = shm_file_data(file);
591 int ret;
592
593 /*
594 * In case of remap_file_pages() emulation, the file can represent an
595 * IPC ID that was removed, and possibly even reused by another shm
596 * segment already. Propagate this case as an error to caller.
597 */
598 ret = __shm_open(sfd);
599 if (ret)
600 return ret;
601
602 ret = call_mmap(sfd->file, vma);
603 if (ret) {
604 __shm_close(sfd);
605 return ret;
606 }
607 sfd->vm_ops = vma->vm_ops;
608#ifdef CONFIG_MMU
609 WARN_ON(!sfd->vm_ops->fault);
610#endif
611 vma->vm_ops = &shm_vm_ops;
612 return 0;
613}
614
615static int shm_release(struct inode *ino, struct file *file)
616{
617 struct shm_file_data *sfd = shm_file_data(file);
618
619 put_ipc_ns(sfd->ns);
620 fput(sfd->file);
621 shm_file_data(file) = NULL;
622 kfree(sfd);
623 return 0;
624}
625
626static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
627{
628 struct shm_file_data *sfd = shm_file_data(file);
629
630 if (!sfd->file->f_op->fsync)
631 return -EINVAL;
632 return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
633}
634
635static long shm_fallocate(struct file *file, int mode, loff_t offset,
636 loff_t len)
637{
638 struct shm_file_data *sfd = shm_file_data(file);
639
640 if (!sfd->file->f_op->fallocate)
641 return -EOPNOTSUPP;
642 return sfd->file->f_op->fallocate(file, mode, offset, len);
643}
644
645static unsigned long shm_get_unmapped_area(struct file *file,
646 unsigned long addr, unsigned long len, unsigned long pgoff,
647 unsigned long flags)
648{
649 struct shm_file_data *sfd = shm_file_data(file);
650
651 return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
652 pgoff, flags);
653}
654
655static const struct file_operations shm_file_operations = {
656 .mmap = shm_mmap,
657 .fsync = shm_fsync,
658 .release = shm_release,
659 .get_unmapped_area = shm_get_unmapped_area,
660 .llseek = noop_llseek,
661 .fallocate = shm_fallocate,
662};
663
664/*
665 * shm_file_operations_huge is now identical to shm_file_operations,
666 * but we keep it distinct for the sake of is_file_shm_hugepages().
667 */
668static const struct file_operations shm_file_operations_huge = {
669 .mmap = shm_mmap,
670 .fsync = shm_fsync,
671 .release = shm_release,
672 .get_unmapped_area = shm_get_unmapped_area,
673 .llseek = noop_llseek,
674 .fallocate = shm_fallocate,
675};
676
677bool is_file_shm_hugepages(struct file *file)
678{
679 return file->f_op == &shm_file_operations_huge;
680}
681
682static const struct vm_operations_struct shm_vm_ops = {
683 .open = shm_open, /* callback for a new vm-area open */
684 .close = shm_close, /* callback for when the vm-area is released */
685 .fault = shm_fault,
686 .may_split = shm_may_split,
687 .pagesize = shm_pagesize,
688#if defined(CONFIG_NUMA)
689 .set_policy = shm_set_policy,
690 .get_policy = shm_get_policy,
691#endif
692};
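
/*
 * shm_vm_ops is installed by shm_mmap() in place of the vm_ops supplied by
 * the underlying shmem/hugetlbfs file (saved in sfd->vm_ops): open/close
 * keep shm_nattch and the attach/detach times current across fork() and
 * unmap, while fault, may_split, pagesize and the NUMA policy hooks are
 * forwarded to the wrapped operations when those provide them.
 */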
693
694/**
695 * newseg - Create a new shared memory segment
696 * @ns: namespace
697 * @params: ptr to the structure that contains key, size and shmflg
698 *
699 * Called with shm_ids.rwsem held as a writer.
700 */
701static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
702{
703 key_t key = params->key;
704 int shmflg = params->flg;
705 size_t size = params->u.size;
706 int error;
707 struct shmid_kernel *shp;
708 size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
709 struct file *file;
710 char name[13];
711 vm_flags_t acctflag = 0;
712
713 if (size < SHMMIN || size > ns->shm_ctlmax)
714 return -EINVAL;
715
716 if (numpages << PAGE_SHIFT < size)
717 return -ENOSPC;
718
719 if (ns->shm_tot + numpages < ns->shm_tot ||
720 ns->shm_tot + numpages > ns->shm_ctlall)
721 return -ENOSPC;
722
723 shp = kmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
724 if (unlikely(!shp))
725 return -ENOMEM;
726
727 shp->shm_perm.key = key;
728 shp->shm_perm.mode = (shmflg & S_IRWXUGO);
729 shp->mlock_ucounts = NULL;
730
731 shp->shm_perm.security = NULL;
732 error = security_shm_alloc(&shp->shm_perm);
733 if (error) {
734 kfree(shp);
735 return error;
736 }
737
738 sprintf(name, "SYSV%08x", key);
739 if (shmflg & SHM_HUGETLB) {
740 struct hstate *hs;
741 size_t hugesize;
742
743 hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
744 if (!hs) {
745 error = -EINVAL;
746 goto no_file;
747 }
748 hugesize = ALIGN(size, huge_page_size(hs));
749
750 /* hugetlb_file_setup applies strict accounting */
751 if (shmflg & SHM_NORESERVE)
752 acctflag = VM_NORESERVE;
753 file = hugetlb_file_setup(name, hugesize, acctflag,
754 HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
755 } else {
756 /*
757 * Do not allow no accounting for OVERCOMMIT_NEVER, even
758 * if it's asked for.
759 */
760 if ((shmflg & SHM_NORESERVE) &&
761 sysctl_overcommit_memory != OVERCOMMIT_NEVER)
762 acctflag = VM_NORESERVE;
763 file = shmem_kernel_file_setup(name, size, acctflag);
764 }
765 error = PTR_ERR(file);
766 if (IS_ERR(file))
767 goto no_file;
768
769 shp->shm_cprid = get_pid(task_tgid(current));
770 shp->shm_lprid = NULL;
771 shp->shm_atim = shp->shm_dtim = 0;
772 shp->shm_ctim = ktime_get_real_seconds();
773 shp->shm_segsz = size;
774 shp->shm_nattch = 0;
775 shp->shm_file = file;
776 shp->shm_creator = current;
777
778 /* ipc_addid() locks shp upon success. */
779 error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
780 if (error < 0)
781 goto no_id;
782
783 shp->ns = ns;
784
785 task_lock(current);
786	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
787 task_unlock(current);
788
789 /*
790 * shmid gets reported as "inode#" in /proc/pid/maps.
791 * proc-ps tools use this. Changing this will break them.
792 */
793 file_inode(file)->i_ino = shp->shm_perm.id;
794
795 ns->shm_tot += numpages;
796 error = shp->shm_perm.id;
797
798 ipc_unlock_object(&shp->shm_perm);
799 rcu_read_unlock();
800 return error;
801
802no_id:
803 ipc_update_pid(&shp->shm_cprid, NULL);
804 ipc_update_pid(&shp->shm_lprid, NULL);
805 fput(file);
806 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
807 return error;
808no_file:
809 call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
810 return error;
811}
812
813/*
814 * Called with shm_ids.rwsem and ipcp locked.
815 */
816static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
817{
818 struct shmid_kernel *shp;
819
820 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
821 if (shp->shm_segsz < params->u.size)
822 return -EINVAL;
823
824 return 0;
825}
826
827long ksys_shmget(key_t key, size_t size, int shmflg)
828{
829 struct ipc_namespace *ns;
830 static const struct ipc_ops shm_ops = {
831 .getnew = newseg,
832 .associate = security_shm_associate,
833 .more_checks = shm_more_checks,
834 };
835 struct ipc_params shm_params;
836
837 ns = current->nsproxy->ipc_ns;
838
839 shm_params.key = key;
840 shm_params.flg = shmflg;
841 shm_params.u.size = size;
842
843 return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
844}
845
846SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
847{
848 return ksys_shmget(key, size, shmflg);
849}
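
/*
 * Illustrative userspace sketch (not part of this file) of how the syscalls
 * implemented here fit together:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	... use the mapping ...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */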
850
851static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
852{
853 switch (version) {
854 case IPC_64:
855 return copy_to_user(buf, in, sizeof(*in));
856 case IPC_OLD:
857 {
858 struct shmid_ds out;
859
860 memset(&out, 0, sizeof(out));
861 ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
862 out.shm_segsz = in->shm_segsz;
863 out.shm_atime = in->shm_atime;
864 out.shm_dtime = in->shm_dtime;
865 out.shm_ctime = in->shm_ctime;
866 out.shm_cpid = in->shm_cpid;
867 out.shm_lpid = in->shm_lpid;
868 out.shm_nattch = in->shm_nattch;
869
870 return copy_to_user(buf, &out, sizeof(out));
871 }
872 default:
873 return -EINVAL;
874 }
875}
876
877static inline unsigned long
878copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
879{
880 switch (version) {
881 case IPC_64:
882 if (copy_from_user(out, buf, sizeof(*out)))
883 return -EFAULT;
884 return 0;
885 case IPC_OLD:
886 {
887 struct shmid_ds tbuf_old;
888
889 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
890 return -EFAULT;
891
892 out->shm_perm.uid = tbuf_old.shm_perm.uid;
893 out->shm_perm.gid = tbuf_old.shm_perm.gid;
894 out->shm_perm.mode = tbuf_old.shm_perm.mode;
895
896 return 0;
897 }
898 default:
899 return -EINVAL;
900 }
901}
902
903static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
904{
905 switch (version) {
906 case IPC_64:
907 return copy_to_user(buf, in, sizeof(*in));
908 case IPC_OLD:
909 {
910 struct shminfo out;
911
912 if (in->shmmax > INT_MAX)
913 out.shmmax = INT_MAX;
914 else
915 out.shmmax = (int)in->shmmax;
916
917 out.shmmin = in->shmmin;
918 out.shmmni = in->shmmni;
919 out.shmseg = in->shmseg;
920 out.shmall = in->shmall;
921
922 return copy_to_user(buf, &out, sizeof(out));
923 }
924 default:
925 return -EINVAL;
926 }
927}
928
929/*
930 * Calculate and add used RSS and swap pages of a shm.
931 * Called with shm_ids.rwsem held as a reader
932 */
933static void shm_add_rss_swap(struct shmid_kernel *shp,
934 unsigned long *rss_add, unsigned long *swp_add)
935{
936 struct inode *inode;
937
938 inode = file_inode(shp->shm_file);
939
940 if (is_file_hugepages(shp->shm_file)) {
941 struct address_space *mapping = inode->i_mapping;
942 struct hstate *h = hstate_file(shp->shm_file);
943 *rss_add += pages_per_huge_page(h) * mapping->nrpages;
944 } else {
945#ifdef CONFIG_SHMEM
946 struct shmem_inode_info *info = SHMEM_I(inode);
947
948 spin_lock_irq(&info->lock);
949 *rss_add += inode->i_mapping->nrpages;
950 *swp_add += info->swapped;
951 spin_unlock_irq(&info->lock);
952#else
953 *rss_add += inode->i_mapping->nrpages;
954#endif
955 }
956}
957
958/*
959 * Called with shm_ids.rwsem held as a reader
960 */
961static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
962 unsigned long *swp)
963{
964 int next_id;
965 int total, in_use;
966
967 *rss = 0;
968 *swp = 0;
969
970 in_use = shm_ids(ns).in_use;
971
972 for (total = 0, next_id = 0; total < in_use; next_id++) {
973 struct kern_ipc_perm *ipc;
974 struct shmid_kernel *shp;
975
976 ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
977 if (ipc == NULL)
978 continue;
979 shp = container_of(ipc, struct shmid_kernel, shm_perm);
980
981 shm_add_rss_swap(shp, rss, swp);
982
983 total++;
984 }
985}
986
987/*
988 * This function handles some shmctl commands which require the rwsem
989 * to be held in write mode.
990 * NOTE: no locks must be held, the rwsem is taken inside this function.
991 */
992static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
993 struct shmid64_ds *shmid64)
994{
995 struct kern_ipc_perm *ipcp;
996 struct shmid_kernel *shp;
997 int err;
998
999 down_write(&shm_ids(ns).rwsem);
1000 rcu_read_lock();
1001
1002 ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd,
1003 &shmid64->shm_perm, 0);
1004 if (IS_ERR(ipcp)) {
1005 err = PTR_ERR(ipcp);
1006 goto out_unlock1;
1007 }
1008
1009 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1010
1011 err = security_shm_shmctl(&shp->shm_perm, cmd);
1012 if (err)
1013 goto out_unlock1;
1014
1015 switch (cmd) {
1016 case IPC_RMID:
1017 ipc_lock_object(&shp->shm_perm);
1018 /* do_shm_rmid unlocks the ipc object and rcu */
1019 do_shm_rmid(ns, ipcp);
1020 goto out_up;
1021 case IPC_SET:
1022 ipc_lock_object(&shp->shm_perm);
1023 err = ipc_update_perm(&shmid64->shm_perm, ipcp);
1024 if (err)
1025 goto out_unlock0;
1026 shp->shm_ctim = ktime_get_real_seconds();
1027 break;
1028 default:
1029 err = -EINVAL;
1030 goto out_unlock1;
1031 }
1032
1033out_unlock0:
1034 ipc_unlock_object(&shp->shm_perm);
1035out_unlock1:
1036 rcu_read_unlock();
1037out_up:
1038 up_write(&shm_ids(ns).rwsem);
1039 return err;
1040}
1041
1042static int shmctl_ipc_info(struct ipc_namespace *ns,
1043 struct shminfo64 *shminfo)
1044{
1045 int err = security_shm_shmctl(NULL, IPC_INFO);
1046 if (!err) {
1047 memset(shminfo, 0, sizeof(*shminfo));
1048 shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
1049 shminfo->shmmax = ns->shm_ctlmax;
1050 shminfo->shmall = ns->shm_ctlall;
1051 shminfo->shmmin = SHMMIN;
1052 down_read(&shm_ids(ns).rwsem);
1053 err = ipc_get_maxidx(&shm_ids(ns));
1054 up_read(&shm_ids(ns).rwsem);
1055 if (err < 0)
1056 err = 0;
1057 }
1058 return err;
1059}
1060
1061static int shmctl_shm_info(struct ipc_namespace *ns,
1062 struct shm_info *shm_info)
1063{
1064 int err = security_shm_shmctl(NULL, SHM_INFO);
1065 if (!err) {
1066 memset(shm_info, 0, sizeof(*shm_info));
1067 down_read(&shm_ids(ns).rwsem);
1068 shm_info->used_ids = shm_ids(ns).in_use;
1069 shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
1070 shm_info->shm_tot = ns->shm_tot;
1071 shm_info->swap_attempts = 0;
1072 shm_info->swap_successes = 0;
1073 err = ipc_get_maxidx(&shm_ids(ns));
1074 up_read(&shm_ids(ns).rwsem);
1075 if (err < 0)
1076 err = 0;
1077 }
1078 return err;
1079}
1080
1081static int shmctl_stat(struct ipc_namespace *ns, int shmid,
1082 int cmd, struct shmid64_ds *tbuf)
1083{
1084 struct shmid_kernel *shp;
1085 int err;
1086
1087 memset(tbuf, 0, sizeof(*tbuf));
1088
1089 rcu_read_lock();
1090 if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
1091 shp = shm_obtain_object(ns, shmid);
1092 if (IS_ERR(shp)) {
1093 err = PTR_ERR(shp);
1094 goto out_unlock;
1095 }
1096 } else { /* IPC_STAT */
1097 shp = shm_obtain_object_check(ns, shmid);
1098 if (IS_ERR(shp)) {
1099 err = PTR_ERR(shp);
1100 goto out_unlock;
1101 }
1102 }
1103
1104 /*
1105 * Semantically SHM_STAT_ANY ought to be identical to
1106	 * the functionality provided by the /proc/sysvipc/
1107 * interface. As such, only audit these calls and
1108 * do not do traditional S_IRUGO permission checks on
1109 * the ipc object.
1110 */
1111 if (cmd == SHM_STAT_ANY)
1112 audit_ipc_obj(&shp->shm_perm);
1113 else {
1114 err = -EACCES;
1115 if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
1116 goto out_unlock;
1117 }
1118
1119 err = security_shm_shmctl(&shp->shm_perm, cmd);
1120 if (err)
1121 goto out_unlock;
1122
1123 ipc_lock_object(&shp->shm_perm);
1124
1125 if (!ipc_valid_object(&shp->shm_perm)) {
1126 ipc_unlock_object(&shp->shm_perm);
1127 err = -EIDRM;
1128 goto out_unlock;
1129 }
1130
1131 kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
1132 tbuf->shm_segsz = shp->shm_segsz;
1133 tbuf->shm_atime = shp->shm_atim;
1134 tbuf->shm_dtime = shp->shm_dtim;
1135 tbuf->shm_ctime = shp->shm_ctim;
1136#ifndef CONFIG_64BIT
1137 tbuf->shm_atime_high = shp->shm_atim >> 32;
1138 tbuf->shm_dtime_high = shp->shm_dtim >> 32;
1139 tbuf->shm_ctime_high = shp->shm_ctim >> 32;
1140#endif
1141 tbuf->shm_cpid = pid_vnr(shp->shm_cprid);
1142 tbuf->shm_lpid = pid_vnr(shp->shm_lprid);
1143 tbuf->shm_nattch = shp->shm_nattch;
1144
1145 if (cmd == IPC_STAT) {
1146 /*
1147 * As defined in SUS:
1148 * Return 0 on success
1149 */
1150 err = 0;
1151 } else {
1152 /*
1153 * SHM_STAT and SHM_STAT_ANY (both Linux specific)
1154 * Return the full id, including the sequence number
1155 */
1156 err = shp->shm_perm.id;
1157 }
1158
1159 ipc_unlock_object(&shp->shm_perm);
1160out_unlock:
1161 rcu_read_unlock();
1162 return err;
1163}
1164
1165static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
1166{
1167 struct shmid_kernel *shp;
1168 struct file *shm_file;
1169 int err;
1170
1171 rcu_read_lock();
1172 shp = shm_obtain_object_check(ns, shmid);
1173 if (IS_ERR(shp)) {
1174 err = PTR_ERR(shp);
1175 goto out_unlock1;
1176 }
1177
1178 audit_ipc_obj(&(shp->shm_perm));
1179 err = security_shm_shmctl(&shp->shm_perm, cmd);
1180 if (err)
1181 goto out_unlock1;
1182
1183 ipc_lock_object(&shp->shm_perm);
1184
1185 /* check if shm_destroy() is tearing down shp */
1186 if (!ipc_valid_object(&shp->shm_perm)) {
1187 err = -EIDRM;
1188 goto out_unlock0;
1189 }
1190
1191 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1192 kuid_t euid = current_euid();
1193
1194 if (!uid_eq(euid, shp->shm_perm.uid) &&
1195 !uid_eq(euid, shp->shm_perm.cuid)) {
1196 err = -EPERM;
1197 goto out_unlock0;
1198 }
1199 if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1200 err = -EPERM;
1201 goto out_unlock0;
1202 }
1203 }
1204
1205 shm_file = shp->shm_file;
1206 if (is_file_hugepages(shm_file))
1207 goto out_unlock0;
1208
1209 if (cmd == SHM_LOCK) {
1210 struct ucounts *ucounts = current_ucounts();
1211
1212 err = shmem_lock(shm_file, 1, ucounts);
1213 if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1214 shp->shm_perm.mode |= SHM_LOCKED;
1215 shp->mlock_ucounts = ucounts;
1216 }
1217 goto out_unlock0;
1218 }
1219
1220 /* SHM_UNLOCK */
1221 if (!(shp->shm_perm.mode & SHM_LOCKED))
1222 goto out_unlock0;
1223 shmem_lock(shm_file, 0, shp->mlock_ucounts);
1224 shp->shm_perm.mode &= ~SHM_LOCKED;
1225 shp->mlock_ucounts = NULL;
1226 get_file(shm_file);
1227 ipc_unlock_object(&shp->shm_perm);
1228 rcu_read_unlock();
1229 shmem_unlock_mapping(shm_file->f_mapping);
1230
1231 fput(shm_file);
1232 return err;
1233
1234out_unlock0:
1235 ipc_unlock_object(&shp->shm_perm);
1236out_unlock1:
1237 rcu_read_unlock();
1238 return err;
1239}
1240
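/*
 * ksys_shmctl() dispatches the shmctl commands onto the helpers above:
 * IPC_SET and IPC_RMID go through shmctl_down(), which takes shm_ids.rwsem
 * for writing, while the IPC_INFO/SHM_INFO and *_STAT paths only need the
 * rwsem for reading (or plain RCU). Note that for SHM_STAT/SHM_STAT_ANY the
 * successful return value is the full id including the sequence number, not
 * zero.
 */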
1241static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version)
1242{
1243 int err;
1244 struct ipc_namespace *ns;
1245 struct shmid64_ds sem64;
1246
1247 if (cmd < 0 || shmid < 0)
1248 return -EINVAL;
1249
1250 ns = current->nsproxy->ipc_ns;
1251
1252 switch (cmd) {
1253 case IPC_INFO: {
1254 struct shminfo64 shminfo;
1255 err = shmctl_ipc_info(ns, &shminfo);
1256 if (err < 0)
1257 return err;
1258 if (copy_shminfo_to_user(buf, &shminfo, version))
1259 err = -EFAULT;
1260 return err;
1261 }
1262 case SHM_INFO: {
1263 struct shm_info shm_info;
1264 err = shmctl_shm_info(ns, &shm_info);
1265 if (err < 0)
1266 return err;
1267 if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
1268 err = -EFAULT;
1269 return err;
1270 }
1271 case SHM_STAT:
1272 case SHM_STAT_ANY:
1273 case IPC_STAT: {
1274 err = shmctl_stat(ns, shmid, cmd, &sem64);
1275 if (err < 0)
1276 return err;
1277 if (copy_shmid_to_user(buf, &sem64, version))
1278 err = -EFAULT;
1279 return err;
1280 }
1281 case IPC_SET:
1282 if (copy_shmid_from_user(&sem64, buf, version))
1283 return -EFAULT;
1284 fallthrough;
1285 case IPC_RMID:
1286 return shmctl_down(ns, shmid, cmd, &sem64);
1287 case SHM_LOCK:
1288 case SHM_UNLOCK:
1289 return shmctl_do_lock(ns, shmid, cmd);
1290 default:
1291 return -EINVAL;
1292 }
1293}
1294
1295SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1296{
1297 return ksys_shmctl(shmid, cmd, buf, IPC_64);
1298}
1299
1300#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
1301long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
1302{
1303 int version = ipc_parse_version(&cmd);
1304
1305 return ksys_shmctl(shmid, cmd, buf, version);
1306}
1307
1308SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1309{
1310 return ksys_old_shmctl(shmid, cmd, buf);
1311}
1312#endif
1313
1314#ifdef CONFIG_COMPAT
1315
1316struct compat_shmid_ds {
1317 struct compat_ipc_perm shm_perm;
1318 int shm_segsz;
1319 old_time32_t shm_atime;
1320 old_time32_t shm_dtime;
1321 old_time32_t shm_ctime;
1322 compat_ipc_pid_t shm_cpid;
1323 compat_ipc_pid_t shm_lpid;
1324 unsigned short shm_nattch;
1325 unsigned short shm_unused;
1326 compat_uptr_t shm_unused2;
1327 compat_uptr_t shm_unused3;
1328};
1329
1330struct compat_shminfo64 {
1331 compat_ulong_t shmmax;
1332 compat_ulong_t shmmin;
1333 compat_ulong_t shmmni;
1334 compat_ulong_t shmseg;
1335 compat_ulong_t shmall;
1336 compat_ulong_t __unused1;
1337 compat_ulong_t __unused2;
1338 compat_ulong_t __unused3;
1339 compat_ulong_t __unused4;
1340};
1341
1342struct compat_shm_info {
1343 compat_int_t used_ids;
1344 compat_ulong_t shm_tot, shm_rss, shm_swp;
1345 compat_ulong_t swap_attempts, swap_successes;
1346};
1347
1348static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
1349 int version)
1350{
1351 if (in->shmmax > INT_MAX)
1352 in->shmmax = INT_MAX;
1353 if (version == IPC_64) {
1354 struct compat_shminfo64 info;
1355 memset(&info, 0, sizeof(info));
1356 info.shmmax = in->shmmax;
1357 info.shmmin = in->shmmin;
1358 info.shmmni = in->shmmni;
1359 info.shmseg = in->shmseg;
1360 info.shmall = in->shmall;
1361 return copy_to_user(buf, &info, sizeof(info));
1362 } else {
1363 struct shminfo info;
1364 memset(&info, 0, sizeof(info));
1365 info.shmmax = in->shmmax;
1366 info.shmmin = in->shmmin;
1367 info.shmmni = in->shmmni;
1368 info.shmseg = in->shmseg;
1369 info.shmall = in->shmall;
1370 return copy_to_user(buf, &info, sizeof(info));
1371 }
1372}
1373
1374static int put_compat_shm_info(struct shm_info *ip,
1375 struct compat_shm_info __user *uip)
1376{
1377 struct compat_shm_info info;
1378
1379 memset(&info, 0, sizeof(info));
1380 info.used_ids = ip->used_ids;
1381 info.shm_tot = ip->shm_tot;
1382 info.shm_rss = ip->shm_rss;
1383 info.shm_swp = ip->shm_swp;
1384 info.swap_attempts = ip->swap_attempts;
1385 info.swap_successes = ip->swap_successes;
1386 return copy_to_user(uip, &info, sizeof(info));
1387}
1388
1389static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
1390 int version)
1391{
1392 if (version == IPC_64) {
1393 struct compat_shmid64_ds v;
1394 memset(&v, 0, sizeof(v));
1395 to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
1396 v.shm_atime = lower_32_bits(in->shm_atime);
1397 v.shm_atime_high = upper_32_bits(in->shm_atime);
1398 v.shm_dtime = lower_32_bits(in->shm_dtime);
1399 v.shm_dtime_high = upper_32_bits(in->shm_dtime);
1400 v.shm_ctime = lower_32_bits(in->shm_ctime);
1401 v.shm_ctime_high = upper_32_bits(in->shm_ctime);
1402 v.shm_segsz = in->shm_segsz;
1403 v.shm_nattch = in->shm_nattch;
1404 v.shm_cpid = in->shm_cpid;
1405 v.shm_lpid = in->shm_lpid;
1406 return copy_to_user(buf, &v, sizeof(v));
1407 } else {
1408 struct compat_shmid_ds v;
1409 memset(&v, 0, sizeof(v));
1410 to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
1411 v.shm_perm.key = in->shm_perm.key;
1412 v.shm_atime = in->shm_atime;
1413 v.shm_dtime = in->shm_dtime;
1414 v.shm_ctime = in->shm_ctime;
1415 v.shm_segsz = in->shm_segsz;
1416 v.shm_nattch = in->shm_nattch;
1417 v.shm_cpid = in->shm_cpid;
1418 v.shm_lpid = in->shm_lpid;
1419 return copy_to_user(buf, &v, sizeof(v));
1420 }
1421}
1422
1423static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
1424 int version)
1425{
1426 memset(out, 0, sizeof(*out));
1427 if (version == IPC_64) {
1428 struct compat_shmid64_ds __user *p = buf;
1429 return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
1430 } else {
1431 struct compat_shmid_ds __user *p = buf;
1432 return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
1433 }
1434}
1435
1436static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version)
1437{
1438 struct ipc_namespace *ns;
1439 struct shmid64_ds sem64;
1440 int err;
1441
1442 ns = current->nsproxy->ipc_ns;
1443
1444 if (cmd < 0 || shmid < 0)
1445 return -EINVAL;
1446
1447 switch (cmd) {
1448 case IPC_INFO: {
1449 struct shminfo64 shminfo;
1450 err = shmctl_ipc_info(ns, &shminfo);
1451 if (err < 0)
1452 return err;
1453 if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
1454 err = -EFAULT;
1455 return err;
1456 }
1457 case SHM_INFO: {
1458 struct shm_info shm_info;
1459 err = shmctl_shm_info(ns, &shm_info);
1460 if (err < 0)
1461 return err;
1462 if (put_compat_shm_info(&shm_info, uptr))
1463 err = -EFAULT;
1464 return err;
1465 }
1466 case IPC_STAT:
1467 case SHM_STAT_ANY:
1468 case SHM_STAT:
1469 err = shmctl_stat(ns, shmid, cmd, &sem64);
1470 if (err < 0)
1471 return err;
1472 if (copy_compat_shmid_to_user(uptr, &sem64, version))
1473 err = -EFAULT;
1474 return err;
1475
1476 case IPC_SET:
1477 if (copy_compat_shmid_from_user(&sem64, uptr, version))
1478 return -EFAULT;
1479 fallthrough;
1480 case IPC_RMID:
1481 return shmctl_down(ns, shmid, cmd, &sem64);
1482 case SHM_LOCK:
1483 case SHM_UNLOCK:
1484 return shmctl_do_lock(ns, shmid, cmd);
1485 default:
1486 return -EINVAL;
1487 }
1488 return err;
1489}
1490
1491COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
1492{
1493 return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64);
1494}
1495
1496#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
1497long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr)
1498{
1499 int version = compat_ipc_parse_version(&cmd);
1500
1501 return compat_ksys_shmctl(shmid, cmd, uptr, version);
1502}
1503
1504COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr)
1505{
1506 return compat_ksys_old_shmctl(shmid, cmd, uptr);
1507}
1508#endif
1509#endif
1510
1511/*
1512 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1513 *
1514 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1515 * "raddr" thing points to kernel space, and there has to be a wrapper around
1516 * this.
1517 */
1518long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1519 ulong *raddr, unsigned long shmlba)
1520{
1521 struct shmid_kernel *shp;
1522 unsigned long addr = (unsigned long)shmaddr;
1523 unsigned long size;
1524 struct file *file, *base;
1525 int err;
1526 unsigned long flags = MAP_SHARED;
1527 unsigned long prot;
1528 int acc_mode;
1529 struct ipc_namespace *ns;
1530 struct shm_file_data *sfd;
1531 int f_flags;
1532 unsigned long populate = 0;
1533
1534 err = -EINVAL;
1535 if (shmid < 0)
1536 goto out;
1537
1538 if (addr) {
1539 if (addr & (shmlba - 1)) {
1540 if (shmflg & SHM_RND) {
1541 addr &= ~(shmlba - 1); /* round down */
1542
1543 /*
1544 * Ensure that the round-down is non-nil
1545 * when remapping. This can happen for
1546 * cases when addr < shmlba.
1547 */
1548 if (!addr && (shmflg & SHM_REMAP))
1549 goto out;
1550 } else
1551#ifndef __ARCH_FORCE_SHMLBA
1552 if (addr & ~PAGE_MASK)
1553#endif
1554 goto out;
1555 }
1556
1557 flags |= MAP_FIXED;
1558 } else if ((shmflg & SHM_REMAP))
1559 goto out;
1560
1561 if (shmflg & SHM_RDONLY) {
1562 prot = PROT_READ;
1563 acc_mode = S_IRUGO;
1564 f_flags = O_RDONLY;
1565 } else {
1566 prot = PROT_READ | PROT_WRITE;
1567 acc_mode = S_IRUGO | S_IWUGO;
1568 f_flags = O_RDWR;
1569 }
1570 if (shmflg & SHM_EXEC) {
1571 prot |= PROT_EXEC;
1572 acc_mode |= S_IXUGO;
1573 }
1574
1575 /*
1576 * We cannot rely on the fs check since SYSV IPC does have an
1577 * additional creator id...
1578 */
1579 ns = current->nsproxy->ipc_ns;
1580 rcu_read_lock();
1581 shp = shm_obtain_object_check(ns, shmid);
1582 if (IS_ERR(shp)) {
1583 err = PTR_ERR(shp);
1584 goto out_unlock;
1585 }
1586
1587 err = -EACCES;
1588 if (ipcperms(ns, &shp->shm_perm, acc_mode))
1589 goto out_unlock;
1590
1591 err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
1592 if (err)
1593 goto out_unlock;
1594
1595 ipc_lock_object(&shp->shm_perm);
1596
1597 /* check if shm_destroy() is tearing down shp */
1598 if (!ipc_valid_object(&shp->shm_perm)) {
1599 ipc_unlock_object(&shp->shm_perm);
1600 err = -EIDRM;
1601 goto out_unlock;
1602 }
1603
1604 /*
1605 * We need to take a reference to the real shm file to prevent the
1606 * pointer from becoming stale in cases where the lifetime of the outer
1607 * file extends beyond that of the shm segment. It's not usually
1608 * possible, but it can happen during remap_file_pages() emulation as
1609 * that unmaps the memory, then does ->mmap() via file reference only.
1610 * We'll deny the ->mmap() if the shm segment was since removed, but to
1611 * detect shm ID reuse we need to compare the file pointers.
1612 */
1613 base = get_file(shp->shm_file);
1614 shp->shm_nattch++;
1615 size = i_size_read(file_inode(base));
1616 ipc_unlock_object(&shp->shm_perm);
1617 rcu_read_unlock();
1618
1619 err = -ENOMEM;
1620 sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1621 if (!sfd) {
1622 fput(base);
1623 goto out_nattch;
1624 }
1625
1626 file = alloc_file_clone(base, f_flags,
1627 is_file_hugepages(base) ?
1628 &shm_file_operations_huge :
1629 &shm_file_operations);
1630 err = PTR_ERR(file);
1631 if (IS_ERR(file)) {
1632 kfree(sfd);
1633 fput(base);
1634 goto out_nattch;
1635 }
1636
1637 sfd->id = shp->shm_perm.id;
1638 sfd->ns = get_ipc_ns(ns);
1639 sfd->file = base;
1640 sfd->vm_ops = NULL;
1641 file->private_data = sfd;
1642
1643 err = security_mmap_file(file, prot, flags);
1644 if (err)
1645 goto out_fput;
1646
1647 if (mmap_write_lock_killable(current->mm)) {
1648 err = -EINTR;
1649 goto out_fput;
1650 }
1651
1652 if (addr && !(shmflg & SHM_REMAP)) {
1653 err = -EINVAL;
1654 if (addr + size < addr)
1655 goto invalid;
1656
1657 if (find_vma_intersection(current->mm, addr, addr + size))
1658 goto invalid;
1659 }
1660
1661 addr = do_mmap(file, addr, size, prot, flags, 0, 0, &populate, NULL);
1662 *raddr = addr;
1663 err = 0;
1664 if (IS_ERR_VALUE(addr))
1665 err = (long)addr;
1666invalid:
1667 mmap_write_unlock(current->mm);
1668 if (populate)
1669 mm_populate(addr, populate);
1670
1671out_fput:
1672 fput(file);
1673
1674out_nattch:
1675 down_write(&shm_ids(ns).rwsem);
1676 shp = shm_lock(ns, shmid);
1677 shp->shm_nattch--;
1678
1679 if (shm_may_destroy(shp))
1680 shm_destroy(ns, shp);
1681 else
1682 shm_unlock(shp);
1683 up_write(&shm_ids(ns).rwsem);
1684 return err;
1685
1686out_unlock:
1687 rcu_read_unlock();
1688out:
1689 return err;
1690}
1691
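/*
 * The shmat() wrapper below passes a kernel-space raddr to do_shmat() and
 * returns the mapped address as the syscall result; the
 * force_successful_syscall_return() call keeps architectures that flag
 * errors out of band from mistaking a large address for a negative errno.
 */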
1692SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1693{
1694 unsigned long ret;
1695 long err;
1696
1697 err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1698 if (err)
1699 return err;
1700 force_successful_syscall_return();
1701 return (long)ret;
1702}
1703
1704#ifdef CONFIG_COMPAT
1705
1706#ifndef COMPAT_SHMLBA
1707#define COMPAT_SHMLBA SHMLBA
1708#endif
1709
1710COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
1711{
1712 unsigned long ret;
1713 long err;
1714
1715 err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
1716 if (err)
1717 return err;
1718 force_successful_syscall_return();
1719 return (long)ret;
1720}
1721#endif
1722
1723/*
1724 * detach and kill segment if marked destroyed.
1725 * The work is done in shm_close.
1726 */
1727long ksys_shmdt(char __user *shmaddr)
1728{
1729 struct mm_struct *mm = current->mm;
1730 struct vm_area_struct *vma;
1731 unsigned long addr = (unsigned long)shmaddr;
1732 int retval = -EINVAL;
1733#ifdef CONFIG_MMU
1734 loff_t size = 0;
1735 struct file *file;
1736 VMA_ITERATOR(vmi, mm, addr);
1737#endif
1738
1739 if (addr & ~PAGE_MASK)
1740 return retval;
1741
1742 if (mmap_write_lock_killable(mm))
1743 return -EINTR;
1744
1745 /*
1746 * This function tries to be smart and unmap shm segments that
1747 * were modified by partial mlock or munmap calls:
1748 * - It first determines the size of the shm segment that should be
1749 * unmapped: It searches for a vma that is backed by shm and that
1750	 *   started at address shmaddr. It records its size and then unmaps
1751 * it.
1752 * - Then it unmaps all shm vmas that started at shmaddr and that
1753 * are within the initially determined size and that are from the
1754 * same shm segment from which we determined the size.
1755 * Errors from do_munmap are ignored: the function only fails if
1756 * it's called with invalid parameters or if it's called to unmap
1757 * a part of a vma. Both calls in this function are for full vmas,
1758 * the parameters are directly copied from the vma itself and always
1759 * valid - therefore do_munmap cannot fail. (famous last words?)
1760 */
1761 /*
1762 * If it had been mremap()'d, the starting address would not
1763 * match the usual checks anyway. So assume all vma's are
1764 * above the starting address given.
1765 */
1766
1767#ifdef CONFIG_MMU
1768 for_each_vma(vmi, vma) {
1769 /*
1770 * Check if the starting address would match, i.e. it's
1771 * a fragment created by mprotect() and/or munmap(), or it
1772	 * otherwise starts at this address with no hassles.
1773 */
1774 if ((vma->vm_ops == &shm_vm_ops) &&
1775 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1776
1777 /*
1778 * Record the file of the shm segment being
1779 * unmapped. With mremap(), someone could place
1780 * page from another segment but with equal offsets
1781	 * a page from another segment but with equal offsets
1782 */
1783 file = vma->vm_file;
1784 size = i_size_read(file_inode(vma->vm_file));
1785 do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
1786 NULL, false);
1787 /*
1788 * We discovered the size of the shm segment, so
1789 * break out of here and fall through to the next
1790 * loop that uses the size information to stop
1791 * searching for matching vma's.
1792 */
1793 retval = 0;
1794 vma = vma_next(&vmi);
1795 break;
1796 }
1797 }
1798
1799 /*
1800 * We need look no further than the maximum address a fragment
1801 * could possibly have landed at. Also cast things to loff_t to
1802 * prevent overflows and make comparisons vs. equal-width types.
1803 */
1804 size = PAGE_ALIGN(size);
1805 while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1806 /* finding a matching vma now does not alter retval */
1807 if ((vma->vm_ops == &shm_vm_ops) &&
1808 ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1809 (vma->vm_file == file)) {
1810 do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
1811 NULL, false);
1812 }
1813
1814 vma = vma_next(&vmi);
1815 }
1816
1817#else /* CONFIG_MMU */
1818 vma = vma_lookup(mm, addr);
1819 /* under NOMMU conditions, the exact address to be destroyed must be
1820 * given
1821 */
1822 if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1823 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1824 retval = 0;
1825 }
1826
1827#endif
1828
1829 mmap_write_unlock(mm);
1830 return retval;
1831}
1832
1833SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1834{
1835 return ksys_shmdt(shmaddr);
1836}
1837
1838#ifdef CONFIG_PROC_FS
1839static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1840{
1841 struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
1842 struct user_namespace *user_ns = seq_user_ns(s);
1843 struct kern_ipc_perm *ipcp = it;
1844 struct shmid_kernel *shp;
1845 unsigned long rss = 0, swp = 0;
1846
1847 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1848 shm_add_rss_swap(shp, &rss, &swp);
1849
1850#if BITS_PER_LONG <= 32
1851#define SIZE_SPEC "%10lu"
1852#else
1853#define SIZE_SPEC "%21lu"
1854#endif
1855
1856 seq_printf(s,
1857 "%10d %10d %4o " SIZE_SPEC " %5u %5u "
1858 "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
1859 SIZE_SPEC " " SIZE_SPEC "\n",
1860 shp->shm_perm.key,
1861 shp->shm_perm.id,
1862 shp->shm_perm.mode,
1863 shp->shm_segsz,
1864 pid_nr_ns(shp->shm_cprid, pid_ns),
1865 pid_nr_ns(shp->shm_lprid, pid_ns),
1866 shp->shm_nattch,
1867 from_kuid_munged(user_ns, shp->shm_perm.uid),
1868 from_kgid_munged(user_ns, shp->shm_perm.gid),
1869 from_kuid_munged(user_ns, shp->shm_perm.cuid),
1870 from_kgid_munged(user_ns, shp->shm_perm.cgid),
1871 shp->shm_atim,
1872 shp->shm_dtim,
1873 shp->shm_ctim,
1874 rss * PAGE_SIZE,
1875 swp * PAGE_SIZE);
1876
1877 return 0;
1878}
1879#endif
1/*
2 * linux/ipc/shm.c
3 * Copyright (C) 1992, 1993 Krishna Balasubramanian
4 * Many improvements/fixes by Bruno Haible.
5 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
6 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
7 *
8 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
9 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
10 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
11 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
12 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
13 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
14 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
15 *
16 * support for audit of ipc object properties and permission changes
17 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
18 *
19 * namespaces support
20 * OpenVZ, SWsoft Inc.
21 * Pavel Emelianov <xemul@openvz.org>
22 *
23 * Better ipc lock (kern_ipc_perm.lock) handling
24 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
25 */
26
27#include <linux/slab.h>
28#include <linux/mm.h>
29#include <linux/hugetlb.h>
30#include <linux/shm.h>
31#include <linux/init.h>
32#include <linux/file.h>
33#include <linux/mman.h>
34#include <linux/shmem_fs.h>
35#include <linux/security.h>
36#include <linux/syscalls.h>
37#include <linux/audit.h>
38#include <linux/capability.h>
39#include <linux/ptrace.h>
40#include <linux/seq_file.h>
41#include <linux/rwsem.h>
42#include <linux/nsproxy.h>
43#include <linux/mount.h>
44#include <linux/ipc_namespace.h>
45
46#include <asm/uaccess.h>
47
48#include "util.h"
49
50struct shm_file_data {
51 int id;
52 struct ipc_namespace *ns;
53 struct file *file;
54 const struct vm_operations_struct *vm_ops;
55};
56
57#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
58
59static const struct file_operations shm_file_operations;
60static const struct vm_operations_struct shm_vm_ops;
61
62#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
63
64#define shm_unlock(shp) \
65 ipc_unlock(&(shp)->shm_perm)
66
67static int newseg(struct ipc_namespace *, struct ipc_params *);
68static void shm_open(struct vm_area_struct *vma);
69static void shm_close(struct vm_area_struct *vma);
70static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
71#ifdef CONFIG_PROC_FS
72static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
73#endif
74
75void shm_init_ns(struct ipc_namespace *ns)
76{
77 ns->shm_ctlmax = SHMMAX;
78 ns->shm_ctlall = SHMALL;
79 ns->shm_ctlmni = SHMMNI;
80 ns->shm_rmid_forced = 0;
81 ns->shm_tot = 0;
82 ipc_init_ids(&shm_ids(ns));
83}
84
85/*
86 * Called with shm_ids.rwsem (writer) and the shp structure locked.
87 * Only shm_ids.rwsem remains locked on exit.
88 */
89static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
90{
91 struct shmid_kernel *shp;
92 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
93
94 if (shp->shm_nattch) {
95 shp->shm_perm.mode |= SHM_DEST;
96 /* Do not find it any more */
97 shp->shm_perm.key = IPC_PRIVATE;
98 shm_unlock(shp);
99 } else
100 shm_destroy(ns, shp);
101}
102
103#ifdef CONFIG_IPC_NS
104void shm_exit_ns(struct ipc_namespace *ns)
105{
106 free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
107 idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
108}
109#endif
110
111static int __init ipc_ns_init(void)
112{
113 shm_init_ns(&init_ipc_ns);
114 return 0;
115}
116
117pure_initcall(ipc_ns_init);
118
119void __init shm_init(void)
120{
121 ipc_init_proc_interface("sysvipc/shm",
122#if BITS_PER_LONG <= 32
123 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
124#else
125 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
126#endif
127 IPC_SHM_IDS, sysvipc_shm_proc_show);
128}
129
130static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
131{
132 struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);
133
134 if (IS_ERR(ipcp))
135 return ERR_CAST(ipcp);
136
137 return container_of(ipcp, struct shmid_kernel, shm_perm);
138}
139
140static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
141{
142 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
143
144 if (IS_ERR(ipcp))
145 return ERR_CAST(ipcp);
146
147 return container_of(ipcp, struct shmid_kernel, shm_perm);
148}
149
150/*
151 * shm_lock_(check_) routines are called in the paths where the rwsem
152 * is not necessarily held.
153 */
154static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
155{
156 struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
157
158 if (IS_ERR(ipcp))
159 return (struct shmid_kernel *)ipcp;
160
161 return container_of(ipcp, struct shmid_kernel, shm_perm);
162}
163
164static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
165{
166 rcu_read_lock();
167 ipc_lock_object(&ipcp->shm_perm);
168}
169
170static void shm_rcu_free(struct rcu_head *head)
171{
172 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
173 struct shmid_kernel *shp = ipc_rcu_to_struct(p);
174
175 security_shm_free(shp);
176 ipc_rcu_free(head);
177}
178
179static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
180{
181 ipc_rmid(&shm_ids(ns), &s->shm_perm);
182}
183
184
185/* This is called by fork, once for every shm attach. */
186static void shm_open(struct vm_area_struct *vma)
187{
188 struct file *file = vma->vm_file;
189 struct shm_file_data *sfd = shm_file_data(file);
190 struct shmid_kernel *shp;
191
192 shp = shm_lock(sfd->ns, sfd->id);
193 BUG_ON(IS_ERR(shp));
194 shp->shm_atim = get_seconds();
195 shp->shm_lprid = task_tgid_vnr(current);
196 shp->shm_nattch++;
197 shm_unlock(shp);
198}
199
200/*
201 * shm_destroy - free the struct shmid_kernel
202 *
203 * @ns: namespace
204 * @shp: struct to free
205 *
206 * It has to be called with shp and shm_ids.rwsem (writer) locked,
207 * but returns with shp unlocked and freed.
208 */
209static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
210{
211 struct file *shm_file;
212
213 shm_file = shp->shm_file;
214 shp->shm_file = NULL;
215 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
216 shm_rmid(ns, shp);
217 shm_unlock(shp);
218 if (!is_file_hugepages(shm_file))
219 shmem_lock(shm_file, 0, shp->mlock_user);
220 else if (shp->mlock_user)
221 user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
222 fput(shm_file);
223 ipc_rcu_putref(shp, shm_rcu_free);
224}
225
226/*
227 * shm_may_destroy - identifies whether shm segment should be destroyed now
228 *
229 * Returns true if and only if there are no active users of the segment and
230 * one of the following is true:
231 *
232 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
233 *
234 * 2) sysctl kernel.shm_rmid_forced is set to 1.
235 */
236static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
237{
238 return (shp->shm_nattch == 0) &&
239 (ns->shm_rmid_forced ||
240 (shp->shm_perm.mode & SHM_DEST));
241}
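/*
 * Illustrative userspace sketch of the two destruction paths listed above
 * (not part of this file; error handling omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *
 *	Path 1: explicit removal.  The segment is marked SHM_DEST and is
 *	actually destroyed once the last attach goes away:
 *
 *		shmctl(id, IPC_RMID, NULL);
 *
 *	Path 2: setting the sysctl kernel.shm_rmid_forced to 1 makes every
 *	segment destroyable as soon as shm_nattch drops to zero.
 */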
242
243/*
244 * Remove the attach descriptor vma.
245 * Free the memory for the segment if it is marked destroyed.
246 * The descriptor has already been removed from the current->mm->mmap list
247 * and will later be kfree()d.
248 */
249static void shm_close(struct vm_area_struct *vma)
250{
251 struct file *file = vma->vm_file;
252 struct shm_file_data *sfd = shm_file_data(file);
253 struct shmid_kernel *shp;
254 struct ipc_namespace *ns = sfd->ns;
255
256 down_write(&shm_ids(ns).rwsem);
257 /* remove from the list of attaches of the shm segment */
258 shp = shm_lock(ns, sfd->id);
259 BUG_ON(IS_ERR(shp));
260 shp->shm_lprid = task_tgid_vnr(current);
261 shp->shm_dtim = get_seconds();
262 shp->shm_nattch--;
263 if (shm_may_destroy(ns, shp))
264 shm_destroy(ns, shp);
265 else
266 shm_unlock(shp);
267 up_write(&shm_ids(ns).rwsem);
268}
269
270/* Called with ns->shm_ids(ns).rwsem locked */
271static int shm_try_destroy_current(int id, void *p, void *data)
272{
273 struct ipc_namespace *ns = data;
274 struct kern_ipc_perm *ipcp = p;
275 struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
276
277 if (shp->shm_creator != current)
278 return 0;
279
280 /*
281 * Mark it as orphaned to destroy the segment when
282 * kernel.shm_rmid_forced is changed.
283 * It is a no-op if the following shm_may_destroy() returns true.
284 */
285 shp->shm_creator = NULL;
286
287 /*
288 * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID
289 * is not set, it shouldn't be deleted here.
290 */
291 if (!ns->shm_rmid_forced)
292 return 0;
293
294 if (shm_may_destroy(ns, shp)) {
295 shm_lock_by_ptr(shp);
296 shm_destroy(ns, shp);
297 }
298 return 0;
299}
300
301/* Called with ns->shm_ids(ns).rwsem locked */
302static int shm_try_destroy_orphaned(int id, void *p, void *data)
303{
304 struct ipc_namespace *ns = data;
305 struct kern_ipc_perm *ipcp = p;
306 struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
307
308 /*
309 * We want to destroy segments that have no users and whose
310 * originating process has already exited.
311 *
312 * As shp->* are changed under rwsem, it's safe to skip shp locking.
313 */
314 if (shp->shm_creator != NULL)
315 return 0;
316
317 if (shm_may_destroy(ns, shp)) {
318 shm_lock_by_ptr(shp);
319 shm_destroy(ns, shp);
320 }
321 return 0;
322}
323
324void shm_destroy_orphaned(struct ipc_namespace *ns)
325{
326 down_write(&shm_ids(ns).rwsem);
327 if (shm_ids(ns).in_use)
328 idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
329 up_write(&shm_ids(ns).rwsem);
330}
331
332
333void exit_shm(struct task_struct *task)
334{
335 struct ipc_namespace *ns = task->nsproxy->ipc_ns;
336
337 if (shm_ids(ns).in_use == 0)
338 return;
339
340 /* Destroy segments this task created that nobody has mapped yet */
341 down_write(&shm_ids(ns).rwsem);
342 if (shm_ids(ns).in_use)
343 idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
344 up_write(&shm_ids(ns).rwsem);
345}
346
347static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
348{
349 struct file *file = vma->vm_file;
350 struct shm_file_data *sfd = shm_file_data(file);
351
352 return sfd->vm_ops->fault(vma, vmf);
353}
354
355#ifdef CONFIG_NUMA
356static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
357{
358 struct file *file = vma->vm_file;
359 struct shm_file_data *sfd = shm_file_data(file);
360 int err = 0;
361 if (sfd->vm_ops->set_policy)
362 err = sfd->vm_ops->set_policy(vma, new);
363 return err;
364}
365
366static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
367 unsigned long addr)
368{
369 struct file *file = vma->vm_file;
370 struct shm_file_data *sfd = shm_file_data(file);
371 struct mempolicy *pol = NULL;
372
373 if (sfd->vm_ops->get_policy)
374 pol = sfd->vm_ops->get_policy(vma, addr);
375 else if (vma->vm_policy)
376 pol = vma->vm_policy;
377
378 return pol;
379}
380#endif
381
382static int shm_mmap(struct file *file, struct vm_area_struct *vma)
383{
384 struct shm_file_data *sfd = shm_file_data(file);
385 int ret;
386
387 ret = sfd->file->f_op->mmap(sfd->file, vma);
388 if (ret != 0)
389 return ret;
390 sfd->vm_ops = vma->vm_ops;
391#ifdef CONFIG_MMU
392 BUG_ON(!sfd->vm_ops->fault);
393#endif
394 vma->vm_ops = &shm_vm_ops;
395 shm_open(vma);
396
397 return ret;
398}
399
400static int shm_release(struct inode *ino, struct file *file)
401{
402 struct shm_file_data *sfd = shm_file_data(file);
403
404 put_ipc_ns(sfd->ns);
405 shm_file_data(file) = NULL;
406 kfree(sfd);
407 return 0;
408}
409
410static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
411{
412 struct shm_file_data *sfd = shm_file_data(file);
413
414 if (!sfd->file->f_op->fsync)
415 return -EINVAL;
416 return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
417}
418
419static long shm_fallocate(struct file *file, int mode, loff_t offset,
420 loff_t len)
421{
422 struct shm_file_data *sfd = shm_file_data(file);
423
424 if (!sfd->file->f_op->fallocate)
425 return -EOPNOTSUPP;
426 return sfd->file->f_op->fallocate(file, mode, offset, len);
427}
428
429static unsigned long shm_get_unmapped_area(struct file *file,
430 unsigned long addr, unsigned long len, unsigned long pgoff,
431 unsigned long flags)
432{
433 struct shm_file_data *sfd = shm_file_data(file);
434 return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
435 pgoff, flags);
436}
437
438static const struct file_operations shm_file_operations = {
439 .mmap = shm_mmap,
440 .fsync = shm_fsync,
441 .release = shm_release,
442#ifndef CONFIG_MMU
443 .get_unmapped_area = shm_get_unmapped_area,
444#endif
445 .llseek = noop_llseek,
446 .fallocate = shm_fallocate,
447};
448
449static const struct file_operations shm_file_operations_huge = {
450 .mmap = shm_mmap,
451 .fsync = shm_fsync,
452 .release = shm_release,
453 .get_unmapped_area = shm_get_unmapped_area,
454 .llseek = noop_llseek,
455 .fallocate = shm_fallocate,
456};
457
458int is_file_shm_hugepages(struct file *file)
459{
460 return file->f_op == &shm_file_operations_huge;
461}
462
463static const struct vm_operations_struct shm_vm_ops = {
464 .open = shm_open, /* callback for a new vm-area open */
465 .close = shm_close, /* callback for when the vm-area is released */
466 .fault = shm_fault,
467#if defined(CONFIG_NUMA)
468 .set_policy = shm_set_policy,
469 .get_policy = shm_get_policy,
470#endif
471};
472
473/**
474 * newseg - Create a new shared memory segment
475 * @ns: namespace
476 * @params: ptr to the structure that contains key, size and shmflg
477 *
478 * Called with shm_ids.rwsem held as a writer.
479 */
480static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
481{
482 key_t key = params->key;
483 int shmflg = params->flg;
484 size_t size = params->u.size;
485 int error;
486 struct shmid_kernel *shp;
487 size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
488 struct file *file;
489 char name[13];
490 int id;
491 vm_flags_t acctflag = 0;
492
493 if (size < SHMMIN || size > ns->shm_ctlmax)
494 return -EINVAL;
495
496 if (ns->shm_tot + numpages > ns->shm_ctlall)
497 return -ENOSPC;
498
499 shp = ipc_rcu_alloc(sizeof(*shp));
500 if (!shp)
501 return -ENOMEM;
502
503 shp->shm_perm.key = key;
504 shp->shm_perm.mode = (shmflg & S_IRWXUGO);
505 shp->mlock_user = NULL;
506
507 shp->shm_perm.security = NULL;
508 error = security_shm_alloc(shp);
509 if (error) {
510 ipc_rcu_putref(shp, ipc_rcu_free);
511 return error;
512 }
513
514 sprintf(name, "SYSV%08x", key);
515 if (shmflg & SHM_HUGETLB) {
516 struct hstate *hs;
517 size_t hugesize;
518
519 hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
520 if (!hs) {
521 error = -EINVAL;
522 goto no_file;
523 }
524 hugesize = ALIGN(size, huge_page_size(hs));
525
526 /* hugetlb_file_setup applies strict accounting */
527 if (shmflg & SHM_NORESERVE)
528 acctflag = VM_NORESERVE;
529 file = hugetlb_file_setup(name, hugesize, acctflag,
530 &shp->mlock_user, HUGETLB_SHMFS_INODE,
531 (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
532 } else {
533 /*
534 * Do not skip accounting (SHM_NORESERVE) under OVERCOMMIT_NEVER,
535 * even if it is asked for.
536 */
537 if ((shmflg & SHM_NORESERVE) &&
538 sysctl_overcommit_memory != OVERCOMMIT_NEVER)
539 acctflag = VM_NORESERVE;
540 file = shmem_file_setup(name, size, acctflag);
541 }
542 error = PTR_ERR(file);
543 if (IS_ERR(file))
544 goto no_file;
545
546 id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
547 if (id < 0) {
548 error = id;
549 goto no_id;
550 }
551
552 shp->shm_cprid = task_tgid_vnr(current);
553 shp->shm_lprid = 0;
554 shp->shm_atim = shp->shm_dtim = 0;
555 shp->shm_ctim = get_seconds();
556 shp->shm_segsz = size;
557 shp->shm_nattch = 0;
558 shp->shm_file = file;
559 shp->shm_creator = current;
560
561 /*
562 * shmid gets reported as "inode#" in /proc/pid/maps.
563 * proc-ps tools use this. Changing this will break them.
564 */
565 file_inode(file)->i_ino = shp->shm_perm.id;
566
567 ns->shm_tot += numpages;
568 error = shp->shm_perm.id;
569
570 ipc_unlock_object(&shp->shm_perm);
571 rcu_read_unlock();
572 return error;
573
574no_id:
575 if (is_file_hugepages(file) && shp->mlock_user)
576 user_shm_unlock(size, shp->mlock_user);
577 fput(file);
578no_file:
579 ipc_rcu_putref(shp, shm_rcu_free);
580 return error;
581}
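/*
 * Illustrative userspace sketch of the flag handling above (not part of
 * this file): SHM_HUGETLB requests a hugetlbfs-backed segment, a specific
 * huge page size may be encoded in the SHM_HUGE_SHIFT bits, and
 * SHM_NORESERVE asks to skip reservation accounting (for ordinary shmem
 * segments it is ignored under OVERCOMMIT_NEVER, as above):
 *
 *	int id = shmget(IPC_PRIVATE, 8 << 20,
 *			IPC_CREAT | SHM_HUGETLB | SHM_NORESERVE | 0600);
 */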
582
583/*
584 * Called with shm_ids.rwsem and ipcp locked.
585 */
586static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
587{
588 struct shmid_kernel *shp;
589
590 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
591 return security_shm_associate(shp, shmflg);
592}
593
594/*
595 * Called with shm_ids.rwsem and ipcp locked.
596 */
597static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
598 struct ipc_params *params)
599{
600 struct shmid_kernel *shp;
601
602 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
603 if (shp->shm_segsz < params->u.size)
604 return -EINVAL;
605
606 return 0;
607}
608
609SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
610{
611 struct ipc_namespace *ns;
612 struct ipc_ops shm_ops;
613 struct ipc_params shm_params;
614
615 ns = current->nsproxy->ipc_ns;
616
617 shm_ops.getnew = newseg;
618 shm_ops.associate = shm_security;
619 shm_ops.more_checks = shm_more_checks;
620
621 shm_params.key = key;
622 shm_params.flg = shmflg;
623 shm_params.u.size = size;
624
625 return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
626}
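/*
 * Illustrative userspace sketch of the syscall above (not part of this
 * file; error handling omitted): creating or looking up a segment by key.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	key_t key = ftok("/some/existing/path", 'x');
 *	int id = shmget(key, 1 << 20, IPC_CREAT | 0600);
 */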
627
628static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
629{
630 switch (version) {
631 case IPC_64:
632 return copy_to_user(buf, in, sizeof(*in));
633 case IPC_OLD:
634 {
635 struct shmid_ds out;
636
637 memset(&out, 0, sizeof(out));
638 ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
639 out.shm_segsz = in->shm_segsz;
640 out.shm_atime = in->shm_atime;
641 out.shm_dtime = in->shm_dtime;
642 out.shm_ctime = in->shm_ctime;
643 out.shm_cpid = in->shm_cpid;
644 out.shm_lpid = in->shm_lpid;
645 out.shm_nattch = in->shm_nattch;
646
647 return copy_to_user(buf, &out, sizeof(out));
648 }
649 default:
650 return -EINVAL;
651 }
652}
653
654static inline unsigned long
655copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
656{
657 switch (version) {
658 case IPC_64:
659 if (copy_from_user(out, buf, sizeof(*out)))
660 return -EFAULT;
661 return 0;
662 case IPC_OLD:
663 {
664 struct shmid_ds tbuf_old;
665
666 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
667 return -EFAULT;
668
669 out->shm_perm.uid = tbuf_old.shm_perm.uid;
670 out->shm_perm.gid = tbuf_old.shm_perm.gid;
671 out->shm_perm.mode = tbuf_old.shm_perm.mode;
672
673 return 0;
674 }
675 default:
676 return -EINVAL;
677 }
678}
679
680static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
681{
682 switch (version) {
683 case IPC_64:
684 return copy_to_user(buf, in, sizeof(*in));
685 case IPC_OLD:
686 {
687 struct shminfo out;
688
689 if (in->shmmax > INT_MAX)
690 out.shmmax = INT_MAX;
691 else
692 out.shmmax = (int)in->shmmax;
693
694 out.shmmin = in->shmmin;
695 out.shmmni = in->shmmni;
696 out.shmseg = in->shmseg;
697 out.shmall = in->shmall;
698
699 return copy_to_user(buf, &out, sizeof(out));
700 }
701 default:
702 return -EINVAL;
703 }
704}
705
706/*
707 * Calculate and add used RSS and swap pages of a shm.
708 * Called with shm_ids.rwsem held as a reader
709 */
710static void shm_add_rss_swap(struct shmid_kernel *shp,
711 unsigned long *rss_add, unsigned long *swp_add)
712{
713 struct inode *inode;
714
715 inode = file_inode(shp->shm_file);
716
717 if (is_file_hugepages(shp->shm_file)) {
718 struct address_space *mapping = inode->i_mapping;
719 struct hstate *h = hstate_file(shp->shm_file);
720 *rss_add += pages_per_huge_page(h) * mapping->nrpages;
721 } else {
722#ifdef CONFIG_SHMEM
723 struct shmem_inode_info *info = SHMEM_I(inode);
724 spin_lock(&info->lock);
725 *rss_add += inode->i_mapping->nrpages;
726 *swp_add += info->swapped;
727 spin_unlock(&info->lock);
728#else
729 *rss_add += inode->i_mapping->nrpages;
730#endif
731 }
732}
733
734/*
735 * Called with shm_ids.rwsem held as a reader
736 */
737static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
738 unsigned long *swp)
739{
740 int next_id;
741 int total, in_use;
742
743 *rss = 0;
744 *swp = 0;
745
746 in_use = shm_ids(ns).in_use;
747
748 for (total = 0, next_id = 0; total < in_use; next_id++) {
749 struct kern_ipc_perm *ipc;
750 struct shmid_kernel *shp;
751
752 ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
753 if (ipc == NULL)
754 continue;
755 shp = container_of(ipc, struct shmid_kernel, shm_perm);
756
757 shm_add_rss_swap(shp, rss, swp);
758
759 total++;
760 }
761}
762
763/*
764 * This function handles some shmctl commands which require the rwsem
765 * to be held in write mode.
766 * NOTE: the caller must hold no locks; the rwsem is taken inside this function.
767 */
768static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
769 struct shmid_ds __user *buf, int version)
770{
771 struct kern_ipc_perm *ipcp;
772 struct shmid64_ds shmid64;
773 struct shmid_kernel *shp;
774 int err;
775
776 if (cmd == IPC_SET) {
777 if (copy_shmid_from_user(&shmid64, buf, version))
778 return -EFAULT;
779 }
780
781 down_write(&shm_ids(ns).rwsem);
782 rcu_read_lock();
783
784 ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
785 &shmid64.shm_perm, 0);
786 if (IS_ERR(ipcp)) {
787 err = PTR_ERR(ipcp);
788 goto out_unlock1;
789 }
790
791 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
792
793 err = security_shm_shmctl(shp, cmd);
794 if (err)
795 goto out_unlock1;
796
797 switch (cmd) {
798 case IPC_RMID:
799 ipc_lock_object(&shp->shm_perm);
800 /* do_shm_rmid unlocks the ipc object and rcu */
801 do_shm_rmid(ns, ipcp);
802 goto out_up;
803 case IPC_SET:
804 ipc_lock_object(&shp->shm_perm);
805 err = ipc_update_perm(&shmid64.shm_perm, ipcp);
806 if (err)
807 goto out_unlock0;
808 shp->shm_ctim = get_seconds();
809 break;
810 default:
811 err = -EINVAL;
812 goto out_unlock1;
813 }
814
815out_unlock0:
816 ipc_unlock_object(&shp->shm_perm);
817out_unlock1:
818 rcu_read_unlock();
819out_up:
820 up_write(&shm_ids(ns).rwsem);
821 return err;
822}
823
824static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
825 int cmd, int version, void __user *buf)
826{
827 int err;
828 struct shmid_kernel *shp;
829
830 /* preliminary security checks for *_INFO */
831 if (cmd == IPC_INFO || cmd == SHM_INFO) {
832 err = security_shm_shmctl(NULL, cmd);
833 if (err)
834 return err;
835 }
836
837 switch (cmd) {
838 case IPC_INFO:
839 {
840 struct shminfo64 shminfo;
841
842 memset(&shminfo, 0, sizeof(shminfo));
843 shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
844 shminfo.shmmax = ns->shm_ctlmax;
845 shminfo.shmall = ns->shm_ctlall;
846
847 shminfo.shmmin = SHMMIN;
848 if (copy_shminfo_to_user(buf, &shminfo, version))
849 return -EFAULT;
850
851 down_read(&shm_ids(ns).rwsem);
852 err = ipc_get_maxid(&shm_ids(ns));
853 up_read(&shm_ids(ns).rwsem);
854
855 if (err < 0)
856 err = 0;
857 goto out;
858 }
859 case SHM_INFO:
860 {
861 struct shm_info shm_info;
862
863 memset(&shm_info, 0, sizeof(shm_info));
864 down_read(&shm_ids(ns).rwsem);
865 shm_info.used_ids = shm_ids(ns).in_use;
866 shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
867 shm_info.shm_tot = ns->shm_tot;
868 shm_info.swap_attempts = 0;
869 shm_info.swap_successes = 0;
870 err = ipc_get_maxid(&shm_ids(ns));
871 up_read(&shm_ids(ns).rwsem);
872 if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
873 err = -EFAULT;
874 goto out;
875 }
876
877 err = err < 0 ? 0 : err;
878 goto out;
879 }
880 case SHM_STAT:
881 case IPC_STAT:
882 {
883 struct shmid64_ds tbuf;
884 int result;
885
886 rcu_read_lock();
887 if (cmd == SHM_STAT) {
888 shp = shm_obtain_object(ns, shmid);
889 if (IS_ERR(shp)) {
890 err = PTR_ERR(shp);
891 goto out_unlock;
892 }
893 result = shp->shm_perm.id;
894 } else {
895 shp = shm_obtain_object_check(ns, shmid);
896 if (IS_ERR(shp)) {
897 err = PTR_ERR(shp);
898 goto out_unlock;
899 }
900 result = 0;
901 }
902
903 err = -EACCES;
904 if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
905 goto out_unlock;
906
907 err = security_shm_shmctl(shp, cmd);
908 if (err)
909 goto out_unlock;
910
911 memset(&tbuf, 0, sizeof(tbuf));
912 kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
913 tbuf.shm_segsz = shp->shm_segsz;
914 tbuf.shm_atime = shp->shm_atim;
915 tbuf.shm_dtime = shp->shm_dtim;
916 tbuf.shm_ctime = shp->shm_ctim;
917 tbuf.shm_cpid = shp->shm_cprid;
918 tbuf.shm_lpid = shp->shm_lprid;
919 tbuf.shm_nattch = shp->shm_nattch;
920 rcu_read_unlock();
921
922 if (copy_shmid_to_user(buf, &tbuf, version))
923 err = -EFAULT;
924 else
925 err = result;
926 goto out;
927 }
928 default:
929 return -EINVAL;
930 }
931
932out_unlock:
933 rcu_read_unlock();
934out:
935 return err;
936}
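/*
 * Illustrative userspace sketch of the *_INFO commands handled above (not
 * part of this file): this is roughly how ipcs(1) obtains the global
 * limits and usage figures; note the conventional casts.
 *
 *	struct shminfo info;
 *	struct shm_info usage;
 *
 *	shmctl(0, IPC_INFO, (struct shmid_ds *)&info);
 *	int max_index = shmctl(0, SHM_INFO, (struct shmid_ds *)&usage);
 */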
937
938SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
939{
940 struct shmid_kernel *shp;
941 int err, version;
942 struct ipc_namespace *ns;
943
944 if (cmd < 0 || shmid < 0)
945 return -EINVAL;
946
947 version = ipc_parse_version(&cmd);
948 ns = current->nsproxy->ipc_ns;
949
950 switch (cmd) {
951 case IPC_INFO:
952 case SHM_INFO:
953 case SHM_STAT:
954 case IPC_STAT:
955 return shmctl_nolock(ns, shmid, cmd, version, buf);
956 case IPC_RMID:
957 case IPC_SET:
958 return shmctl_down(ns, shmid, cmd, buf, version);
959 case SHM_LOCK:
960 case SHM_UNLOCK:
961 {
962 struct file *shm_file;
963
964 rcu_read_lock();
965 shp = shm_obtain_object_check(ns, shmid);
966 if (IS_ERR(shp)) {
967 err = PTR_ERR(shp);
968 goto out_unlock1;
969 }
970
971 audit_ipc_obj(&(shp->shm_perm));
972 err = security_shm_shmctl(shp, cmd);
973 if (err)
974 goto out_unlock1;
975
976 ipc_lock_object(&shp->shm_perm);
977
978 /* check if shm_destroy() is tearing down shp */
979 if (!ipc_valid_object(&shp->shm_perm)) {
980 err = -EIDRM;
981 goto out_unlock0;
982 }
983
984 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
985 kuid_t euid = current_euid();
986 if (!uid_eq(euid, shp->shm_perm.uid) &&
987 !uid_eq(euid, shp->shm_perm.cuid)) {
988 err = -EPERM;
989 goto out_unlock0;
990 }
991 if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
992 err = -EPERM;
993 goto out_unlock0;
994 }
995 }
996
997 shm_file = shp->shm_file;
998 if (is_file_hugepages(shm_file))
999 goto out_unlock0;
1000
1001 if (cmd == SHM_LOCK) {
1002 struct user_struct *user = current_user();
1003 err = shmem_lock(shm_file, 1, user);
1004 if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1005 shp->shm_perm.mode |= SHM_LOCKED;
1006 shp->mlock_user = user;
1007 }
1008 goto out_unlock0;
1009 }
1010
1011 /* SHM_UNLOCK */
1012 if (!(shp->shm_perm.mode & SHM_LOCKED))
1013 goto out_unlock0;
1014 shmem_lock(shm_file, 0, shp->mlock_user);
1015 shp->shm_perm.mode &= ~SHM_LOCKED;
1016 shp->mlock_user = NULL;
1017 get_file(shm_file);
1018 ipc_unlock_object(&shp->shm_perm);
1019 rcu_read_unlock();
1020 shmem_unlock_mapping(shm_file->f_mapping);
1021
1022 fput(shm_file);
1023 return err;
1024 }
1025 default:
1026 return -EINVAL;
1027 }
1028
1029out_unlock0:
1030 ipc_unlock_object(&shp->shm_perm);
1031out_unlock1:
1032 rcu_read_unlock();
1033 return err;
1034}
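/*
 * Illustrative userspace sketch of the per-segment commands dispatched
 * above (not part of this file; error handling omitted):
 *
 *	struct shmid_ds ds;
 *
 *	shmctl(id, IPC_STAT, &ds);	(needs read permission on the segment)
 *	shmctl(id, SHM_LOCK, NULL);	(pin the segment: sets SHM_LOCKED)
 *	shmctl(id, SHM_UNLOCK, NULL);	(allow it to be swapped again)
 *	shmctl(id, IPC_RMID, NULL);	(destroy on last detach)
 */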
1035
1036/*
1037 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1038 *
1039 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1040 * "raddr" thing points to kernel space, and there has to be a wrapper around
1041 * this.
1042 */
1043long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1044 unsigned long shmlba)
1045{
1046 struct shmid_kernel *shp;
1047 unsigned long addr;
1048 unsigned long size;
1049 struct file *file;
1050 int err;
1051 unsigned long flags;
1052 unsigned long prot;
1053 int acc_mode;
1054 struct ipc_namespace *ns;
1055 struct shm_file_data *sfd;
1056 struct path path;
1057 fmode_t f_mode;
1058 unsigned long populate = 0;
1059
1060 err = -EINVAL;
1061 if (shmid < 0)
1062 goto out;
1063 else if ((addr = (ulong)shmaddr)) {
1064 if (addr & (shmlba - 1)) {
1065 if (shmflg & SHM_RND)
1066 addr &= ~(shmlba - 1); /* round down */
1067 else
1068#ifndef __ARCH_FORCE_SHMLBA
1069 if (addr & ~PAGE_MASK)
1070#endif
1071 goto out;
1072 }
1073 flags = MAP_SHARED | MAP_FIXED;
1074 } else {
1075 if ((shmflg & SHM_REMAP))
1076 goto out;
1077
1078 flags = MAP_SHARED;
1079 }
1080
1081 if (shmflg & SHM_RDONLY) {
1082 prot = PROT_READ;
1083 acc_mode = S_IRUGO;
1084 f_mode = FMODE_READ;
1085 } else {
1086 prot = PROT_READ | PROT_WRITE;
1087 acc_mode = S_IRUGO | S_IWUGO;
1088 f_mode = FMODE_READ | FMODE_WRITE;
1089 }
1090 if (shmflg & SHM_EXEC) {
1091 prot |= PROT_EXEC;
1092 acc_mode |= S_IXUGO;
1093 }
1094
1095 /*
1096 * We cannot rely on the fs check since SYSV IPC does have an
1097 * additional creator id...
1098 */
1099 ns = current->nsproxy->ipc_ns;
1100 rcu_read_lock();
1101 shp = shm_obtain_object_check(ns, shmid);
1102 if (IS_ERR(shp)) {
1103 err = PTR_ERR(shp);
1104 goto out_unlock;
1105 }
1106
1107 err = -EACCES;
1108 if (ipcperms(ns, &shp->shm_perm, acc_mode))
1109 goto out_unlock;
1110
1111 err = security_shm_shmat(shp, shmaddr, shmflg);
1112 if (err)
1113 goto out_unlock;
1114
1115 ipc_lock_object(&shp->shm_perm);
1116
1117 /* check if shm_destroy() is tearing down shp */
1118 if (!ipc_valid_object(&shp->shm_perm)) {
1119 ipc_unlock_object(&shp->shm_perm);
1120 err = -EIDRM;
1121 goto out_unlock;
1122 }
1123
1124 path = shp->shm_file->f_path;
1125 path_get(&path);
1126 shp->shm_nattch++;
1127 size = i_size_read(path.dentry->d_inode);
1128 ipc_unlock_object(&shp->shm_perm);
1129 rcu_read_unlock();
1130
1131 err = -ENOMEM;
1132 sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1133 if (!sfd) {
1134 path_put(&path);
1135 goto out_nattch;
1136 }
1137
1138 file = alloc_file(&path, f_mode,
1139 is_file_hugepages(shp->shm_file) ?
1140 &shm_file_operations_huge :
1141 &shm_file_operations);
1142 err = PTR_ERR(file);
1143 if (IS_ERR(file)) {
1144 kfree(sfd);
1145 path_put(&path);
1146 goto out_nattch;
1147 }
1148
1149 file->private_data = sfd;
1150 file->f_mapping = shp->shm_file->f_mapping;
1151 sfd->id = shp->shm_perm.id;
1152 sfd->ns = get_ipc_ns(ns);
1153 sfd->file = shp->shm_file;
1154 sfd->vm_ops = NULL;
1155
1156 err = security_mmap_file(file, prot, flags);
1157 if (err)
1158 goto out_fput;
1159
1160 down_write(&current->mm->mmap_sem);
1161 if (addr && !(shmflg & SHM_REMAP)) {
1162 err = -EINVAL;
1163 if (find_vma_intersection(current->mm, addr, addr + size))
1164 goto invalid;
1165 /*
1166 * If shm segment goes below stack, make sure there is some
1167 * space left for the stack to grow (at least 4 pages).
1168 */
1169 if (addr < current->mm->start_stack &&
1170 addr > current->mm->start_stack - size - PAGE_SIZE * 5)
1171 goto invalid;
1172 }
1173
1174 addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
1175 *raddr = addr;
1176 err = 0;
1177 if (IS_ERR_VALUE(addr))
1178 err = (long)addr;
1179invalid:
1180 up_write(&current->mm->mmap_sem);
1181 if (populate)
1182 mm_populate(addr, populate);
1183
1184out_fput:
1185 fput(file);
1186
1187out_nattch:
1188 down_write(&shm_ids(ns).rwsem);
1189 shp = shm_lock(ns, shmid);
1190 BUG_ON(IS_ERR(shp));
1191 shp->shm_nattch--;
1192 if (shm_may_destroy(ns, shp))
1193 shm_destroy(ns, shp);
1194 else
1195 shm_unlock(shp);
1196 up_write(&shm_ids(ns).rwsem);
1197 return err;
1198
1199out_unlock:
1200 rcu_read_unlock();
1201out:
1202 return err;
1203}
1204
1205SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1206{
1207 unsigned long ret;
1208 long err;
1209
1210 err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1211 if (err)
1212 return err;
1213 force_successful_syscall_return();
1214 return (long)ret;
1215}
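/*
 * Illustrative userspace sketch of attaching (not part of this file;
 * error handling omitted): a NULL address lets the kernel pick one, an
 * explicit address must be SHMLBA-aligned unless SHM_RND is passed, and
 * SHM_RDONLY maps the segment read-only.
 *
 *	void *rw = shmat(id, NULL, 0);
 *	void *ro = shmat(id, NULL, SHM_RDONLY);
 */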
1216
1217/*
1218 * detach and kill segment if marked destroyed.
1219 * The work is done in shm_close.
1220 */
1221SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1222{
1223 struct mm_struct *mm = current->mm;
1224 struct vm_area_struct *vma;
1225 unsigned long addr = (unsigned long)shmaddr;
1226 int retval = -EINVAL;
1227#ifdef CONFIG_MMU
1228 loff_t size = 0;
1229 struct vm_area_struct *next;
1230#endif
1231
1232 if (addr & ~PAGE_MASK)
1233 return retval;
1234
1235 down_write(&mm->mmap_sem);
1236
1237 /*
1238 * This function tries to be smart and unmap shm segments that
1239 * were modified by partial mlock or munmap calls:
1240 * - It first determines the size of the shm segment that should be
1241 * unmapped: It searches for a vma that is backed by shm and that
1242 * started at address shmaddr. It records its size and then unmaps
1243 * it.
1244 * - Then it unmaps all shm vmas that started at shmaddr and that
1245 * are within the initially determined size.
1246 * Errors from do_munmap are ignored: the function only fails if
1247 * it's called with invalid parameters or if it's called to unmap
1248 * a part of a vma. Both calls in this function are for full vmas,
1249 * the parameters are directly copied from the vma itself and always
1250 * valid - therefore do_munmap cannot fail. (famous last words?)
1251 */
1252 /*
1253 * If it had been mremap()'d, the starting address would not
1254 * match the usual checks anyway. So assume all vma's are
1255 * above the starting address given.
1256 */
1257 vma = find_vma(mm, addr);
1258
1259#ifdef CONFIG_MMU
1260 while (vma) {
1261 next = vma->vm_next;
1262
1263 /*
1264 * Check if the starting address would match, i.e. it's
1265 * a fragment created by mprotect() and/or munmap(), or
1266 * otherwise it starts at this address with no hassles.
1267 */
1268 if ((vma->vm_ops == &shm_vm_ops) &&
1269 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1270
1271
1272 size = file_inode(vma->vm_file)->i_size;
1273 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1274 /*
1275 * We discovered the size of the shm segment, so
1276 * break out of here and fall through to the next
1277 * loop that uses the size information to stop
1278 * searching for matching vma's.
1279 */
1280 retval = 0;
1281 vma = next;
1282 break;
1283 }
1284 vma = next;
1285 }
1286
1287 /*
1288 * We need look no further than the maximum address a fragment
1289 * could possibly have landed at. Also cast things to loff_t to
1290 * prevent overflows and make comparisons vs. equal-width types.
1291 */
1292 size = PAGE_ALIGN(size);
1293 while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1294 next = vma->vm_next;
1295
1296 /* finding a matching vma now does not alter retval */
1297 if ((vma->vm_ops == &shm_vm_ops) &&
1298 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
1299
1300 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1301 vma = next;
1302 }
1303
1304#else /* CONFIG_MMU */
1305 /* under NOMMU conditions, the exact address to be destroyed must be
1306 * given */
1307 if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1308 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1309 retval = 0;
1310 }
1311
1312#endif
1313
1314 up_write(&mm->mmap_sem);
1315 return retval;
1316}
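/*
 * Illustrative userspace sketch of detaching (not part of this file):
 * shmdt() takes the address returned by shmat(); the segment itself is
 * only destroyed here when it was marked for destruction (IPC_RMID or
 * shm_rmid_forced) and this was the last attach.
 *
 *	void *p = shmat(id, NULL, 0);
 *	memcpy(p, "hello", 6);
 *	shmdt(p);
 */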
1317
1318#ifdef CONFIG_PROC_FS
1319static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1320{
1321 struct user_namespace *user_ns = seq_user_ns(s);
1322 struct shmid_kernel *shp = it;
1323 unsigned long rss = 0, swp = 0;
1324
1325 shm_add_rss_swap(shp, &rss, &swp);
1326
1327#if BITS_PER_LONG <= 32
1328#define SIZE_SPEC "%10lu"
1329#else
1330#define SIZE_SPEC "%21lu"
1331#endif
1332
1333 return seq_printf(s,
1334 "%10d %10d %4o " SIZE_SPEC " %5u %5u "
1335 "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
1336 SIZE_SPEC " " SIZE_SPEC "\n",
1337 shp->shm_perm.key,
1338 shp->shm_perm.id,
1339 shp->shm_perm.mode,
1340 shp->shm_segsz,
1341 shp->shm_cprid,
1342 shp->shm_lprid,
1343 shp->shm_nattch,
1344 from_kuid_munged(user_ns, shp->shm_perm.uid),
1345 from_kgid_munged(user_ns, shp->shm_perm.gid),
1346 from_kuid_munged(user_ns, shp->shm_perm.cuid),
1347 from_kgid_munged(user_ns, shp->shm_perm.cgid),
1348 shp->shm_atim,
1349 shp->shm_dtim,
1350 shp->shm_ctim,
1351 rss * PAGE_SIZE,
1352 swp * PAGE_SIZE);
1353}
1354#endif