1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/ipc/shm.c
4 * Copyright (C) 1992, 1993 Krishna Balasubramanian
5 * Many improvements/fixes by Bruno Haible.
6 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
7 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
8 *
9 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
10 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
11 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
12 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
13 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
14 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
15 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
16 *
17 * support for audit of ipc object properties and permission changes
18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
19 *
20 * namespaces support
21 * OpenVZ, SWsoft Inc.
22 * Pavel Emelianov <xemul@openvz.org>
23 *
24 * Better ipc lock (kern_ipc_perm.lock) handling
25 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
26 */
27
28#include <linux/slab.h>
29#include <linux/mm.h>
30#include <linux/hugetlb.h>
31#include <linux/shm.h>
32#include <linux/init.h>
33#include <linux/file.h>
34#include <linux/mman.h>
35#include <linux/shmem_fs.h>
36#include <linux/security.h>
37#include <linux/syscalls.h>
38#include <linux/audit.h>
39#include <linux/capability.h>
40#include <linux/ptrace.h>
41#include <linux/seq_file.h>
42#include <linux/rwsem.h>
43#include <linux/nsproxy.h>
44#include <linux/mount.h>
45#include <linux/ipc_namespace.h>
46#include <linux/rhashtable.h>
47
48#include <linux/uaccess.h>
49
50#include "util.h"
51
52struct shmid_kernel /* private to the kernel */
53{
54 struct kern_ipc_perm shm_perm;
55 struct file *shm_file;
56 unsigned long shm_nattch;
57 unsigned long shm_segsz;
58 time64_t shm_atim;
59 time64_t shm_dtim;
60 time64_t shm_ctim;
61 struct pid *shm_cprid;
62 struct pid *shm_lprid;
63 struct ucounts *mlock_ucounts;
64
65 /*
66 * The task that created the shm object; used for
67 * task_lock(shp->shm_creator).
68 */
69 struct task_struct *shm_creator;
70
71 /*
72 * List by creator. task_lock(->shm_creator) required for read/write.
73 * If list_empty(), then the creator is dead already.
74 */
75 struct list_head shm_clist;
76 struct ipc_namespace *ns;
77} __randomize_layout;
78
79/* shm_mode upper byte flags */
80#define SHM_DEST 01000 /* segment will be destroyed on last detach */
81#define SHM_LOCKED 02000 /* segment will not be swapped */
82
83struct shm_file_data {
84 int id;
85 struct ipc_namespace *ns;
86 struct file *file;
87 const struct vm_operations_struct *vm_ops;
88};
89
90#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
91
92static const struct file_operations shm_file_operations;
93static const struct vm_operations_struct shm_vm_ops;
94
95#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
96
97#define shm_unlock(shp) \
98 ipc_unlock(&(shp)->shm_perm)
99
100static int newseg(struct ipc_namespace *, struct ipc_params *);
101static void shm_open(struct vm_area_struct *vma);
102static void shm_close(struct vm_area_struct *vma);
103static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
104#ifdef CONFIG_PROC_FS
105static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
106#endif
107
108void shm_init_ns(struct ipc_namespace *ns)
109{
110 ns->shm_ctlmax = SHMMAX;
111 ns->shm_ctlall = SHMALL;
112 ns->shm_ctlmni = SHMMNI;
113 ns->shm_rmid_forced = 0;
114 ns->shm_tot = 0;
115 ipc_init_ids(&shm_ids(ns));
116}
117
118/*
119 * Called with shm_ids.rwsem (writer) and the shp structure locked.
120 * Only shm_ids.rwsem remains locked on exit.
121 */
122static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
123{
124 struct shmid_kernel *shp;
125
126 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
127 WARN_ON(ns != shp->ns);
128
129 if (shp->shm_nattch) {
130 shp->shm_perm.mode |= SHM_DEST;
131 /* Do not find it any more */
132 ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
133 shm_unlock(shp);
134 } else
135 shm_destroy(ns, shp);
136}
137
138#ifdef CONFIG_IPC_NS
139void shm_exit_ns(struct ipc_namespace *ns)
140{
141 free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
142 idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
143 rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
144}
145#endif
146
147static int __init ipc_ns_init(void)
148{
149 shm_init_ns(&init_ipc_ns);
150 return 0;
151}
152
153pure_initcall(ipc_ns_init);
154
155void __init shm_init(void)
156{
157 ipc_init_proc_interface("sysvipc/shm",
158#if BITS_PER_LONG <= 32
159 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
160#else
161 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
162#endif
163 IPC_SHM_IDS, sysvipc_shm_proc_show);
164}
165
166static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
167{
168 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
169
170 if (IS_ERR(ipcp))
171 return ERR_CAST(ipcp);
172
173 return container_of(ipcp, struct shmid_kernel, shm_perm);
174}
175
176static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
177{
178 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
179
180 if (IS_ERR(ipcp))
181 return ERR_CAST(ipcp);
182
183 return container_of(ipcp, struct shmid_kernel, shm_perm);
184}
185
186/*
187 * shm_lock_(check_) routines are called in the paths where the rwsem
188 * is not necessarily held.
189 */
190static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
191{
192 struct kern_ipc_perm *ipcp;
193
194 rcu_read_lock();
195 ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
196 if (IS_ERR(ipcp))
197 goto err;
198
199 ipc_lock_object(ipcp);
200 /*
201 * ipc_rmid() may have already freed the ID while ipc_lock_object()
202 * was spinning: here verify that the structure is still valid.
203 * Upon races with RMID, return -EIDRM, thus indicating that
204 * the ID points to a removed identifier.
205 */
206 if (ipc_valid_object(ipcp)) {
207 /* return a locked ipc object upon success */
208 return container_of(ipcp, struct shmid_kernel, shm_perm);
209 }
210
211 ipc_unlock_object(ipcp);
212 ipcp = ERR_PTR(-EIDRM);
213err:
214 rcu_read_unlock();
215 /*
216 * Callers of shm_lock() must validate the status of the returned ipc
217 * object pointer and error out as appropriate.
218 */
219 return ERR_CAST(ipcp);
220}
221
222static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
223{
224 rcu_read_lock();
225 ipc_lock_object(&ipcp->shm_perm);
226}
227
228static void shm_rcu_free(struct rcu_head *head)
229{
230 struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
231 rcu);
232 struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
233 shm_perm);
234 security_shm_free(&shp->shm_perm);
235 kfree(shp);
236}
237
238/*
239 * It has to be called with shp locked.
240 * It must be called before ipc_rmid()
241 */
242static inline void shm_clist_rm(struct shmid_kernel *shp)
243{
244 struct task_struct *creator;
245
246 /* ensure that shm_creator does not disappear */
247 rcu_read_lock();
248
249 /*
250 * A concurrent exit_shm may do a list_del_init() as well.
251 * Just do nothing if exit_shm already did the work
252 */
253 if (!list_empty(&shp->shm_clist)) {
254 /*
255 * shp->shm_creator is guaranteed to be valid *only*
256 * if shp->shm_clist is not empty.
257 */
258 creator = shp->shm_creator;
259
260 task_lock(creator);
261 /*
262 * list_del_init() is a nop if the entry was already removed
263 * from the list.
264 */
265 list_del_init(&shp->shm_clist);
266 task_unlock(creator);
267 }
268 rcu_read_unlock();
269}
270
271static inline void shm_rmid(struct shmid_kernel *s)
272{
273 shm_clist_rm(s);
274 ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
275}
276
277
278static int __shm_open(struct shm_file_data *sfd)
279{
280 struct shmid_kernel *shp;
281
282 shp = shm_lock(sfd->ns, sfd->id);
283
284 if (IS_ERR(shp))
285 return PTR_ERR(shp);
286
287 if (shp->shm_file != sfd->file) {
288 /* ID was reused */
289 shm_unlock(shp);
290 return -EINVAL;
291 }
292
293 shp->shm_atim = ktime_get_real_seconds();
294 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
295 shp->shm_nattch++;
296 shm_unlock(shp);
297 return 0;
298}
299
300/* This is called by fork, once for every shm attach. */
301static void shm_open(struct vm_area_struct *vma)
302{
303 struct file *file = vma->vm_file;
304 struct shm_file_data *sfd = shm_file_data(file);
305 int err;
306
307 /* Always call underlying open if present */
308 if (sfd->vm_ops->open)
309 sfd->vm_ops->open(vma);
310
311 err = __shm_open(sfd);
312 /*
313 * We raced in the idr lookup or with shm_destroy().
314 * Either way, the ID is busted.
315 */
316 WARN_ON_ONCE(err);
317}
318
319/*
320 * shm_destroy - free the struct shmid_kernel
321 *
322 * @ns: namespace
323 * @shp: struct to free
324 *
325 * It has to be called with shp and shm_ids.rwsem (writer) locked,
326 * but returns with shp unlocked and freed.
327 */
328static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
329{
330 struct file *shm_file;
331
332 shm_file = shp->shm_file;
333 shp->shm_file = NULL;
334 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
335 shm_rmid(shp);
336 shm_unlock(shp);
337 if (!is_file_hugepages(shm_file))
338 shmem_lock(shm_file, 0, shp->mlock_ucounts);
339 fput(shm_file);
340 ipc_update_pid(&shp->shm_cprid, NULL);
341 ipc_update_pid(&shp->shm_lprid, NULL);
342 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
343}
344
345/*
346 * shm_may_destroy - identifies whether shm segment should be destroyed now
347 *
348 * Returns true if and only if there are no active users of the segment and
349 * one of the following is true:
350 *
351 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
352 *
353 * 2) sysctl kernel.shm_rmid_forced is set to 1.
354 */
355static bool shm_may_destroy(struct shmid_kernel *shp)
356{
357 return (shp->shm_nattch == 0) &&
358 (shp->ns->shm_rmid_forced ||
359 (shp->shm_perm.mode & SHM_DEST));
360}
361
362/*
363 * remove the attach descriptor vma.
364 * free memory for segment if it is marked destroyed.
365 * The descriptor has already been removed from the current->mm->mmap list
366 * and will later be kfree()d.
367 */
368static void __shm_close(struct shm_file_data *sfd)
369{
370 struct shmid_kernel *shp;
371 struct ipc_namespace *ns = sfd->ns;
372
373 down_write(&shm_ids(ns).rwsem);
374 /* remove from the list of attaches of the shm segment */
375 shp = shm_lock(ns, sfd->id);
376
377 /*
378 * We raced in the idr lookup or with shm_destroy().
379 * Either way, the ID is busted.
380 */
381 if (WARN_ON_ONCE(IS_ERR(shp)))
382 goto done; /* no-op */
383
384 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
385 shp->shm_dtim = ktime_get_real_seconds();
386 shp->shm_nattch--;
387 if (shm_may_destroy(shp))
388 shm_destroy(ns, shp);
389 else
390 shm_unlock(shp);
391done:
392 up_write(&shm_ids(ns).rwsem);
393}
394
395static void shm_close(struct vm_area_struct *vma)
396{
397 struct file *file = vma->vm_file;
398 struct shm_file_data *sfd = shm_file_data(file);
399
400 /* Always call underlying close if present */
401 if (sfd->vm_ops->close)
402 sfd->vm_ops->close(vma);
403
404 __shm_close(sfd);
405}
406
407/* Called with ns->shm_ids(ns).rwsem locked */
408static int shm_try_destroy_orphaned(int id, void *p, void *data)
409{
410 struct ipc_namespace *ns = data;
411 struct kern_ipc_perm *ipcp = p;
412 struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
413
414 /*
415 * We want to destroy segments without users and with an already
416 * exit'ed originating process.
417 *
418 * As shp->* are changed under rwsem, it's safe to skip shp locking.
419 */
420 if (!list_empty(&shp->shm_clist))
421 return 0;
422
423 if (shm_may_destroy(shp)) {
424 shm_lock_by_ptr(shp);
425 shm_destroy(ns, shp);
426 }
427 return 0;
428}
429
430void shm_destroy_orphaned(struct ipc_namespace *ns)
431{
432 down_write(&shm_ids(ns).rwsem);
433 if (shm_ids(ns).in_use)
434 idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
435 up_write(&shm_ids(ns).rwsem);
436}
437
438/* Locking assumes this will only be called with task == current */
439void exit_shm(struct task_struct *task)
440{
441 for (;;) {
442 struct shmid_kernel *shp;
443 struct ipc_namespace *ns;
444
445 task_lock(task);
446
447 if (list_empty(&task->sysvshm.shm_clist)) {
448 task_unlock(task);
449 break;
450 }
451
452 shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
453 shm_clist);
454
455 /*
456 * 1) Get a pointer to the ipc namespace. It is worth noting
457 * that this pointer is guaranteed to be valid because
458 * shp lifetime is always shorter than namespace lifetime
459 * in which shp lives.
460 * We have taken task_lock(), so shp won't be freed.
461 */
462 ns = shp->ns;
463
464 /*
465 * 2) If kernel.shm_rmid_forced is not set then only keep track of
466 * which shmids are orphaned, so that a later set of the sysctl
467 * can clean them up.
468 */
469 if (!ns->shm_rmid_forced)
470 goto unlink_continue;
471
472 /*
473 * 3) get a reference to the namespace.
474 * The refcount could be already 0. If it is 0, then
475 * the shm objects will be freed by free_ipc_work().
476 */
477 ns = get_ipc_ns_not_zero(ns);
478 if (!ns) {
479unlink_continue:
480 list_del_init(&shp->shm_clist);
481 task_unlock(task);
482 continue;
483 }
484
485 /*
486 * 4) get a reference to shp.
487 * This cannot fail: shm_clist_rm() is called before
488 * ipc_rmid(), thus the refcount cannot be 0.
489 */
490 WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
491
492 /*
493 * 5) unlink the shm segment from the list of segments
494 * created by current.
495 * This must be done last. After unlinking,
496 * only the refcounts obtained above prevent IPC_RMID
497 * from destroying the segment or the namespace.
498 */
499 list_del_init(&shp->shm_clist);
500
501 task_unlock(task);
502
503 /*
504 * 6) we have all references
505 * Thus lock & if needed destroy shp.
506 */
507 down_write(&shm_ids(ns).rwsem);
508 shm_lock_by_ptr(shp);
509 /*
510 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
511 * safe to call ipc_rcu_putref here
512 */
513 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
514
515 if (ipc_valid_object(&shp->shm_perm)) {
516 if (shm_may_destroy(shp))
517 shm_destroy(ns, shp);
518 else
519 shm_unlock(shp);
520 } else {
521 /*
522 * Someone else deleted the shp from namespace
523 * idr/kht while we have waited.
524 * Just unlock and continue.
525 */
526 shm_unlock(shp);
527 }
528
529 up_write(&shm_ids(ns).rwsem);
530 put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
531 }
532}
533
534static vm_fault_t shm_fault(struct vm_fault *vmf)
535{
536 struct file *file = vmf->vma->vm_file;
537 struct shm_file_data *sfd = shm_file_data(file);
538
539 return sfd->vm_ops->fault(vmf);
540}
541
542static int shm_may_split(struct vm_area_struct *vma, unsigned long addr)
543{
544 struct file *file = vma->vm_file;
545 struct shm_file_data *sfd = shm_file_data(file);
546
547 if (sfd->vm_ops->may_split)
548 return sfd->vm_ops->may_split(vma, addr);
549
550 return 0;
551}
552
553static unsigned long shm_pagesize(struct vm_area_struct *vma)
554{
555 struct file *file = vma->vm_file;
556 struct shm_file_data *sfd = shm_file_data(file);
557
558 if (sfd->vm_ops->pagesize)
559 return sfd->vm_ops->pagesize(vma);
560
561 return PAGE_SIZE;
562}
563
564#ifdef CONFIG_NUMA
565static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
566{
567 struct file *file = vma->vm_file;
568 struct shm_file_data *sfd = shm_file_data(file);
569 int err = 0;
570
571 if (sfd->vm_ops->set_policy)
572 err = sfd->vm_ops->set_policy(vma, new);
573 return err;
574}
575
576static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
577 unsigned long addr)
578{
579 struct file *file = vma->vm_file;
580 struct shm_file_data *sfd = shm_file_data(file);
581 struct mempolicy *pol = NULL;
582
583 if (sfd->vm_ops->get_policy)
584 pol = sfd->vm_ops->get_policy(vma, addr);
585 else if (vma->vm_policy)
586 pol = vma->vm_policy;
587
588 return pol;
589}
590#endif
591
592static int shm_mmap(struct file *file, struct vm_area_struct *vma)
593{
594 struct shm_file_data *sfd = shm_file_data(file);
595 int ret;
596
597 /*
598 * In case of remap_file_pages() emulation, the file can represent an
599 * IPC ID that was removed, and possibly even reused by another shm
600 * segment already. Propagate this case as an error to caller.
601 */
602 ret = __shm_open(sfd);
603 if (ret)
604 return ret;
605
606 ret = call_mmap(sfd->file, vma);
607 if (ret) {
608 __shm_close(sfd);
609 return ret;
610 }
611 sfd->vm_ops = vma->vm_ops;
612#ifdef CONFIG_MMU
613 WARN_ON(!sfd->vm_ops->fault);
614#endif
615 vma->vm_ops = &shm_vm_ops;
616 return 0;
617}
618
619static int shm_release(struct inode *ino, struct file *file)
620{
621 struct shm_file_data *sfd = shm_file_data(file);
622
623 put_ipc_ns(sfd->ns);
624 fput(sfd->file);
625 shm_file_data(file) = NULL;
626 kfree(sfd);
627 return 0;
628}
629
630static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
631{
632 struct shm_file_data *sfd = shm_file_data(file);
633
634 if (!sfd->file->f_op->fsync)
635 return -EINVAL;
636 return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
637}
638
639static long shm_fallocate(struct file *file, int mode, loff_t offset,
640 loff_t len)
641{
642 struct shm_file_data *sfd = shm_file_data(file);
643
644 if (!sfd->file->f_op->fallocate)
645 return -EOPNOTSUPP;
646 return sfd->file->f_op->fallocate(file, mode, offset, len);
647}
648
649static unsigned long shm_get_unmapped_area(struct file *file,
650 unsigned long addr, unsigned long len, unsigned long pgoff,
651 unsigned long flags)
652{
653 struct shm_file_data *sfd = shm_file_data(file);
654
655 return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
656 pgoff, flags);
657}
658
659static const struct file_operations shm_file_operations = {
660 .mmap = shm_mmap,
661 .fsync = shm_fsync,
662 .release = shm_release,
663 .get_unmapped_area = shm_get_unmapped_area,
664 .llseek = noop_llseek,
665 .fallocate = shm_fallocate,
666};
667
668/*
669 * shm_file_operations_huge is now identical to shm_file_operations,
670 * but we keep it distinct for the sake of is_file_shm_hugepages().
671 */
672static const struct file_operations shm_file_operations_huge = {
673 .mmap = shm_mmap,
674 .fsync = shm_fsync,
675 .release = shm_release,
676 .get_unmapped_area = shm_get_unmapped_area,
677 .llseek = noop_llseek,
678 .fallocate = shm_fallocate,
679};
680
681bool is_file_shm_hugepages(struct file *file)
682{
683 return file->f_op == &shm_file_operations_huge;
684}
685
686static const struct vm_operations_struct shm_vm_ops = {
687 .open = shm_open, /* callback for a new vm-area open */
688 .close = shm_close, /* callback for when the vm-area is released */
689 .fault = shm_fault,
690 .may_split = shm_may_split,
691 .pagesize = shm_pagesize,
692#if defined(CONFIG_NUMA)
693 .set_policy = shm_set_policy,
694 .get_policy = shm_get_policy,
695#endif
696};
697
698/**
699 * newseg - Create a new shared memory segment
700 * @ns: namespace
701 * @params: ptr to the structure that contains key, size and shmflg
702 *
703 * Called with shm_ids.rwsem held as a writer.
704 */
705static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
706{
707 key_t key = params->key;
708 int shmflg = params->flg;
709 size_t size = params->u.size;
710 int error;
711 struct shmid_kernel *shp;
712 size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
713 struct file *file;
714 char name[13];
715 vm_flags_t acctflag = 0;
716
717 if (size < SHMMIN || size > ns->shm_ctlmax)
718 return -EINVAL;
719
720 if (numpages << PAGE_SHIFT < size)
721 return -ENOSPC;
722
723 if (ns->shm_tot + numpages < ns->shm_tot ||
724 ns->shm_tot + numpages > ns->shm_ctlall)
725 return -ENOSPC;
726
727 shp = kmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
728 if (unlikely(!shp))
729 return -ENOMEM;
730
731 shp->shm_perm.key = key;
732 shp->shm_perm.mode = (shmflg & S_IRWXUGO);
733 shp->mlock_ucounts = NULL;
734
735 shp->shm_perm.security = NULL;
736 error = security_shm_alloc(&shp->shm_perm);
737 if (error) {
738 kfree(shp);
739 return error;
740 }
741
742 sprintf(name, "SYSV%08x", key);
743 if (shmflg & SHM_HUGETLB) {
744 struct hstate *hs;
745 size_t hugesize;
746
747 hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
748 if (!hs) {
749 error = -EINVAL;
750 goto no_file;
751 }
752 hugesize = ALIGN(size, huge_page_size(hs));
753
754 /* hugetlb_file_setup applies strict accounting */
755 if (shmflg & SHM_NORESERVE)
756 acctflag = VM_NORESERVE;
757 file = hugetlb_file_setup(name, hugesize, acctflag,
758 HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
759 } else {
760 /*
761 * Do not allow no accounting for OVERCOMMIT_NEVER, even
762 * if it's asked for.
763 */
764 if ((shmflg & SHM_NORESERVE) &&
765 sysctl_overcommit_memory != OVERCOMMIT_NEVER)
766 acctflag = VM_NORESERVE;
767 file = shmem_kernel_file_setup(name, size, acctflag);
768 }
769 error = PTR_ERR(file);
770 if (IS_ERR(file))
771 goto no_file;
772
773 shp->shm_cprid = get_pid(task_tgid(current));
774 shp->shm_lprid = NULL;
775 shp->shm_atim = shp->shm_dtim = 0;
776 shp->shm_ctim = ktime_get_real_seconds();
777 shp->shm_segsz = size;
778 shp->shm_nattch = 0;
779 shp->shm_file = file;
780 shp->shm_creator = current;
781
782 /* ipc_addid() locks shp upon success. */
783 error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
784 if (error < 0)
785 goto no_id;
786
787 shp->ns = ns;
788
789 task_lock(current);
790 list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
791 task_unlock(current);
792
793 /*
794 * shmid gets reported as "inode#" in /proc/pid/maps.
795 * proc-ps tools use this. Changing this will break them.
796 */
797 file_inode(file)->i_ino = shp->shm_perm.id;
798
799 ns->shm_tot += numpages;
800 error = shp->shm_perm.id;
801
802 ipc_unlock_object(&shp->shm_perm);
803 rcu_read_unlock();
804 return error;
805
806no_id:
807 ipc_update_pid(&shp->shm_cprid, NULL);
808 ipc_update_pid(&shp->shm_lprid, NULL);
809 fput(file);
810 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
811 return error;
812no_file:
813 call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
814 return error;
815}
816
817/*
818 * Called with shm_ids.rwsem and ipcp locked.
819 */
820static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
821{
822 struct shmid_kernel *shp;
823
824 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
825 if (shp->shm_segsz < params->u.size)
826 return -EINVAL;
827
828 return 0;
829}
830
831long ksys_shmget(key_t key, size_t size, int shmflg)
832{
833 struct ipc_namespace *ns;
834 static const struct ipc_ops shm_ops = {
835 .getnew = newseg,
836 .associate = security_shm_associate,
837 .more_checks = shm_more_checks,
838 };
839 struct ipc_params shm_params;
840
841 ns = current->nsproxy->ipc_ns;
842
843 shm_params.key = key;
844 shm_params.flg = shmflg;
845 shm_params.u.size = size;
846
847 return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
848}
849
850SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
851{
852 return ksys_shmget(key, size, shmflg);
853}
854
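/*
 * Illustrative sketch only (not part of this file): the minimal userspace
 * lifecycle served by the syscalls implemented here.  The segment size
 * (4096) and mode (0600) are arbitrary example values.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);        - attach read-write
 *	p[0] = 'x';                          - use the mapping
 *	shmdt(p);                            - detach; the segment persists
 *	shmctl(id, IPC_RMID, NULL);          - destroyed once shm_nattch is 0
 */
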
855static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
856{
857 switch (version) {
858 case IPC_64:
859 return copy_to_user(buf, in, sizeof(*in));
860 case IPC_OLD:
861 {
862 struct shmid_ds out;
863
864 memset(&out, 0, sizeof(out));
865 ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
866 out.shm_segsz = in->shm_segsz;
867 out.shm_atime = in->shm_atime;
868 out.shm_dtime = in->shm_dtime;
869 out.shm_ctime = in->shm_ctime;
870 out.shm_cpid = in->shm_cpid;
871 out.shm_lpid = in->shm_lpid;
872 out.shm_nattch = in->shm_nattch;
873
874 return copy_to_user(buf, &out, sizeof(out));
875 }
876 default:
877 return -EINVAL;
878 }
879}
880
881static inline unsigned long
882copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
883{
884 switch (version) {
885 case IPC_64:
886 if (copy_from_user(out, buf, sizeof(*out)))
887 return -EFAULT;
888 return 0;
889 case IPC_OLD:
890 {
891 struct shmid_ds tbuf_old;
892
893 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
894 return -EFAULT;
895
896 out->shm_perm.uid = tbuf_old.shm_perm.uid;
897 out->shm_perm.gid = tbuf_old.shm_perm.gid;
898 out->shm_perm.mode = tbuf_old.shm_perm.mode;
899
900 return 0;
901 }
902 default:
903 return -EINVAL;
904 }
905}
906
907static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
908{
909 switch (version) {
910 case IPC_64:
911 return copy_to_user(buf, in, sizeof(*in));
912 case IPC_OLD:
913 {
914 struct shminfo out;
915
916 if (in->shmmax > INT_MAX)
917 out.shmmax = INT_MAX;
918 else
919 out.shmmax = (int)in->shmmax;
920
921 out.shmmin = in->shmmin;
922 out.shmmni = in->shmmni;
923 out.shmseg = in->shmseg;
924 out.shmall = in->shmall;
925
926 return copy_to_user(buf, &out, sizeof(out));
927 }
928 default:
929 return -EINVAL;
930 }
931}
932
933/*
934 * Calculate and add used RSS and swap pages of a shm.
935 * Called with shm_ids.rwsem held as a reader
936 */
937static void shm_add_rss_swap(struct shmid_kernel *shp,
938 unsigned long *rss_add, unsigned long *swp_add)
939{
940 struct inode *inode;
941
942 inode = file_inode(shp->shm_file);
943
944 if (is_file_hugepages(shp->shm_file)) {
945 struct address_space *mapping = inode->i_mapping;
946 struct hstate *h = hstate_file(shp->shm_file);
947 *rss_add += pages_per_huge_page(h) * mapping->nrpages;
948 } else {
949#ifdef CONFIG_SHMEM
950 struct shmem_inode_info *info = SHMEM_I(inode);
951
952 spin_lock_irq(&info->lock);
953 *rss_add += inode->i_mapping->nrpages;
954 *swp_add += info->swapped;
955 spin_unlock_irq(&info->lock);
956#else
957 *rss_add += inode->i_mapping->nrpages;
958#endif
959 }
960}
961
962/*
963 * Called with shm_ids.rwsem held as a reader
964 */
965static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
966 unsigned long *swp)
967{
968 int next_id;
969 int total, in_use;
970
971 *rss = 0;
972 *swp = 0;
973
974 in_use = shm_ids(ns).in_use;
975
976 for (total = 0, next_id = 0; total < in_use; next_id++) {
977 struct kern_ipc_perm *ipc;
978 struct shmid_kernel *shp;
979
980 ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
981 if (ipc == NULL)
982 continue;
983 shp = container_of(ipc, struct shmid_kernel, shm_perm);
984
985 shm_add_rss_swap(shp, rss, swp);
986
987 total++;
988 }
989}
990
991/*
992 * This function handles some shmctl commands which require the rwsem
993 * to be held in write mode.
994 * NOTE: no locks must be held, the rwsem is taken inside this function.
995 */
996static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
997 struct shmid64_ds *shmid64)
998{
999 struct kern_ipc_perm *ipcp;
1000 struct shmid_kernel *shp;
1001 int err;
1002
1003 down_write(&shm_ids(ns).rwsem);
1004 rcu_read_lock();
1005
1006 ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd,
1007 &shmid64->shm_perm, 0);
1008 if (IS_ERR(ipcp)) {
1009 err = PTR_ERR(ipcp);
1010 goto out_unlock1;
1011 }
1012
1013 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1014
1015 err = security_shm_shmctl(&shp->shm_perm, cmd);
1016 if (err)
1017 goto out_unlock1;
1018
1019 switch (cmd) {
1020 case IPC_RMID:
1021 ipc_lock_object(&shp->shm_perm);
1022 /* do_shm_rmid unlocks the ipc object and rcu */
1023 do_shm_rmid(ns, ipcp);
1024 goto out_up;
1025 case IPC_SET:
1026 ipc_lock_object(&shp->shm_perm);
1027 err = ipc_update_perm(&shmid64->shm_perm, ipcp);
1028 if (err)
1029 goto out_unlock0;
1030 shp->shm_ctim = ktime_get_real_seconds();
1031 break;
1032 default:
1033 err = -EINVAL;
1034 goto out_unlock1;
1035 }
1036
1037out_unlock0:
1038 ipc_unlock_object(&shp->shm_perm);
1039out_unlock1:
1040 rcu_read_unlock();
1041out_up:
1042 up_write(&shm_ids(ns).rwsem);
1043 return err;
1044}
1045
1046static int shmctl_ipc_info(struct ipc_namespace *ns,
1047 struct shminfo64 *shminfo)
1048{
1049 int err = security_shm_shmctl(NULL, IPC_INFO);
1050 if (!err) {
1051 memset(shminfo, 0, sizeof(*shminfo));
1052 shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
1053 shminfo->shmmax = ns->shm_ctlmax;
1054 shminfo->shmall = ns->shm_ctlall;
1055 shminfo->shmmin = SHMMIN;
1056 down_read(&shm_ids(ns).rwsem);
1057 err = ipc_get_maxidx(&shm_ids(ns));
1058 up_read(&shm_ids(ns).rwsem);
1059 if (err < 0)
1060 err = 0;
1061 }
1062 return err;
1063}
1064
1065static int shmctl_shm_info(struct ipc_namespace *ns,
1066 struct shm_info *shm_info)
1067{
1068 int err = security_shm_shmctl(NULL, SHM_INFO);
1069 if (!err) {
1070 memset(shm_info, 0, sizeof(*shm_info));
1071 down_read(&shm_ids(ns).rwsem);
1072 shm_info->used_ids = shm_ids(ns).in_use;
1073 shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
1074 shm_info->shm_tot = ns->shm_tot;
1075 shm_info->swap_attempts = 0;
1076 shm_info->swap_successes = 0;
1077 err = ipc_get_maxidx(&shm_ids(ns));
1078 up_read(&shm_ids(ns).rwsem);
1079 if (err < 0)
1080 err = 0;
1081 }
1082 return err;
1083}
1084
1085static int shmctl_stat(struct ipc_namespace *ns, int shmid,
1086 int cmd, struct shmid64_ds *tbuf)
1087{
1088 struct shmid_kernel *shp;
1089 int err;
1090
1091 memset(tbuf, 0, sizeof(*tbuf));
1092
1093 rcu_read_lock();
1094 if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
1095 shp = shm_obtain_object(ns, shmid);
1096 if (IS_ERR(shp)) {
1097 err = PTR_ERR(shp);
1098 goto out_unlock;
1099 }
1100 } else { /* IPC_STAT */
1101 shp = shm_obtain_object_check(ns, shmid);
1102 if (IS_ERR(shp)) {
1103 err = PTR_ERR(shp);
1104 goto out_unlock;
1105 }
1106 }
1107
1108 /*
1109 * Semantically SHM_STAT_ANY ought to be identical to
1110 * that functionality provided by the /proc/sysvipc/
1111 * interface. As such, only audit these calls and
1112 * do not do traditional S_IRUGO permission checks on
1113 * the ipc object.
1114 */
1115 if (cmd == SHM_STAT_ANY)
1116 audit_ipc_obj(&shp->shm_perm);
1117 else {
1118 err = -EACCES;
1119 if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
1120 goto out_unlock;
1121 }
1122
1123 err = security_shm_shmctl(&shp->shm_perm, cmd);
1124 if (err)
1125 goto out_unlock;
1126
1127 ipc_lock_object(&shp->shm_perm);
1128
1129 if (!ipc_valid_object(&shp->shm_perm)) {
1130 ipc_unlock_object(&shp->shm_perm);
1131 err = -EIDRM;
1132 goto out_unlock;
1133 }
1134
1135 kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
1136 tbuf->shm_segsz = shp->shm_segsz;
1137 tbuf->shm_atime = shp->shm_atim;
1138 tbuf->shm_dtime = shp->shm_dtim;
1139 tbuf->shm_ctime = shp->shm_ctim;
1140#ifndef CONFIG_64BIT
1141 tbuf->shm_atime_high = shp->shm_atim >> 32;
1142 tbuf->shm_dtime_high = shp->shm_dtim >> 32;
1143 tbuf->shm_ctime_high = shp->shm_ctim >> 32;
1144#endif
1145 tbuf->shm_cpid = pid_vnr(shp->shm_cprid);
1146 tbuf->shm_lpid = pid_vnr(shp->shm_lprid);
1147 tbuf->shm_nattch = shp->shm_nattch;
1148
1149 if (cmd == IPC_STAT) {
1150 /*
1151 * As defined in SUS:
1152 * Return 0 on success
1153 */
1154 err = 0;
1155 } else {
1156 /*
1157 * SHM_STAT and SHM_STAT_ANY (both Linux specific)
1158 * Return the full id, including the sequence number
1159 */
1160 err = shp->shm_perm.id;
1161 }
1162
1163 ipc_unlock_object(&shp->shm_perm);
1164out_unlock:
1165 rcu_read_unlock();
1166 return err;
1167}
1168
1169static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
1170{
1171 struct shmid_kernel *shp;
1172 struct file *shm_file;
1173 int err;
1174
1175 rcu_read_lock();
1176 shp = shm_obtain_object_check(ns, shmid);
1177 if (IS_ERR(shp)) {
1178 err = PTR_ERR(shp);
1179 goto out_unlock1;
1180 }
1181
1182 audit_ipc_obj(&(shp->shm_perm));
1183 err = security_shm_shmctl(&shp->shm_perm, cmd);
1184 if (err)
1185 goto out_unlock1;
1186
1187 ipc_lock_object(&shp->shm_perm);
1188
1189 /* check if shm_destroy() is tearing down shp */
1190 if (!ipc_valid_object(&shp->shm_perm)) {
1191 err = -EIDRM;
1192 goto out_unlock0;
1193 }
1194
1195 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1196 kuid_t euid = current_euid();
1197
1198 if (!uid_eq(euid, shp->shm_perm.uid) &&
1199 !uid_eq(euid, shp->shm_perm.cuid)) {
1200 err = -EPERM;
1201 goto out_unlock0;
1202 }
1203 if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1204 err = -EPERM;
1205 goto out_unlock0;
1206 }
1207 }
1208
1209 shm_file = shp->shm_file;
1210 if (is_file_hugepages(shm_file))
1211 goto out_unlock0;
1212
1213 if (cmd == SHM_LOCK) {
1214 struct ucounts *ucounts = current_ucounts();
1215
1216 err = shmem_lock(shm_file, 1, ucounts);
1217 if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1218 shp->shm_perm.mode |= SHM_LOCKED;
1219 shp->mlock_ucounts = ucounts;
1220 }
1221 goto out_unlock0;
1222 }
1223
1224 /* SHM_UNLOCK */
1225 if (!(shp->shm_perm.mode & SHM_LOCKED))
1226 goto out_unlock0;
1227 shmem_lock(shm_file, 0, shp->mlock_ucounts);
1228 shp->shm_perm.mode &= ~SHM_LOCKED;
1229 shp->mlock_ucounts = NULL;
1230 get_file(shm_file);
1231 ipc_unlock_object(&shp->shm_perm);
1232 rcu_read_unlock();
1233 shmem_unlock_mapping(shm_file->f_mapping);
1234
1235 fput(shm_file);
1236 return err;
1237
1238out_unlock0:
1239 ipc_unlock_object(&shp->shm_perm);
1240out_unlock1:
1241 rcu_read_unlock();
1242 return err;
1243}
1244
1245static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version)
1246{
1247 int err;
1248 struct ipc_namespace *ns;
1249 struct shmid64_ds sem64;
1250
1251 if (cmd < 0 || shmid < 0)
1252 return -EINVAL;
1253
1254 ns = current->nsproxy->ipc_ns;
1255
1256 switch (cmd) {
1257 case IPC_INFO: {
1258 struct shminfo64 shminfo;
1259 err = shmctl_ipc_info(ns, &shminfo);
1260 if (err < 0)
1261 return err;
1262 if (copy_shminfo_to_user(buf, &shminfo, version))
1263 err = -EFAULT;
1264 return err;
1265 }
1266 case SHM_INFO: {
1267 struct shm_info shm_info;
1268 err = shmctl_shm_info(ns, &shm_info);
1269 if (err < 0)
1270 return err;
1271 if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
1272 err = -EFAULT;
1273 return err;
1274 }
1275 case SHM_STAT:
1276 case SHM_STAT_ANY:
1277 case IPC_STAT: {
1278 err = shmctl_stat(ns, shmid, cmd, &sem64);
1279 if (err < 0)
1280 return err;
1281 if (copy_shmid_to_user(buf, &sem64, version))
1282 err = -EFAULT;
1283 return err;
1284 }
1285 case IPC_SET:
1286 if (copy_shmid_from_user(&sem64, buf, version))
1287 return -EFAULT;
1288 fallthrough;
1289 case IPC_RMID:
1290 return shmctl_down(ns, shmid, cmd, &sem64);
1291 case SHM_LOCK:
1292 case SHM_UNLOCK:
1293 return shmctl_do_lock(ns, shmid, cmd);
1294 default:
1295 return -EINVAL;
1296 }
1297}
1298
1299SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1300{
1301 return ksys_shmctl(shmid, cmd, buf, IPC_64);
1302}
1303
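/*
 * Illustrative sketch only (not part of this file): how the *_STAT commands
 * handled above look from userspace.  IPC_STAT takes a shm id and returns 0
 * on success, while SHM_STAT/SHM_STAT_ANY take an index into the kernel's
 * internal table and return the full id (including the sequence number).
 *
 *	struct shmid_ds ds;
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("size=%zu nattch=%lu\n",
 *		       (size_t)ds.shm_segsz, (unsigned long)ds.shm_nattch);
 */
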
1304#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
1305long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
1306{
1307 int version = ipc_parse_version(&cmd);
1308
1309 return ksys_shmctl(shmid, cmd, buf, version);
1310}
1311
1312SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1313{
1314 return ksys_old_shmctl(shmid, cmd, buf);
1315}
1316#endif
1317
1318#ifdef CONFIG_COMPAT
1319
1320struct compat_shmid_ds {
1321 struct compat_ipc_perm shm_perm;
1322 int shm_segsz;
1323 old_time32_t shm_atime;
1324 old_time32_t shm_dtime;
1325 old_time32_t shm_ctime;
1326 compat_ipc_pid_t shm_cpid;
1327 compat_ipc_pid_t shm_lpid;
1328 unsigned short shm_nattch;
1329 unsigned short shm_unused;
1330 compat_uptr_t shm_unused2;
1331 compat_uptr_t shm_unused3;
1332};
1333
1334struct compat_shminfo64 {
1335 compat_ulong_t shmmax;
1336 compat_ulong_t shmmin;
1337 compat_ulong_t shmmni;
1338 compat_ulong_t shmseg;
1339 compat_ulong_t shmall;
1340 compat_ulong_t __unused1;
1341 compat_ulong_t __unused2;
1342 compat_ulong_t __unused3;
1343 compat_ulong_t __unused4;
1344};
1345
1346struct compat_shm_info {
1347 compat_int_t used_ids;
1348 compat_ulong_t shm_tot, shm_rss, shm_swp;
1349 compat_ulong_t swap_attempts, swap_successes;
1350};
1351
1352static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
1353 int version)
1354{
1355 if (in->shmmax > INT_MAX)
1356 in->shmmax = INT_MAX;
1357 if (version == IPC_64) {
1358 struct compat_shminfo64 info;
1359 memset(&info, 0, sizeof(info));
1360 info.shmmax = in->shmmax;
1361 info.shmmin = in->shmmin;
1362 info.shmmni = in->shmmni;
1363 info.shmseg = in->shmseg;
1364 info.shmall = in->shmall;
1365 return copy_to_user(buf, &info, sizeof(info));
1366 } else {
1367 struct shminfo info;
1368 memset(&info, 0, sizeof(info));
1369 info.shmmax = in->shmmax;
1370 info.shmmin = in->shmmin;
1371 info.shmmni = in->shmmni;
1372 info.shmseg = in->shmseg;
1373 info.shmall = in->shmall;
1374 return copy_to_user(buf, &info, sizeof(info));
1375 }
1376}
1377
1378static int put_compat_shm_info(struct shm_info *ip,
1379 struct compat_shm_info __user *uip)
1380{
1381 struct compat_shm_info info;
1382
1383 memset(&info, 0, sizeof(info));
1384 info.used_ids = ip->used_ids;
1385 info.shm_tot = ip->shm_tot;
1386 info.shm_rss = ip->shm_rss;
1387 info.shm_swp = ip->shm_swp;
1388 info.swap_attempts = ip->swap_attempts;
1389 info.swap_successes = ip->swap_successes;
1390 return copy_to_user(uip, &info, sizeof(info));
1391}
1392
1393static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
1394 int version)
1395{
1396 if (version == IPC_64) {
1397 struct compat_shmid64_ds v;
1398 memset(&v, 0, sizeof(v));
1399 to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
1400 v.shm_atime = lower_32_bits(in->shm_atime);
1401 v.shm_atime_high = upper_32_bits(in->shm_atime);
1402 v.shm_dtime = lower_32_bits(in->shm_dtime);
1403 v.shm_dtime_high = upper_32_bits(in->shm_dtime);
1404 v.shm_ctime = lower_32_bits(in->shm_ctime);
1405 v.shm_ctime_high = upper_32_bits(in->shm_ctime);
1406 v.shm_segsz = in->shm_segsz;
1407 v.shm_nattch = in->shm_nattch;
1408 v.shm_cpid = in->shm_cpid;
1409 v.shm_lpid = in->shm_lpid;
1410 return copy_to_user(buf, &v, sizeof(v));
1411 } else {
1412 struct compat_shmid_ds v;
1413 memset(&v, 0, sizeof(v));
1414 to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
1415 v.shm_perm.key = in->shm_perm.key;
1416 v.shm_atime = in->shm_atime;
1417 v.shm_dtime = in->shm_dtime;
1418 v.shm_ctime = in->shm_ctime;
1419 v.shm_segsz = in->shm_segsz;
1420 v.shm_nattch = in->shm_nattch;
1421 v.shm_cpid = in->shm_cpid;
1422 v.shm_lpid = in->shm_lpid;
1423 return copy_to_user(buf, &v, sizeof(v));
1424 }
1425}
1426
1427static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
1428 int version)
1429{
1430 memset(out, 0, sizeof(*out));
1431 if (version == IPC_64) {
1432 struct compat_shmid64_ds __user *p = buf;
1433 return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
1434 } else {
1435 struct compat_shmid_ds __user *p = buf;
1436 return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
1437 }
1438}
1439
1440static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version)
1441{
1442 struct ipc_namespace *ns;
1443 struct shmid64_ds sem64;
1444 int err;
1445
1446 ns = current->nsproxy->ipc_ns;
1447
1448 if (cmd < 0 || shmid < 0)
1449 return -EINVAL;
1450
1451 switch (cmd) {
1452 case IPC_INFO: {
1453 struct shminfo64 shminfo;
1454 err = shmctl_ipc_info(ns, &shminfo);
1455 if (err < 0)
1456 return err;
1457 if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
1458 err = -EFAULT;
1459 return err;
1460 }
1461 case SHM_INFO: {
1462 struct shm_info shm_info;
1463 err = shmctl_shm_info(ns, &shm_info);
1464 if (err < 0)
1465 return err;
1466 if (put_compat_shm_info(&shm_info, uptr))
1467 err = -EFAULT;
1468 return err;
1469 }
1470 case IPC_STAT:
1471 case SHM_STAT_ANY:
1472 case SHM_STAT:
1473 err = shmctl_stat(ns, shmid, cmd, &sem64);
1474 if (err < 0)
1475 return err;
1476 if (copy_compat_shmid_to_user(uptr, &sem64, version))
1477 err = -EFAULT;
1478 return err;
1479
1480 case IPC_SET:
1481 if (copy_compat_shmid_from_user(&sem64, uptr, version))
1482 return -EFAULT;
1483 fallthrough;
1484 case IPC_RMID:
1485 return shmctl_down(ns, shmid, cmd, &sem64);
1486 case SHM_LOCK:
1487 case SHM_UNLOCK:
1488 return shmctl_do_lock(ns, shmid, cmd);
1489 default:
1490 return -EINVAL;
1491 }
1492 return err;
1493}
1494
1495COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
1496{
1497 return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64);
1498}
1499
1500#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
1501long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr)
1502{
1503 int version = compat_ipc_parse_version(&cmd);
1504
1505 return compat_ksys_shmctl(shmid, cmd, uptr, version);
1506}
1507
1508COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr)
1509{
1510 return compat_ksys_old_shmctl(shmid, cmd, uptr);
1511}
1512#endif
1513#endif
1514
1515/*
1516 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1517 *
1518 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1519 * "raddr" thing points to kernel space, and there has to be a wrapper around
1520 * this.
1521 */
1522long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1523 ulong *raddr, unsigned long shmlba)
1524{
1525 struct shmid_kernel *shp;
1526 unsigned long addr = (unsigned long)shmaddr;
1527 unsigned long size;
1528 struct file *file, *base;
1529 int err;
1530 unsigned long flags = MAP_SHARED;
1531 unsigned long prot;
1532 int acc_mode;
1533 struct ipc_namespace *ns;
1534 struct shm_file_data *sfd;
1535 int f_flags;
1536 unsigned long populate = 0;
1537
1538 err = -EINVAL;
1539 if (shmid < 0)
1540 goto out;
1541
1542 if (addr) {
1543 if (addr & (shmlba - 1)) {
1544 if (shmflg & SHM_RND) {
1545 addr &= ~(shmlba - 1); /* round down */
1546
1547 /*
1548 * Ensure that the round-down is non-nil
1549 * when remapping. This can happen for
1550 * cases when addr < shmlba.
1551 */
1552 if (!addr && (shmflg & SHM_REMAP))
1553 goto out;
1554 } else
1555#ifndef __ARCH_FORCE_SHMLBA
1556 if (addr & ~PAGE_MASK)
1557#endif
1558 goto out;
1559 }
1560
1561 flags |= MAP_FIXED;
1562 } else if ((shmflg & SHM_REMAP))
1563 goto out;
1564
1565 if (shmflg & SHM_RDONLY) {
1566 prot = PROT_READ;
1567 acc_mode = S_IRUGO;
1568 f_flags = O_RDONLY;
1569 } else {
1570 prot = PROT_READ | PROT_WRITE;
1571 acc_mode = S_IRUGO | S_IWUGO;
1572 f_flags = O_RDWR;
1573 }
1574 if (shmflg & SHM_EXEC) {
1575 prot |= PROT_EXEC;
1576 acc_mode |= S_IXUGO;
1577 }
1578
1579 /*
1580 * We cannot rely on the fs check since SYSV IPC does have an
1581 * additional creator id...
1582 */
1583 ns = current->nsproxy->ipc_ns;
1584 rcu_read_lock();
1585 shp = shm_obtain_object_check(ns, shmid);
1586 if (IS_ERR(shp)) {
1587 err = PTR_ERR(shp);
1588 goto out_unlock;
1589 }
1590
1591 err = -EACCES;
1592 if (ipcperms(ns, &shp->shm_perm, acc_mode))
1593 goto out_unlock;
1594
1595 err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
1596 if (err)
1597 goto out_unlock;
1598
1599 ipc_lock_object(&shp->shm_perm);
1600
1601 /* check if shm_destroy() is tearing down shp */
1602 if (!ipc_valid_object(&shp->shm_perm)) {
1603 ipc_unlock_object(&shp->shm_perm);
1604 err = -EIDRM;
1605 goto out_unlock;
1606 }
1607
1608 /*
1609 * We need to take a reference to the real shm file to prevent the
1610 * pointer from becoming stale in cases where the lifetime of the outer
1611 * file extends beyond that of the shm segment. It's not usually
1612 * possible, but it can happen during remap_file_pages() emulation as
1613 * that unmaps the memory, then does ->mmap() via file reference only.
1614 * We'll deny the ->mmap() if the shm segment was since removed, but to
1615 * detect shm ID reuse we need to compare the file pointers.
1616 */
1617 base = get_file(shp->shm_file);
1618 shp->shm_nattch++;
1619 size = i_size_read(file_inode(base));
1620 ipc_unlock_object(&shp->shm_perm);
1621 rcu_read_unlock();
1622
1623 err = -ENOMEM;
1624 sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1625 if (!sfd) {
1626 fput(base);
1627 goto out_nattch;
1628 }
1629
1630 file = alloc_file_clone(base, f_flags,
1631 is_file_hugepages(base) ?
1632 &shm_file_operations_huge :
1633 &shm_file_operations);
1634 err = PTR_ERR(file);
1635 if (IS_ERR(file)) {
1636 kfree(sfd);
1637 fput(base);
1638 goto out_nattch;
1639 }
1640
1641 sfd->id = shp->shm_perm.id;
1642 sfd->ns = get_ipc_ns(ns);
1643 sfd->file = base;
1644 sfd->vm_ops = NULL;
1645 file->private_data = sfd;
1646
1647 err = security_mmap_file(file, prot, flags);
1648 if (err)
1649 goto out_fput;
1650
1651 if (mmap_write_lock_killable(current->mm)) {
1652 err = -EINTR;
1653 goto out_fput;
1654 }
1655
1656 if (addr && !(shmflg & SHM_REMAP)) {
1657 err = -EINVAL;
1658 if (addr + size < addr)
1659 goto invalid;
1660
1661 if (find_vma_intersection(current->mm, addr, addr + size))
1662 goto invalid;
1663 }
1664
1665 addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL);
1666 *raddr = addr;
1667 err = 0;
1668 if (IS_ERR_VALUE(addr))
1669 err = (long)addr;
1670invalid:
1671 mmap_write_unlock(current->mm);
1672 if (populate)
1673 mm_populate(addr, populate);
1674
1675out_fput:
1676 fput(file);
1677
1678out_nattch:
1679 down_write(&shm_ids(ns).rwsem);
1680 shp = shm_lock(ns, shmid);
1681 shp->shm_nattch--;
1682
1683 if (shm_may_destroy(shp))
1684 shm_destroy(ns, shp);
1685 else
1686 shm_unlock(shp);
1687 up_write(&shm_ids(ns).rwsem);
1688 return err;
1689
1690out_unlock:
1691 rcu_read_unlock();
1692out:
1693 return err;
1694}
1695
1696SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1697{
1698 unsigned long ret;
1699 long err;
1700
1701 err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1702 if (err)
1703 return err;
1704 force_successful_syscall_return();
1705 return (long)ret;
1706}
1707
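/*
 * Illustrative sketch only (not part of this file): the SHM_RND handling in
 * do_shmat() above.  With SHM_RND an unaligned attach address is rounded
 * down to a multiple of SHMLBA; without it, an unaligned address fails with
 * -EINVAL.  Assuming, purely as an example, SHMLBA == 0x4000:
 *
 *	shmat(id, (void *)0x10002345, SHM_RND);  - rounds down, maps at 0x10000000
 *	shmat(id, (void *)0x10002345, 0);        - unaligned, fails with EINVAL
 */
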
1708#ifdef CONFIG_COMPAT
1709
1710#ifndef COMPAT_SHMLBA
1711#define COMPAT_SHMLBA SHMLBA
1712#endif
1713
1714COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
1715{
1716 unsigned long ret;
1717 long err;
1718
1719 err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
1720 if (err)
1721 return err;
1722 force_successful_syscall_return();
1723 return (long)ret;
1724}
1725#endif
1726
1727/*
1728 * detach and kill segment if marked destroyed.
1729 * The work is done in shm_close.
1730 */
1731long ksys_shmdt(char __user *shmaddr)
1732{
1733 struct mm_struct *mm = current->mm;
1734 struct vm_area_struct *vma;
1735 unsigned long addr = (unsigned long)shmaddr;
1736 int retval = -EINVAL;
1737#ifdef CONFIG_MMU
1738 loff_t size = 0;
1739 struct file *file;
1740 VMA_ITERATOR(vmi, mm, addr);
1741#endif
1742
1743 if (addr & ~PAGE_MASK)
1744 return retval;
1745
1746 if (mmap_write_lock_killable(mm))
1747 return -EINTR;
1748
1749 /*
1750 * This function tries to be smart and unmap shm segments that
1751 * were modified by partial mlock or munmap calls:
1752 * - It first determines the size of the shm segment that should be
1753 * unmapped: It searches for a vma that is backed by shm and that
1754 * started at address shmaddr. It records its size and then unmaps
1755 * it.
1756 * - Then it unmaps all shm vmas that started at shmaddr and that
1757 * are within the initially determined size and that are from the
1758 * same shm segment from which we determined the size.
1759 * Errors from do_munmap are ignored: the function only fails if
1760 * it's called with invalid parameters or if it's called to unmap
1761 * a part of a vma. Both calls in this function are for full vmas,
1762 * the parameters are directly copied from the vma itself and always
1763 * valid - therefore do_munmap cannot fail. (famous last words?)
1764 */
1765 /*
1766 * If it had been mremap()'d, the starting address would not
1767 * match the usual checks anyway. So assume all vma's are
1768 * above the starting address given.
1769 */
1770
1771#ifdef CONFIG_MMU
1772 for_each_vma(vmi, vma) {
1773 /*
1774 * Check if the starting address would match, i.e. it's
1775 * a fragment created by mprotect() and/or munmap(), or it
1776 * otherwise starts at this address with no hassles.
1777 */
1778 if ((vma->vm_ops == &shm_vm_ops) &&
1779 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1780
1781 /*
1782 * Record the file of the shm segment being
1783 * unmapped. With mremap(), someone could place
1784 * page from another segment but with equal offsets
1785 * a page from another segment but with equal offsets
1786 */
1787 file = vma->vm_file;
1788 size = i_size_read(file_inode(vma->vm_file));
1789 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1790 mas_pause(&vmi.mas);
1791 /*
1792 * We discovered the size of the shm segment, so
1793 * break out of here and fall through to the next
1794 * loop that uses the size information to stop
1795 * searching for matching vma's.
1796 */
1797 retval = 0;
1798 vma = vma_next(&vmi);
1799 break;
1800 }
1801 }
1802
1803 /*
1804 * We need look no further than the maximum address a fragment
1805 * could possibly have landed at. Also cast things to loff_t to
1806 * prevent overflows and make comparisons vs. equal-width types.
1807 */
1808 size = PAGE_ALIGN(size);
1809 while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1810 /* finding a matching vma now does not alter retval */
1811 if ((vma->vm_ops == &shm_vm_ops) &&
1812 ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1813 (vma->vm_file == file)) {
1814 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1815 mas_pause(&vmi.mas);
1816 }
1817
1818 vma = vma_next(&vmi);
1819 }
1820
1821#else /* CONFIG_MMU */
1822 vma = vma_lookup(mm, addr);
1823 /* under NOMMU conditions, the exact address to be destroyed must be
1824 * given
1825 */
1826 if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1827 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1828 retval = 0;
1829 }
1830
1831#endif
1832
1833 mmap_write_unlock(mm);
1834 return retval;
1835}
1836
1837SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1838{
1839 return ksys_shmdt(shmaddr);
1840}
1841
1842#ifdef CONFIG_PROC_FS
1843static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1844{
1845 struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
1846 struct user_namespace *user_ns = seq_user_ns(s);
1847 struct kern_ipc_perm *ipcp = it;
1848 struct shmid_kernel *shp;
1849 unsigned long rss = 0, swp = 0;
1850
1851 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1852 shm_add_rss_swap(shp, &rss, &swp);
1853
1854#if BITS_PER_LONG <= 32
1855#define SIZE_SPEC "%10lu"
1856#else
1857#define SIZE_SPEC "%21lu"
1858#endif
1859
1860 seq_printf(s,
1861 "%10d %10d %4o " SIZE_SPEC " %5u %5u "
1862 "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
1863 SIZE_SPEC " " SIZE_SPEC "\n",
1864 shp->shm_perm.key,
1865 shp->shm_perm.id,
1866 shp->shm_perm.mode,
1867 shp->shm_segsz,
1868 pid_nr_ns(shp->shm_cprid, pid_ns),
1869 pid_nr_ns(shp->shm_lprid, pid_ns),
1870 shp->shm_nattch,
1871 from_kuid_munged(user_ns, shp->shm_perm.uid),
1872 from_kgid_munged(user_ns, shp->shm_perm.gid),
1873 from_kuid_munged(user_ns, shp->shm_perm.cuid),
1874 from_kgid_munged(user_ns, shp->shm_perm.cgid),
1875 shp->shm_atim,
1876 shp->shm_dtim,
1877 shp->shm_ctim,
1878 rss * PAGE_SIZE,
1879 swp * PAGE_SIZE);
1880
1881 return 0;
1882}
1883#endif
1/*
2 * linux/ipc/shm.c
3 * Copyright (C) 1992, 1993 Krishna Balasubramanian
4 * Many improvements/fixes by Bruno Haible.
5 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
6 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
7 *
8 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
9 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
10 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
11 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
12 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
13 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
14 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
15 *
16 * support for audit of ipc object properties and permission changes
17 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
18 *
19 * namespaces support
20 * OpenVZ, SWsoft Inc.
21 * Pavel Emelianov <xemul@openvz.org>
22 *
23 * Better ipc lock (kern_ipc_perm.lock) handling
24 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
25 */
26
27#include <linux/slab.h>
28#include <linux/mm.h>
29#include <linux/hugetlb.h>
30#include <linux/shm.h>
31#include <linux/init.h>
32#include <linux/file.h>
33#include <linux/mman.h>
34#include <linux/shmem_fs.h>
35#include <linux/security.h>
36#include <linux/syscalls.h>
37#include <linux/audit.h>
38#include <linux/capability.h>
39#include <linux/ptrace.h>
40#include <linux/seq_file.h>
41#include <linux/rwsem.h>
42#include <linux/nsproxy.h>
43#include <linux/mount.h>
44#include <linux/ipc_namespace.h>
45
46#include <linux/uaccess.h>
47
48#include "util.h"
49
50struct shm_file_data {
51 int id;
52 struct ipc_namespace *ns;
53 struct file *file;
54 const struct vm_operations_struct *vm_ops;
55};
56
57#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
58
59static const struct file_operations shm_file_operations;
60static const struct vm_operations_struct shm_vm_ops;
61
62#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
63
64#define shm_unlock(shp) \
65 ipc_unlock(&(shp)->shm_perm)
66
67static int newseg(struct ipc_namespace *, struct ipc_params *);
68static void shm_open(struct vm_area_struct *vma);
69static void shm_close(struct vm_area_struct *vma);
70static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
71#ifdef CONFIG_PROC_FS
72static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
73#endif
74
75void shm_init_ns(struct ipc_namespace *ns)
76{
77 ns->shm_ctlmax = SHMMAX;
78 ns->shm_ctlall = SHMALL;
79 ns->shm_ctlmni = SHMMNI;
80 ns->shm_rmid_forced = 0;
81 ns->shm_tot = 0;
82 ipc_init_ids(&shm_ids(ns));
83}
84
85/*
86 * Called with shm_ids.rwsem (writer) and the shp structure locked.
87 * Only shm_ids.rwsem remains locked on exit.
88 */
89static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
90{
91 struct shmid_kernel *shp;
92
93 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
94
95 if (shp->shm_nattch) {
96 shp->shm_perm.mode |= SHM_DEST;
97 /* Do not find it any more */
98 shp->shm_perm.key = IPC_PRIVATE;
99 shm_unlock(shp);
100 } else
101 shm_destroy(ns, shp);
102}
103
104#ifdef CONFIG_IPC_NS
105void shm_exit_ns(struct ipc_namespace *ns)
106{
107 free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
108 idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
109}
110#endif
111
112static int __init ipc_ns_init(void)
113{
114 shm_init_ns(&init_ipc_ns);
115 return 0;
116}
117
118pure_initcall(ipc_ns_init);
119
120void __init shm_init(void)
121{
122 ipc_init_proc_interface("sysvipc/shm",
123#if BITS_PER_LONG <= 32
124 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
125#else
126 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
127#endif
128 IPC_SHM_IDS, sysvipc_shm_proc_show);
129}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	/*
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * object pointer (as returned by ipc_lock()), and error out as
	 * appropriate.
	 */
	if (IS_ERR(ipcp))
		return (void *)ipcp;
	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct shmid_kernel *shp = ipc_rcu_to_struct(p);

	security_shm_free(shp);
	ipc_rcu_free(head);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	list_del(&s->shm_clist);
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


static int __shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);

	if (IS_ERR(shp))
		return PTR_ERR(shp);

	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
	return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	int err = __shm_open(vma);
	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	WARN_ON_ONCE(err);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_user);
	fput(shm_file);
	ipc_rcu_putref(shp, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}
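
/*
 * shm_destroy_orphaned() is what makes switching on kernel.shm_rmid_forced
 * retroactive: when the flag is enabled later (e.g. via
 * "sysctl kernel.shm_rmid_forced=1" or a write to
 * /proc/sys/kernel/shm_rmid_forced), the ipc sysctl handler invokes it so
 * that segments whose creators have already exited are reaped right away
 * rather than lingering until the next detach.
 */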

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
	struct shmid_kernel *shp, *n;

	if (list_empty(&task->sysvshm.shm_clist))
		return;

	/*
	 * If kernel.shm_rmid_forced is not set then only keep track of
	 * which shmids are orphaned, so that a later set of the sysctl
	 * can clean them up.
	 */
	if (!ns->shm_rmid_forced) {
		down_read(&shm_ids(ns).rwsem);
		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
			shp->shm_creator = NULL;
		/*
		 * Only under read lock but we are only called on current
		 * so no entry on the list will be shared.
		 */
		list_del(&task->sysvshm.shm_clist);
		up_read(&shm_ids(ns).rwsem);
		return;
	}

	/*
	 * Destroy all already created segments that were not yet mapped,
	 * and mark any mapped segments as orphaned to cover the sysctl toggling.
	 * Destroy is skipped if shm_may_destroy() returns false.
	 */
	down_write(&shm_ids(ns).rwsem);
	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
		shp->shm_creator = NULL;

		if (shm_may_destroy(ns, shp)) {
			shm_lock_by_ptr(shp);
			shm_destroy(ns, shp);
		}
	}

	/* Remove the list head from any segments still attached. */
	list_del(&task->sysvshm.shm_clist);
	up_write(&shm_ids(ns).rwsem);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	/*
	 * In case of remap_file_pages() emulation, the file can represent a
	 * removed IPC ID: propagate the shm_lock() error to the caller.
	 */
	ret = __shm_open(vma);
	if (ret)
		return ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret) {
		shm_close(vma);
		return ret;
	}
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	return 0;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap = shm_mmap,
	.fsync = shm_fsync,
	.release = shm_release,
	.get_unmapped_area = shm_get_unmapped_area,
	.llseek = noop_llseek,
	.fallocate = shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
	.mmap = shm_mmap,
	.fsync = shm_fsync,
	.release = shm_release,
	.get_unmapped_area = shm_get_unmapped_area,
	.llseek = noop_llseek,
	.fallocate = shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open = shm_open,	/* callback for a new vm-area open */
	.close = shm_close,	/* callback for when the vm-area is released */
	.fault = shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	if (ns->shm_tot + numpages < ns->shm_tot ||
	    ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp, ipc_rcu_free);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
					  &shp->mlock_user, HUGETLB_SHMFS_INODE,
					  (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
		    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_kernel_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	ipc_rcu_putref(shp, shm_rcu_free);
	return error;
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				  struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = shm_security,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
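
/*
 * Rough userspace view of the API implemented in this file (an illustrative
 * sketch, not kernel code; error handling omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);		attach, backed by do_shmat()
 *	p[0] = 'x';				visible to every attacher
 *	shmdt(p);				detach, handled by sys_shmdt()
 *	shmctl(id, IPC_RMID, NULL);		destroy once fully detached
 */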

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz = in->shm_segsz;
		out.shm_atime = in->shm_atime;
		out.shm_dtime = in->shm_dtime;
		out.shm_ctime = in->shm_ctime;
		out.shm_cpid = in->shm_cpid;
		out.shm_lpid = in->shm_lpid;
		out.shm_nattch = in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid = tbuf_old.shm_perm.uid;
		out->shm_perm.gid = tbuf_old.shm_perm.gid;
		out->shm_perm.mode = tbuf_old.shm_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin = in->shmmin;
		out.shmmni = in->shmmni;
		out.shmseg = in->shmseg;
		out.shmall = in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
			     unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);

		spin_lock_irq(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock_irq(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
			 unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}

static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz = shp->shm_segsz;
		tbuf.shm_atime = shp->shm_atim;
		tbuf.shm_dtime = shp->shm_dtim;
		tbuf.shm_ctime = shp->shm_ctim;
		tbuf.shm_cpid = shp->shm_cprid;
		tbuf.shm_lpid = shp->shm_lprid;
		tbuf.shm_nattch = shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);

		/* check if shm_destroy() is tearing down shp */
		if (!ipc_valid_object(&shp->shm_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();

			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid)) {
				err = -EPERM;
				goto out_unlock0;
			}
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
				err = -EPERM;
				goto out_unlock0;
			}
		}

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();

			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}
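
/*
 * Illustrative only: the SHM_LOCK/SHM_UNLOCK path above is what backs a
 * userspace call such as
 *
 *	shmctl(id, SHM_LOCK, NULL);
 *
 * which marks the segment SHM_LOCKED so its pages are not swapped out.
 * Without CAP_IPC_LOCK the caller must be the segment's owner or creator
 * and have a non-zero RLIMIT_MEMLOCK.
 */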

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg,
	      ulong *raddr, unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			/*
			 * Round down to the nearest multiple of shmlba.
			 * For sane do_mmap_pgoff() parameters, avoid
			 * round downs that trigger nil-page and MAP_FIXED.
			 */
			if ((shmflg & SHM_RND) && addr >= shmlba)
				addr &= ~(shmlba - 1);
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(d_inode(path.dentry));
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	if (down_write_killable(&current->mm->mmap_sem)) {
		err = -EINTR;
		goto out_fput;
	}

	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (addr + size < addr)
			goto invalid;

		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct file *file;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: it searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size and that are from the
	 *   same shm segment from which we determined the size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			/*
			 * Record the file of the shm segment being
			 * unmapped. With mremap(), someone could place
			 * a page from another segment but with equal offsets
			 * in the range we are unmapping.
			 */
			file = vma->vm_file;
			size = i_size_read(file_inode(vma->vm_file));
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
		    (vma->vm_file == file))
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given
	 */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	seq_printf(s,
		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
		   "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
		   SIZE_SPEC " " SIZE_SPEC "\n",
		   shp->shm_perm.key,
		   shp->shm_perm.id,
		   shp->shm_perm.mode,
		   shp->shm_segsz,
		   shp->shm_cprid,
		   shp->shm_lprid,
		   shp->shm_nattch,
		   from_kuid_munged(user_ns, shp->shm_perm.uid),
		   from_kgid_munged(user_ns, shp->shm_perm.gid),
		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
		   shp->shm_atim,
		   shp->shm_dtim,
		   shp->shm_ctim,
		   rss * PAGE_SIZE,
		   swp * PAGE_SIZE);

	return 0;
}
#endif