/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
	struct ipc_namespace	*ipc_ns;
	bool			 newns;	/* Set if newly created ipc namespace */
};

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};
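
/*
 * Messages are kept in an rbtree of posix_msg_tree_node, one node per
 * priority currently in use; each node holds a FIFO list of messages of
 * that priority (msg->m_type doubles as the POSIX priority here).
 * info->msg_tree_rightmost caches the highest-priority node, so msg_get()
 * below can take the next message without walking the tree.
 */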

/*
 * Locking:
 *
 * Accesses to a message queue are synchronized by acquiring info->lock.
 *
 * There are two notable exceptions:
 * - The actual wakeup of a sleeping task is performed using the wake_q
 *   framework. info->lock is already released when wake_up_q is called.
 * - The exit codepaths after sleeping check ext_wait_queue->state without
 *   any locks. If it is STATE_READY, then the syscall is completed without
 *   acquiring info->lock.
 *
 * MQ_BARRIER:
 * To achieve proper release/acquire memory barrier pairing, the state is set
 * to STATE_READY with smp_store_release(), and it is read with READ_ONCE
 * followed by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe()
 * is used.
 *
 * This prevents the following races:
 *
 * 1) With the simple wake_q_add(), the task could be gone already before
 *    the increase of the reference happens
 * Thread A
 *				Thread B
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				wake_q_add(A)
 *				if (cmpxchg()) // success
 *				   ->state = STATE_READY (reordered)
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * sysret to user space
 * sys_exit()
 *				get_task_struct() // UaF
 *
 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 * the smp_store_release() that does ->state = STATE_READY.
 *
 * 2) Without proper _release/_acquire barriers, the woken up task
 *    could read stale data
 *
 * Thread A
 *				Thread B
 * do_mq_timedreceive
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				state = STATE_READY;
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * msg_ptr = wait.msg;		// Access to stale data!
 *				receiver->msg = message; (reordered)
 *
 * Solution: use _release and _acquire barriers.
 *
 * 3) There is intentionally no barrier when setting current->state
 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 *    release memory barrier, and the wakeup is triggered when holding
 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 *    acquire memory barrier.
 */
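
/*
 * A condensed sketch of the pairing described above, simplified from
 * __pipelined_op() and wq_sleep() below (illustrative only, not a
 * substitute for reading those functions):
 *
 *	waker (info->lock held):
 *		list_del(&this->list);
 *		task = get_task_struct(this->task);
 *		smp_store_release(&this->state, STATE_READY);
 *		wake_q_add_safe(wake_q, task);
 *
 *	sleeper (no lock held after waking):
 *		if (READ_ONCE(ewp->state) == STATE_READY) {
 *			smp_acquire__after_ctrl_dep();
 *			return 0;	// ewp->msg is stable now
 *		}
 */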

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct rb_node *msg_tree_rightmost;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	u32 notify_self_exec_id;
	struct user_namespace *notify_user_ns;
	struct ucounts *ucounts;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	bool rightmost = true;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;

	if (rightmost)
		info->msg_tree_rightmost = &leaf->rb_node;

	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
				  struct mqueue_inode_info *info)
{
	struct rb_node *node = &leaf->rb_node;

	if (info->msg_tree_rightmost == node)
		info->msg_tree_rightmost = rb_prev(node);

	rb_erase(node, &info->msg_tree);
	if (info->node_cache)
		kfree(leaf);
	else
		info->node_cache = leaf;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	/*
	 * During insert, low priorities go to the left and high to the
	 * right. On receive, we want the highest priorities first, so
	 * walk all the way to the right.
	 */
	parent = info->msg_tree_rightmost;
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		msg_tree_erase(leaf, info);
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			msg_tree_erase(leaf, info);
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->ucounts = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->msg_tree_rightmost = NULL;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns. We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities. However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */

		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
			    info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		info->ucounts = get_ucounts(current_ucounts());
		if (info->ucounts) {
			long msgqueue;

			spin_lock(&mq_lock);
			msgqueue = inc_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
			if (msgqueue == LONG_MAX || msgqueue > rlimit(RLIMIT_MSGQUEUE)) {
				dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
				spin_unlock(&mq_lock);
				put_ucounts(info->ucounts);
				info->ucounts = NULL;
				/* mqueue_evict_inode() releases info->messages */
				ret = -EMFILE;
				goto out_inode;
			}
			spin_unlock(&mq_lock);
		}
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	/*
	 * With a newly created ipc namespace, we don't need to do a search
	 * for an ipc namespace match, but we still need to set s_fs_info.
	 */
	if (ctx->newns) {
		fc->s_fs_info = ctx->ipc_ns;
		return get_tree_nodev(fc, mqueue_fill_super);
	}
	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_ipc_ns(ctx->ipc_ns);
	kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx;

	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
	fc->fs_private = ctx;
	fc->ops = &mqueue_fs_context_ops;
	return 0;
}

/*
 * mq_init_ns() is currently the only caller of mq_create_mount().
 * So the ns parameter is always a newly created ipc namespace.
 */
static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
	struct mqueue_fs_context *ctx;
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ctx = fc->fs_private;
	ctx->newns = true;
	put_ipc_ns(ctx->ipc_ns);
	ctx->ipc_ns = get_ipc_ns(ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = alloc_inode_sb(sb, mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	if (info->ucounts) {
		unsigned long mq_bytes, mq_treesize;

		/* Total amount of bytes accounted for the mqueue */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		put_ucounts(info->ucounts);
		info->ucounts = NULL;
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_create(struct user_namespace *mnt_userns, struct inode *dir,
			 struct dentry *dentry, umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This is a routine for reads on the queue file itself. To avoid doing
 * some sort of mq_receive here, we allow reading only the queue size and
 * the notification info (the only values that are interesting from the
 * user's point of view and that aren't accessible through the standard
 * library routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
		 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
		 info->qsize,
		 info->notify_owner ? info->notify.sigev_notify : 0,
		 (info->notify_owner &&
		  info->notify.sigev_notify == SIGEV_SIGNAL) ?
			info->notify.sigev_signo : 0,
		 pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				      strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
		   struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		/* memory barrier not required, we hold info->lock */
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (READ_ONCE(ewp->state) == STATE_READY) {
			/* see MQ_BARRIER for purpose/pairing */
			smp_acquire__after_ctrl_dep();
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);

		/* we hold info->lock, so no memory barrier required */
		if (READ_ONCE(ewp->state) == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function is split out only to keep sys_mq_timedsend() from
 * growing too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * Notification is invoked when a process has registered for it,
	 * no process is waiting synchronously for a message, AND the state
	 * of the queue changed from empty to not empty. Here we are sure
	 * that no one is waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL: {
			struct kernel_siginfo sig_i;
			struct task_struct *task;

			/* do_mq_notify() accepts sigev_signo == 0, why?? */
			if (!info->notify.sigev_signo)
				break;

			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			rcu_read_lock();
			/* map current pid/uid into info->owner's namespaces */
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
						current_uid());
			/*
			 * We can't use kill_pid_info(), this signal should
			 * bypass check_kill_permission(). It is from kernel
			 * but si_fromuser() can't know this.
			 * We do check the self_exec_id, to avoid sending
			 * signals to programs that don't expect them.
			 */
			task = pid_task(info->notify_owner, PIDTYPE_TGID);
			if (task && task->self_exec_id ==
						info->notify_self_exec_id) {
				do_send_sig_info(info->notify.sigev_signo,
						 &sig_i, task, PIDTYPE_TGID);
			}
			rcu_read_unlock();
			break;
		}
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				 mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(&init_user_ns, d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}
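
/*
 * Illustrative userspace usage of the syscall above, via the POSIX
 * wrapper (queue name and sizes are made-up examples):
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *	mqd_t mq = mq_open("/example", O_CREAT | O_RDWR, 0600, &attr);
 */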

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(&init_user_ns, d_inode(dentry->d_parent),
				 dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}
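
/*
 * Userspace counterpart (illustrative name): mq_unlink("/example") removes
 * the name; the queue itself is destroyed once the last descriptor holding
 * the inode is closed (see mqueue_evict_inode() above).
 */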

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

static inline void __pipelined_op(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct ext_wait_queue *this)
{
	struct task_struct *task;

	list_del(&this->list);
	task = get_task_struct(this->task);

	/* see MQ_BARRIER for purpose/pairing */
	smp_store_release(&this->state, STATE_READY);
	wake_q_add_safe(wake_q, task);
}

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	__pipelined_op(wake_q, info, receiver);
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and put it into the queue (there is guaranteed to be a
 * free slot).
 */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	__pipelined_op(wake_q, info, sender);
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec64 *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}
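
/*
 * Illustrative userspace usage (values are made up): the timeout is an
 * absolute CLOCK_REALTIME deadline, matching the HRTIMER_MODE_ABS /
 * CLOCK_REALTIME sleep in wq_sleep():
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 1;
 *	mq_timedsend(mq, buf, len, prio, &ts);
 */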

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
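
/*
 * Illustrative userspace receive (buffer size is an example): the buffer
 * must be at least mq_msgsize bytes, or do_mq_timedreceive() fails with
 * -EMSGSIZE:
 *
 *	char buf[128];		// >= attr.mq_msgsize
 *	unsigned int prio;
 *	ssize_t n = mq_receive(mq, buf, sizeof(buf), &prio);
 */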

/*
 * Note: if a process asks us to deregister (by passing a NULL pointer)
 * but is not the current owner of the notification, the request is
 * silently ignored. POSIX does not explicitly define this case.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc)
				return -ENOMEM;

			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto free_skb;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				goto free_skb;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret)
				return ret;
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			info->notify_self_exec_id = current->self_exec_id;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else
free_skb:
		dev_kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}
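
/*
 * Illustrative userspace registration (signal choice is an example):
 * deliver SIGUSR1 to this process when the queue goes from empty to
 * non-empty (see __do_notify(), which fires at mq_curmsgs == 1):
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	mq_notify(mq, &sev);
 */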

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}
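
/*
 * Illustrative userspace usage: only the O_NONBLOCK bit of mq_flags can be
 * changed after creation; everything else is read-only:
 *
 *	struct mq_attr newattr = { .mq_flags = O_NONBLOCK }, old;
 *	mq_setattr(mq, &newattr, &old);	// old receives the previous attrs
 */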

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags */
	compat_long_t mq_maxmsg;     /* maximum number of messages */
	compat_long_t mq_msgsize;    /* maximum message size */
	compat_long_t mq_curmsgs;    /* number of messages currently queued */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
				  struct timespec64 *ts)
{
	if (get_old_timespec32(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
		const char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int, msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
		char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.free_inode = mqueue_free_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
	.free = mqueue_fs_context_free,
	.get_tree = mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.init_fs_context = mqueue_init_fs_context,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	struct vfsmount *m;

	ns->mq_queues_count = 0;
	ns->mq_queues_max = DFLT_QUEUESMAX;
	ns->mq_msg_max = DFLT_MSGMAX;
	ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
	ns->mq_msg_default = DFLT_MSG;
	ns->mq_msgsize_default = DFLT_MSGSIZE;

	m = mq_create_mount(ns);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ns->mq_mnt = m;
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	if (!setup_mq_sysctls(&init_ipc_ns)) {
		pr_warn("sysctl registration failed\n");
		error = -ENOMEM;
		goto out_kmem;
	}

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	retire_mq_sysctls(&init_ipc_ns);
out_kmem:
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);
1/*
2 * POSIX message queues filesystem for Linux.
3 *
4 * Copyright (C) 2003,2004 Krzysztof Benedyczak (golbi@mat.uni.torun.pl)
5 * Michal Wronski (michal.wronski@gmail.com)
6 *
7 * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com)
8 * Lockless receive & send, fd based notify:
9 * Manfred Spraul (manfred@colorfullife.com)
10 *
11 * Audit: George Wilson (ltcgcw@us.ibm.com)
12 *
13 * This file is released under the GPL.
14 */
15
16#include <linux/capability.h>
17#include <linux/init.h>
18#include <linux/pagemap.h>
19#include <linux/file.h>
20#include <linux/mount.h>
21#include <linux/fs_context.h>
22#include <linux/namei.h>
23#include <linux/sysctl.h>
24#include <linux/poll.h>
25#include <linux/mqueue.h>
26#include <linux/msg.h>
27#include <linux/skbuff.h>
28#include <linux/vmalloc.h>
29#include <linux/netlink.h>
30#include <linux/syscalls.h>
31#include <linux/audit.h>
32#include <linux/signal.h>
33#include <linux/mutex.h>
34#include <linux/nsproxy.h>
35#include <linux/pid.h>
36#include <linux/ipc_namespace.h>
37#include <linux/user_namespace.h>
38#include <linux/slab.h>
39#include <linux/sched/wake_q.h>
40#include <linux/sched/signal.h>
41#include <linux/sched/user.h>
42
43#include <net/sock.h>
44#include "util.h"
45
46struct mqueue_fs_context {
47 struct ipc_namespace *ipc_ns;
48};
49
50#define MQUEUE_MAGIC 0x19800202
51#define DIRENT_SIZE 20
52#define FILENT_SIZE 80
53
54#define SEND 0
55#define RECV 1
56
57#define STATE_NONE 0
58#define STATE_READY 1
59
60struct posix_msg_tree_node {
61 struct rb_node rb_node;
62 struct list_head msg_list;
63 int priority;
64};
65
66struct ext_wait_queue { /* queue of sleeping tasks */
67 struct task_struct *task;
68 struct list_head list;
69 struct msg_msg *msg; /* ptr of loaded message */
70 int state; /* one of STATE_* values */
71};
72
73struct mqueue_inode_info {
74 spinlock_t lock;
75 struct inode vfs_inode;
76 wait_queue_head_t wait_q;
77
78 struct rb_root msg_tree;
79 struct rb_node *msg_tree_rightmost;
80 struct posix_msg_tree_node *node_cache;
81 struct mq_attr attr;
82
83 struct sigevent notify;
84 struct pid *notify_owner;
85 struct user_namespace *notify_user_ns;
86 struct user_struct *user; /* user who created, for accounting */
87 struct sock *notify_sock;
88 struct sk_buff *notify_cookie;
89
90 /* for tasks waiting for free space and messages, respectively */
91 struct ext_wait_queue e_wait_q[2];
92
93 unsigned long qsize; /* size of queue in memory (sum of all msgs) */
94};
95
96static struct file_system_type mqueue_fs_type;
97static const struct inode_operations mqueue_dir_inode_operations;
98static const struct file_operations mqueue_file_operations;
99static const struct super_operations mqueue_super_ops;
100static const struct fs_context_operations mqueue_fs_context_ops;
101static void remove_notification(struct mqueue_inode_info *info);
102
103static struct kmem_cache *mqueue_inode_cachep;
104
105static struct ctl_table_header *mq_sysctl_table;
106
107static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
108{
109 return container_of(inode, struct mqueue_inode_info, vfs_inode);
110}
111
112/*
113 * This routine should be called with the mq_lock held.
114 */
115static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
116{
117 return get_ipc_ns(inode->i_sb->s_fs_info);
118}
119
120static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
121{
122 struct ipc_namespace *ns;
123
124 spin_lock(&mq_lock);
125 ns = __get_ns_from_inode(inode);
126 spin_unlock(&mq_lock);
127 return ns;
128}
129
130/* Auxiliary functions to manipulate messages' list */
131static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
132{
133 struct rb_node **p, *parent = NULL;
134 struct posix_msg_tree_node *leaf;
135 bool rightmost = true;
136
137 p = &info->msg_tree.rb_node;
138 while (*p) {
139 parent = *p;
140 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
141
142 if (likely(leaf->priority == msg->m_type))
143 goto insert_msg;
144 else if (msg->m_type < leaf->priority) {
145 p = &(*p)->rb_left;
146 rightmost = false;
147 } else
148 p = &(*p)->rb_right;
149 }
150 if (info->node_cache) {
151 leaf = info->node_cache;
152 info->node_cache = NULL;
153 } else {
154 leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
155 if (!leaf)
156 return -ENOMEM;
157 INIT_LIST_HEAD(&leaf->msg_list);
158 }
159 leaf->priority = msg->m_type;
160
161 if (rightmost)
162 info->msg_tree_rightmost = &leaf->rb_node;
163
164 rb_link_node(&leaf->rb_node, parent, p);
165 rb_insert_color(&leaf->rb_node, &info->msg_tree);
166insert_msg:
167 info->attr.mq_curmsgs++;
168 info->qsize += msg->m_ts;
169 list_add_tail(&msg->m_list, &leaf->msg_list);
170 return 0;
171}
172
173static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
174 struct mqueue_inode_info *info)
175{
176 struct rb_node *node = &leaf->rb_node;
177
178 if (info->msg_tree_rightmost == node)
179 info->msg_tree_rightmost = rb_prev(node);
180
181 rb_erase(node, &info->msg_tree);
182 if (info->node_cache) {
183 kfree(leaf);
184 } else {
185 info->node_cache = leaf;
186 }
187}
188
189static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
190{
191 struct rb_node *parent = NULL;
192 struct posix_msg_tree_node *leaf;
193 struct msg_msg *msg;
194
195try_again:
196 /*
197 * During insert, low priorities go to the left and high to the
198 * right. On receive, we want the highest priorities first, so
199 * walk all the way to the right.
200 */
201 parent = info->msg_tree_rightmost;
202 if (!parent) {
203 if (info->attr.mq_curmsgs) {
204 pr_warn_once("Inconsistency in POSIX message queue, "
205 "no tree element, but supposedly messages "
206 "should exist!\n");
207 info->attr.mq_curmsgs = 0;
208 }
209 return NULL;
210 }
211 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
212 if (unlikely(list_empty(&leaf->msg_list))) {
213 pr_warn_once("Inconsistency in POSIX message queue, "
214 "empty leaf node but we haven't implemented "
215 "lazy leaf delete!\n");
216 msg_tree_erase(leaf, info);
217 goto try_again;
218 } else {
219 msg = list_first_entry(&leaf->msg_list,
220 struct msg_msg, m_list);
221 list_del(&msg->m_list);
222 if (list_empty(&leaf->msg_list)) {
223 msg_tree_erase(leaf, info);
224 }
225 }
226 info->attr.mq_curmsgs--;
227 info->qsize -= msg->m_ts;
228 return msg;
229}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->msg_tree_rightmost = NULL;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns. We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities. Since this is
		 * pinned memory, though, we must assume the worst case and
		 * account min(mq_maxmsg, MQ_PRIO_MAX) nodes of struct
		 * posix_msg_tree_node.
		 */

		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
			    info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* any partial setup is undone by mqueue_evict_inode() via iput() */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}
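
/*
 * Worked example of the accounting above (illustrative numbers only):
 * with mq_maxmsg = 10 and mq_msgsize = 8192, the charge against
 * RLIMIT_MSGQUEUE is
 *
 *	mq_bytes = 10 * 8192
 *		 + 10 * sizeof(struct msg_msg)
 *		 + min(10, MQ_PRIO_MAX) * sizeof(struct posix_msg_tree_node)
 *
 * i.e. the full worst-case footprint is charged up front, whether or
 * not any messages are ever queued.
 */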

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_ipc_ns(ctx->ipc_ns);
	kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx;

	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
	fc->fs_private = ctx;
	fc->ops = &mqueue_fs_context_ops;
	return 0;
}

static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
	struct mqueue_fs_context *ctx;
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ctx = fc->fs_private;
	put_ipc_ns(ctx->ipc_ns);
	ctx->ipc_ns = get_ipc_ns(ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

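	/*
	 * The messages were unhooked from the queue under info->lock but
	 * are freed only here, after the lock is dropped: free_msg() ends
	 * up in security hooks and is kept out of the spinlocked region.
	 */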
	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	user = info->user;
	if (user) {
		unsigned long mq_bytes, mq_treesize;

		/* Total amount of bytes accounted for the mqueue */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
			 umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This is the routine for reads from a queue file (e.g. under
 * /dev/mqueue). Rather than implementing some form of mq_receive()
 * here, we only expose the queue size and the notification info: the
 * only values that are interesting from a user's point of view and
 * that aren't accessible through the standard mq_*() routines.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
		 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
		 info->qsize,
		 info->notify_owner ? info->notify.sigev_notify : 0,
		 (info->notify_owner &&
		  info->notify.sigev_notify == SIGEV_SIGNAL) ?
			info->notify.sigev_signo : 0,
		 pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				      strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}
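
/*
 * Example of what a read produces (values are illustrative):
 *
 *	$ cat /dev/mqueue/myqueue
 *	QSIZE:129    NOTIFY:2    SIGNO:0    NOTIFY_PID:8260
 *
 * QSIZE is the number of bytes currently queued; the NOTIFY, SIGNO and
 * NOTIFY_PID fields describe the registered notification, if any.
 */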

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}
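
/*
 * Because a mqd_t is a file descriptor on this filesystem, it can be
 * multiplexed with poll/epoll/select. Illustrative userspace sketch
 * (not part of this file):
 *
 *	struct pollfd pfd = { .fd = (int) mq, .events = POLLIN };
 *	poll(&pfd, 1, -1);	 wait until mq_receive() would not block
 *
 * POLLIN is reported when the queue is non-empty and POLLOUT when it
 * has room for another message, matching the checks above.
 */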

/* Add the current task to info->e_wait_q[sr], in front of the first
 * waiter whose ->prio value is smaller or equal (i.e. whose scheduling
 * priority is higher or equal), keeping the list sorted. */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}
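
/*
 * The ordering maintained by wq_add() means the tail of the list is
 * the waiter with the best scheduling priority, with FIFO order among
 * waiters of equal priority; wq_get_first_waiter() below simply picks
 * the tail.
 */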

/*
 * Puts the current task to sleep. The caller must hold the queue lock;
 * it is dropped before returning.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		/* memory barrier not required, we hold info->lock */
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (READ_ONCE(ewp->state) == STATE_READY) {
			/* see MQ_BARRIER for purpose/pairing */
			smp_acquire__after_ctrl_dep();
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);

		/* we hold info->lock, so no memory barrier required */
		if (READ_ONCE(ewp->state) == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function is split out of the (otherwise overly long) timed send
 * path. It is called with info->lock held.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * The notification is invoked when a process has registered for
	 * notification, no process is waiting synchronously for a message,
	 * and the state of the queue has changed from empty to not empty.
	 * At this point we can be sure that nobody is waiting
	 * synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct kernel_siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after the notification is delivered, unregister the process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				 mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}
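
/*
 * Userspace usage sketch (illustrative, not part of this file):
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *	mqd_t mq = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, &attr);
 *
 * The name is looked up directly under the root of the caller's mqueue
 * mount, so "/myqueue" shows up as "myqueue" wherever the mqueue
 * filesystem is mounted (conventionally /dev/mqueue). The returned
 * descriptor has O_CLOEXEC set, as arranged above.
 */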

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}
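
/*
 * Illustrative counterpart in userspace: mq_unlink("/myqueue") removes
 * the name immediately, but the queue itself is only destroyed once
 * the last descriptor referring to it is closed, since an open file
 * still pins the inode (note the ihold()/iput() pair above).
 */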

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message tree. If there is a waiting receiver, then it
 * bypasses the message tree and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);

	/*
	 * Take the task reference before marking the receiver ready, so
	 * that the task cannot exit between STATE_READY and the wakeup;
	 * see MQ_BARRIER for the purpose and pairing of the release
	 * barrier.
	 */
	get_task_struct(receiver->task);
	smp_store_release(&receiver->state, STATE_READY);
	wake_q_add_safe(wake_q, receiver->task);
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (there is guaranteed
 * to be a free slot). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	list_del(&sender->list);

	/* see MQ_BARRIER for purpose/pairing */
	get_task_struct(sender->task);
	smp_store_release(&sender->state, STATE_READY);
	wake_q_add_safe(wake_q, sender->task);
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}
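
/*
 * Userspace usage sketch (illustrative): the timeout passed down here
 * is an absolute CLOCK_REALTIME deadline, not a relative interval (see
 * the HRTIMER_MODE_ABS/CLOCK_REALTIME arguments in wq_sleep()):
 *
 *	struct timespec deadline;
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;
 *	if (mq_timedsend(mq, buf, len, prio, &deadline) == -1)
 *		... errno is ETIMEDOUT once the deadline has passed ...
 */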

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec64 *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}
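
/*
 * Note for userspace (illustrative): the receive buffer must be at
 * least mq_msgsize bytes, even for short messages, or the call fails
 * with EMSGSIZE (see the msg_len check above):
 *
 *	struct mq_attr attr;
 *	mq_getattr(mq, &attr);
 *	char *buf = malloc(attr.mq_msgsize);
 *	unsigned int prio;
 *	ssize_t n = mq_receive(mq, buf, attr.mq_msgsize, &prio);
 */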

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}

/*
 * Note: if a caller asks to deregister (by passing a NULL notification)
 * but is not the current owner of the notification, the request is
 * silently ignored. POSIX does not explicitly define this case.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc)
				return -ENOMEM;

			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto free_skb;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				goto free_skb;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret)
				return ret;
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else
free_skb:
		dev_kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}
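
/*
 * Userspace usage sketch (illustrative): register for a signal when a
 * message arrives on an empty queue; only one process may be
 * registered at a time (otherwise -EBUSY above):
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	mq_notify(mq, &sev);
 */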

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}
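
/*
 * Userspace usage sketch (illustrative): the only attribute that can
 * be changed after creation is the O_NONBLOCK flag (anything else in
 * mq_flags is rejected with -EINVAL above). The libc mq_getattr() and
 * mq_setattr() wrappers typically funnel into this syscall:
 *
 *	struct mq_attr attr;
 *	mq_getattr(mq, &attr);
 *	attr.mq_flags |= O_NONBLOCK;
 *	mq_setattr(mq, &attr, NULL);
 */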

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags */
	compat_long_t mq_maxmsg;     /* maximum number of messages */
	compat_long_t mq_msgsize;    /* maximum message size */
	compat_long_t mq_curmsgs;    /* number of messages currently queued */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
				  struct timespec64 *ts)
{
	if (get_old_timespec32(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
		const char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int, msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
		char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.free_inode = mqueue_free_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
	.free = mqueue_fs_context_free,
	.get_tree = mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.init_fs_context = mqueue_init_fs_context,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	struct vfsmount *m;

	ns->mq_queues_count = 0;
	ns->mq_queues_max = DFLT_QUEUESMAX;
	ns->mq_msg_max = DFLT_MSGMAX;
	ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
	ns->mq_msg_default = DFLT_MSG;
	ns->mq_msgsize_default = DFLT_MSGSIZE;

	m = mq_create_mount(ns);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ns->mq_mnt = m;
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);