/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;
	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}
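
/*
 * Example: with messages queued at priorities 2, 5 and 31, the tree holds
 * one posix_msg_tree_node per priority in use, each carrying a FIFO list
 * of its messages:
 *
 *                 5
 *                / \
 *               2   31
 *
 * msg_get() below always walks to the rightmost node (here 31), so the
 * highest priority is served first and equal-priority messages stay FIFO.
 */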

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		/*
		 * During insert, low priorities go to the left and high to the
		 * right.  On receive, we want the highest priorities first, so
		 * walk all the way to the right.
		 */
		p = &(*p)->rb_right;
	}
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		rb_erase(&leaf->rb_node, &info->msg_tree);
		if (info->node_cache) {
			kfree(leaf);
		} else {
			info->node_cache = leaf;
		}
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			rb_erase(&leaf->rb_node, &info->msg_tree);
			if (info->node_cache) {
				kfree(leaf);
			} else {
				info->node_cache = leaf;
			}
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);
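
		/*
		 * Worked example (sizes are illustrative; struct layouts
		 * vary by architecture and config): with mq_maxmsg = 10 and
		 * mq_msgsize = 8192, assuming sizeof(struct msg_msg) == 48
		 * and sizeof(struct posix_msg_tree_node) == 40:
		 *
		 *   mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 40
		 *               = 480 + 400       =   880 bytes
		 *   mq_bytes    = 880 + 10 * 8192 = 82800 bytes
		 *
		 * and mq_bytes is what gets charged against RLIMIT_MSGQUEUE
		 * below.
		 */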

		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static struct dentry *mqueue_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data)
{
	struct ipc_namespace *ns;

	if (flags & MS_KERNMOUNT) {
		ns = data;
		data = NULL;
	} else {
		ns = current->nsproxy->ipc_ns;
	}
	return mount_ns(fs_type, flags, data, ns, ns->user_ns, mqueue_fill_super);
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}
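
/*
 * Note: init_once() is registered below as the kmem_cache constructor, so
 * it runs when a slab object is first created, not on every allocation.
 * Only fields that must survive free/re-alloc cycles (here the embedded
 * VFS inode) may be initialized in it.
 */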

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, mqueue_i_callback);
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes, mq_treesize;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg;

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		free_msg(msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	/* Total amount of bytes accounted for the mqueue */
	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);

	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
				  info->attr.mq_msgsize);

	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
			 umode_t mode, bool excl)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine handles a read(2) on a queue file.  Rather than doing some
 * sort of mq_receive() here, we only expose the queue size and the
 * notification registration: the only values that are interesting from a
 * user's point of view and are not accessible through the standard mq_*
 * routines.
 */
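/*
 * The result is a single line; for example (values are made up):
 *
 *   QSIZE:129        NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 *
 * i.e. bytes currently queued, the sigev_notify type of the registered
 * notification (0 if none), the signal number used for SIGEV_SIGNAL, and
 * the pid of the registered process.
 */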
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
		 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
		 info->qsize,
		 info->notify_owner ? info->notify.sigev_notify : 0,
		 (info->notify_owner &&
		  info->notify.sigev_notify == SIGEV_SIGNAL) ?
			info->notify.sigev_signo : 0,
		 pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				      strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime =
						current_time(file_inode(filp));
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
		   struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}
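
/*
 * Example: with waiters of static_prio 130 and 120 queued (in that order,
 * head to tail), a new waiter of static_prio 125 is linked between them.
 * The list thus stays sorted by descending static_prio, and
 * wq_get_first_waiter() below, which takes list.prev, always returns the
 * tail: the waiter with the lowest static_prio value, i.e. the highest
 * priority task, FIFO among equals.
 */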

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend() from growing too
 * long; it delivers a registered notification, if one is due.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * The notification is invoked when a process is registered, no
	 * process is waiting synchronously for a message, and the state
	 * of the queue has changed from empty to not empty.  Here we are
	 * sure that no one is waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct timespec __user *u_abs_timeout,
			   ktime_t *expires, struct timespec *ts)
{
	if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
		return -EFAULT;
	if (!timespec_valid(ts))
		return -EINVAL;

	*expires = timespec_to_ktime(*ts);
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	int mq_treesize;
	unsigned long total_size;

	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return -EINVAL;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX ||
		    attr->mq_msgsize > HARD_MSGSIZEMAX)
			return -EINVAL;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
		    attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return -EINVAL;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return -EOVERFLOW;
	mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);
	total_size = attr->mq_maxmsg * attr->mq_msgsize;
	if (total_size + mq_treesize < total_size)
		return -EOVERFLOW;
	return 0;
}
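
/*
 * The ULONG_MAX/attr->mq_maxmsg test above is the usual trick for proving
 * that mq_maxmsg * mq_msgsize cannot wrap an unsigned long: for a > 0,
 * a * b overflows exactly when b > ULONG_MAX / a, and mq_maxmsg > 0 was
 * already established at the top of the function.
 */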

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
			      struct path *path, int oflag, umode_t mode,
			      struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	int ret;

	if (attr) {
		ret = mq_attr_ok(ipc_ns, attr);
		if (ret)
			return ERR_PTR(ret);
		/* store for use during create */
		path->dentry->d_fsdata = attr;
	} else {
		struct mq_attr def_attr;

		def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					 ipc_ns->mq_msg_default);
		def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					  ipc_ns->mq_msgsize_default);
		ret = mq_attr_ok(ipc_ns, &def_attr);
		if (ret)
			return ERR_PTR(ret);
	}

	mode &= ~current_umask();
	ret = vfs_create(dir, path->dentry, mode, true);
	path->dentry->d_fsdata = NULL;
	if (ret)
		return ERR_PTR(ret);
	return dentry_open(path, oflag, cred);
}

/* Opens existing queue */
static struct file *do_open(struct path *path, int oflag)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return ERR_PTR(-EINVAL);
	acc = oflag2acc[oflag & O_ACCMODE];
	if (inode_permission(d_inode(path->dentry), acc))
		return ERR_PTR(-EACCES);
	return dentry_open(path, oflag, current_cred());
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct path path;
	struct file *filp;
	struct filename *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	int ro;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	error = 0;
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);

	if (oflag & O_CREAT) {
		if (d_really_is_positive(path.dentry)) {
			/* entry already exists */
			audit_inode(name, path.dentry, 0);
			if (oflag & O_EXCL) {
				error = -EEXIST;
				goto out;
			}
			filp = do_open(&path, oflag);
		} else {
			if (ro) {
				error = ro;
				goto out;
			}
			audit_inode_parent_hidden(name, root);
			filp = do_create(ipc_ns, d_inode(root),
					 &path, oflag, mode,
					 u_attr ? &attr : NULL);
		}
	} else {
		if (d_really_is_negative(path.dentry)) {
			error = -ENOENT;
			goto out;
		}
		audit_inode(name, path.dentry, 0);
		filp = do_open(&path, oflag);
	}

	if (!IS_ERR(filp))
		fd_install(fd, filp);
	else
		error = PTR_ERR(filp);
out:
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */
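
/*
 * The race the last point above guards against: wq_sleep() may time out or
 * catch a signal at the same moment a sender picks the waiter.  The waiter
 * therefore re-checks ewp->state both before and after retaking info->lock,
 * so a STATE_READY set by pipelined_send()/pipelined_receive() is never
 * missed and the handed-over message cannot be lost.
 */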

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	wake_q_add(wake_q, receiver->task);
	/*
	 * Rely on the implicit cmpxchg barrier from wake_q_add such
	 * that we can ensure that updating receiver->state is the last
	 * write operation: As once set, the receiver can continue,
	 * and if we don't have the reference count from the wake_q,
	 * yet, at that point we can later have a use-after-free
	 * condition and bogus wakeup.
	 */
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and insert it into the queue (a message was just removed,
 * so there is guaranteed to be a free slot).
 */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	list_del(&sender->list);
	wake_q_add(wake_q, sender->task);
	sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
		    store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}

/*
 * Note: when a caller asks us to deregister (by passing a NULL pointer)
 * but does not currently own the notification, the request is silently
 * ignored.  POSIX does not explicitly define this case.
 */
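
/*
 * For SIGEV_THREAD the sigevent fields arrive repurposed by the C library
 * (this is the fd-based notify mechanism noted in the file header):
 * sigev_signo carries a netlink socket fd, and sigev_value.sival_ptr points
 * to a NOTIFY_COOKIE_LEN cookie buffer.  The cookie is parked in an skb on
 * that socket; __do_notify() later stamps it NOTIFY_WOKENUP (or
 * remove_notification() stamps NOTIFY_REMOVED) and sends it, waking the
 * library's helper thread.
 */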
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
				   sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification.sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else if (nc)
		dev_kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = f.file->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&f.file->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
					      sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fdput(f);
out:
	return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.mount = mqueue_mount,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default  = DFLT_MSGSIZE;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);
1/*
2 * POSIX message queues filesystem for Linux.
3 *
4 * Copyright (C) 2003,2004 Krzysztof Benedyczak (golbi@mat.uni.torun.pl)
5 * Michal Wronski (michal.wronski@gmail.com)
6 *
7 * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com)
8 * Lockless receive & send, fd based notify:
9 * Manfred Spraul (manfred@colorfullife.com)
10 *
11 * Audit: George Wilson (ltcgcw@us.ibm.com)
12 *
13 * This file is released under the GPL.
14 */
15
16#include <linux/capability.h>
17#include <linux/init.h>
18#include <linux/pagemap.h>
19#include <linux/file.h>
20#include <linux/mount.h>
21#include <linux/namei.h>
22#include <linux/sysctl.h>
23#include <linux/poll.h>
24#include <linux/mqueue.h>
25#include <linux/msg.h>
26#include <linux/skbuff.h>
27#include <linux/vmalloc.h>
28#include <linux/netlink.h>
29#include <linux/syscalls.h>
30#include <linux/audit.h>
31#include <linux/signal.h>
32#include <linux/mutex.h>
33#include <linux/nsproxy.h>
34#include <linux/pid.h>
35#include <linux/ipc_namespace.h>
36#include <linux/user_namespace.h>
37#include <linux/slab.h>
38
39#include <net/sock.h>
40#include "util.h"
41
42#define MQUEUE_MAGIC 0x19800202
43#define DIRENT_SIZE 20
44#define FILENT_SIZE 80
45
46#define SEND 0
47#define RECV 1
48
49#define STATE_NONE 0
50#define STATE_PENDING 1
51#define STATE_READY 2
52
53struct posix_msg_tree_node {
54 struct rb_node rb_node;
55 struct list_head msg_list;
56 int priority;
57};
58
59struct ext_wait_queue { /* queue of sleeping tasks */
60 struct task_struct *task;
61 struct list_head list;
62 struct msg_msg *msg; /* ptr of loaded message */
63 int state; /* one of STATE_* values */
64};
65
66struct mqueue_inode_info {
67 spinlock_t lock;
68 struct inode vfs_inode;
69 wait_queue_head_t wait_q;
70
71 struct rb_root msg_tree;
72 struct posix_msg_tree_node *node_cache;
73 struct mq_attr attr;
74
75 struct sigevent notify;
76 struct pid* notify_owner;
77 struct user_namespace *notify_user_ns;
78 struct user_struct *user; /* user who created, for accounting */
79 struct sock *notify_sock;
80 struct sk_buff *notify_cookie;
81
82 /* for tasks waiting for free space and messages, respectively */
83 struct ext_wait_queue e_wait_q[2];
84
85 unsigned long qsize; /* size of queue in memory (sum of all msgs) */
86};
87
88static const struct inode_operations mqueue_dir_inode_operations;
89static const struct file_operations mqueue_file_operations;
90static const struct super_operations mqueue_super_ops;
91static void remove_notification(struct mqueue_inode_info *info);
92
93static struct kmem_cache *mqueue_inode_cachep;
94
95static struct ctl_table_header * mq_sysctl_table;
96
97static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
98{
99 return container_of(inode, struct mqueue_inode_info, vfs_inode);
100}
101
102/*
103 * This routine should be called with the mq_lock held.
104 */
105static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
106{
107 return get_ipc_ns(inode->i_sb->s_fs_info);
108}
109
110static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
111{
112 struct ipc_namespace *ns;
113
114 spin_lock(&mq_lock);
115 ns = __get_ns_from_inode(inode);
116 spin_unlock(&mq_lock);
117 return ns;
118}
119
120/* Auxiliary functions to manipulate messages' list */
121static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
122{
123 struct rb_node **p, *parent = NULL;
124 struct posix_msg_tree_node *leaf;
125
126 p = &info->msg_tree.rb_node;
127 while (*p) {
128 parent = *p;
129 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
130
131 if (likely(leaf->priority == msg->m_type))
132 goto insert_msg;
133 else if (msg->m_type < leaf->priority)
134 p = &(*p)->rb_left;
135 else
136 p = &(*p)->rb_right;
137 }
138 if (info->node_cache) {
139 leaf = info->node_cache;
140 info->node_cache = NULL;
141 } else {
142 leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
143 if (!leaf)
144 return -ENOMEM;
145 rb_init_node(&leaf->rb_node);
146 INIT_LIST_HEAD(&leaf->msg_list);
147 info->qsize += sizeof(*leaf);
148 }
149 leaf->priority = msg->m_type;
150 rb_link_node(&leaf->rb_node, parent, p);
151 rb_insert_color(&leaf->rb_node, &info->msg_tree);
152insert_msg:
153 info->attr.mq_curmsgs++;
154 info->qsize += msg->m_ts;
155 list_add_tail(&msg->m_list, &leaf->msg_list);
156 return 0;
157}
158
159static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
160{
161 struct rb_node **p, *parent = NULL;
162 struct posix_msg_tree_node *leaf;
163 struct msg_msg *msg;
164
165try_again:
166 p = &info->msg_tree.rb_node;
167 while (*p) {
168 parent = *p;
169 /*
170 * During insert, low priorities go to the left and high to the
171 * right. On receive, we want the highest priorities first, so
172 * walk all the way to the right.
173 */
174 p = &(*p)->rb_right;
175 }
176 if (!parent) {
177 if (info->attr.mq_curmsgs) {
178 pr_warn_once("Inconsistency in POSIX message queue, "
179 "no tree element, but supposedly messages "
180 "should exist!\n");
181 info->attr.mq_curmsgs = 0;
182 }
183 return NULL;
184 }
185 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
186 if (unlikely(list_empty(&leaf->msg_list))) {
187 pr_warn_once("Inconsistency in POSIX message queue, "
188 "empty leaf node but we haven't implemented "
189 "lazy leaf delete!\n");
190 rb_erase(&leaf->rb_node, &info->msg_tree);
191 if (info->node_cache) {
192 info->qsize -= sizeof(*leaf);
193 kfree(leaf);
194 } else {
195 info->node_cache = leaf;
196 }
197 goto try_again;
198 } else {
199 msg = list_first_entry(&leaf->msg_list,
200 struct msg_msg, m_list);
201 list_del(&msg->m_list);
202 if (list_empty(&leaf->msg_list)) {
203 rb_erase(&leaf->rb_node, &info->msg_tree);
204 if (info->node_cache) {
205 info->qsize -= sizeof(*leaf);
206 kfree(leaf);
207 } else {
208 info->node_cache = leaf;
209 }
210 }
211 }
212 info->attr.mq_curmsgs--;
213 info->qsize -= msg->m_ts;
214 return msg;
215}
216
217static struct inode *mqueue_get_inode(struct super_block *sb,
218 struct ipc_namespace *ipc_ns, umode_t mode,
219 struct mq_attr *attr)
220{
221 struct user_struct *u = current_user();
222 struct inode *inode;
223 int ret = -ENOMEM;
224
225 inode = new_inode(sb);
226 if (!inode)
227 goto err;
228
229 inode->i_ino = get_next_ino();
230 inode->i_mode = mode;
231 inode->i_uid = current_fsuid();
232 inode->i_gid = current_fsgid();
233 inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME;
234
235 if (S_ISREG(mode)) {
236 struct mqueue_inode_info *info;
237 unsigned long mq_bytes, mq_treesize;
238
239 inode->i_fop = &mqueue_file_operations;
240 inode->i_size = FILENT_SIZE;
241 /* mqueue specific info */
242 info = MQUEUE_I(inode);
243 spin_lock_init(&info->lock);
244 init_waitqueue_head(&info->wait_q);
245 INIT_LIST_HEAD(&info->e_wait_q[0].list);
246 INIT_LIST_HEAD(&info->e_wait_q[1].list);
247 info->notify_owner = NULL;
248 info->notify_user_ns = NULL;
249 info->qsize = 0;
250 info->user = NULL; /* set when all is ok */
251 info->msg_tree = RB_ROOT;
252 info->node_cache = NULL;
253 memset(&info->attr, 0, sizeof(info->attr));
254 info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
255 ipc_ns->mq_msg_default);
256 info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
257 ipc_ns->mq_msgsize_default);
258 if (attr) {
259 info->attr.mq_maxmsg = attr->mq_maxmsg;
260 info->attr.mq_msgsize = attr->mq_msgsize;
261 }
262 /*
263 * We used to allocate a static array of pointers and account
264 * the size of that array as well as one msg_msg struct per
265 * possible message into the queue size. That's no longer
266 * accurate as the queue is now an rbtree and will grow and
267 * shrink depending on usage patterns. We can, however, still
268 * account one msg_msg struct per message, but the nodes are
269 * allocated depending on priority usage, and most programs
270 * only use one, or a handful, of priorities. However, since
271 * this is pinned memory, we need to assume worst case, so
272 * that means the min(mq_maxmsg, max_priorities) * struct
273 * posix_msg_tree_node.
274 */
275 mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
276 min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
277 sizeof(struct posix_msg_tree_node);
278
279 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
280 info->attr.mq_msgsize);
281
282 spin_lock(&mq_lock);
283 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
284 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
285 spin_unlock(&mq_lock);
286 /* mqueue_evict_inode() releases info->messages */
287 ret = -EMFILE;
288 goto out_inode;
289 }
290 u->mq_bytes += mq_bytes;
291 spin_unlock(&mq_lock);
292
293 /* all is ok */
294 info->user = get_uid(u);
295 } else if (S_ISDIR(mode)) {
296 inc_nlink(inode);
297 /* Some things misbehave if size == 0 on a directory */
298 inode->i_size = 2 * DIRENT_SIZE;
299 inode->i_op = &mqueue_dir_inode_operations;
300 inode->i_fop = &simple_dir_operations;
301 }
302
303 return inode;
304out_inode:
305 iput(inode);
306err:
307 return ERR_PTR(ret);
308}
309
310static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
311{
312 struct inode *inode;
313 struct ipc_namespace *ns = data;
314
315 sb->s_blocksize = PAGE_CACHE_SIZE;
316 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
317 sb->s_magic = MQUEUE_MAGIC;
318 sb->s_op = &mqueue_super_ops;
319
320 inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
321 if (IS_ERR(inode))
322 return PTR_ERR(inode);
323
324 sb->s_root = d_make_root(inode);
325 if (!sb->s_root)
326 return -ENOMEM;
327 return 0;
328}
329
330static struct dentry *mqueue_mount(struct file_system_type *fs_type,
331 int flags, const char *dev_name,
332 void *data)
333{
334 if (!(flags & MS_KERNMOUNT))
335 data = current->nsproxy->ipc_ns;
336 return mount_ns(fs_type, flags, data, mqueue_fill_super);
337}
338
339static void init_once(void *foo)
340{
341 struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
342
343 inode_init_once(&p->vfs_inode);
344}
345
346static struct inode *mqueue_alloc_inode(struct super_block *sb)
347{
348 struct mqueue_inode_info *ei;
349
350 ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
351 if (!ei)
352 return NULL;
353 return &ei->vfs_inode;
354}
355
356static void mqueue_i_callback(struct rcu_head *head)
357{
358 struct inode *inode = container_of(head, struct inode, i_rcu);
359 kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
360}
361
362static void mqueue_destroy_inode(struct inode *inode)
363{
364 call_rcu(&inode->i_rcu, mqueue_i_callback);
365}
366
367static void mqueue_evict_inode(struct inode *inode)
368{
369 struct mqueue_inode_info *info;
370 struct user_struct *user;
371 unsigned long mq_bytes, mq_treesize;
372 struct ipc_namespace *ipc_ns;
373 struct msg_msg *msg;
374
375 clear_inode(inode);
376
377 if (S_ISDIR(inode->i_mode))
378 return;
379
380 ipc_ns = get_ns_from_inode(inode);
381 info = MQUEUE_I(inode);
382 spin_lock(&info->lock);
383 while ((msg = msg_get(info)) != NULL)
384 free_msg(msg);
385 kfree(info->node_cache);
386 spin_unlock(&info->lock);
387
388 /* Total amount of bytes accounted for the mqueue */
389 mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
390 min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
391 sizeof(struct posix_msg_tree_node);
392
393 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
394 info->attr.mq_msgsize);
395
396 user = info->user;
397 if (user) {
398 spin_lock(&mq_lock);
399 user->mq_bytes -= mq_bytes;
400 /*
401 * get_ns_from_inode() ensures that the
402 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
403 * to which we now hold a reference, or it is NULL.
404 * We can't put it here under mq_lock, though.
405 */
406 if (ipc_ns)
407 ipc_ns->mq_queues_count--;
408 spin_unlock(&mq_lock);
409 free_uid(user);
410 }
411 if (ipc_ns)
412 put_ipc_ns(ipc_ns);
413}
414
415static int mqueue_create(struct inode *dir, struct dentry *dentry,
416 umode_t mode, struct nameidata *nd)
417{
418 struct inode *inode;
419 struct mq_attr *attr = dentry->d_fsdata;
420 int error;
421 struct ipc_namespace *ipc_ns;
422
423 spin_lock(&mq_lock);
424 ipc_ns = __get_ns_from_inode(dir);
425 if (!ipc_ns) {
426 error = -EACCES;
427 goto out_unlock;
428 }
429 if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
430 (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
431 !capable(CAP_SYS_RESOURCE))) {
432 error = -ENOSPC;
433 goto out_unlock;
434 }
435 ipc_ns->mq_queues_count++;
436 spin_unlock(&mq_lock);
437
438 inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
439 if (IS_ERR(inode)) {
440 error = PTR_ERR(inode);
441 spin_lock(&mq_lock);
442 ipc_ns->mq_queues_count--;
443 goto out_unlock;
444 }
445
446 put_ipc_ns(ipc_ns);
447 dir->i_size += DIRENT_SIZE;
448 dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
449
450 d_instantiate(dentry, inode);
451 dget(dentry);
452 return 0;
453out_unlock:
454 spin_unlock(&mq_lock);
455 if (ipc_ns)
456 put_ipc_ns(ipc_ns);
457 return error;
458}
459
460static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
461{
462 struct inode *inode = dentry->d_inode;
463
464 dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
465 dir->i_size -= DIRENT_SIZE;
466 drop_nlink(inode);
467 dput(dentry);
468 return 0;
469}
470
471/*
472* This is routine for system read from queue file.
473* To avoid mess with doing here some sort of mq_receive we allow
474* to read only queue size & notification info (the only values
475* that are interesting from user point of view and aren't accessible
476* through std routines)
477*/
478static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
479 size_t count, loff_t *off)
480{
481 struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
482 char buffer[FILENT_SIZE];
483 ssize_t ret;
484
485 spin_lock(&info->lock);
486 snprintf(buffer, sizeof(buffer),
487 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
488 info->qsize,
489 info->notify_owner ? info->notify.sigev_notify : 0,
490 (info->notify_owner &&
491 info->notify.sigev_notify == SIGEV_SIGNAL) ?
492 info->notify.sigev_signo : 0,
493 pid_vnr(info->notify_owner));
494 spin_unlock(&info->lock);
495 buffer[sizeof(buffer)-1] = '\0';
496
497 ret = simple_read_from_buffer(u_data, count, off, buffer,
498 strlen(buffer));
499 if (ret <= 0)
500 return ret;
501
502 filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
503 return ret;
504}
505
506static int mqueue_flush_file(struct file *filp, fl_owner_t id)
507{
508 struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
509
510 spin_lock(&info->lock);
511 if (task_tgid(current) == info->notify_owner)
512 remove_notification(info);
513
514 spin_unlock(&info->lock);
515 return 0;
516}
517
518static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
519{
520 struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
521 int retval = 0;
522
523 poll_wait(filp, &info->wait_q, poll_tab);
524
525 spin_lock(&info->lock);
526 if (info->attr.mq_curmsgs)
527 retval = POLLIN | POLLRDNORM;
528
529 if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
530 retval |= POLLOUT | POLLWRNORM;
531 spin_unlock(&info->lock);
532
533 return retval;
534}
535
536/* Adds current to info->e_wait_q[sr] before element with smaller prio */
537static void wq_add(struct mqueue_inode_info *info, int sr,
538 struct ext_wait_queue *ewp)
539{
540 struct ext_wait_queue *walk;
541
542 ewp->task = current;
543
544 list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
545 if (walk->task->static_prio <= current->static_prio) {
546 list_add_tail(&ewp->list, &walk->list);
547 return;
548 }
549 }
550 list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
551}

/*
 * Puts the current task to sleep. The caller must hold the queue lock;
 * on return the lock is no longer held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
                    ktime_t *timeout, struct ext_wait_queue *ewp)
{
        int retval;
        signed long time;

        wq_add(info, sr, ewp);

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);

                spin_unlock(&info->lock);
                time = schedule_hrtimeout_range_clock(timeout, 0,
                        HRTIMER_MODE_ABS, CLOCK_REALTIME);

                /* The other side is mid-handoff; wait for it to finish. */
                while (ewp->state == STATE_PENDING)
                        cpu_relax();

                if (ewp->state == STATE_READY) {
                        retval = 0;
                        goto out;
                }
                spin_lock(&info->lock);
                if (ewp->state == STATE_READY) {
                        retval = 0;
                        goto out_unlock;
                }
                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                if (time == 0) {
                        retval = -ETIMEDOUT;
                        break;
                }
        }
        list_del(&ewp->list);
out_unlock:
        spin_unlock(&info->lock);
out:
        return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
                struct mqueue_inode_info *info, int sr)
{
        struct list_head *ptr;

        ptr = info->e_wait_q[sr].list.prev;
        if (ptr == &info->e_wait_q[sr].list)
                return NULL;
        return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
        ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend() from growing too
 * long: it delivers the registered asynchronous notification, if any.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
        /*
         * The notification is invoked when a process has registered for
         * it, no process is waiting synchronously for a message, AND the
         * state of the queue changed from empty to not empty. At this
         * point we are sure that nobody is waiting synchronously.
         */
        if (info->notify_owner &&
            info->attr.mq_curmsgs == 1) {
                struct siginfo sig_i;
                switch (info->notify.sigev_notify) {
                case SIGEV_NONE:
                        break;
                case SIGEV_SIGNAL:
                        /* sends signal */

                        sig_i.si_signo = info->notify.sigev_signo;
                        sig_i.si_errno = 0;
                        sig_i.si_code = SI_MESGQ;
                        sig_i.si_value = info->notify.sigev_value;
                        /* map current pid/uid into info->owner's namespaces */
                        rcu_read_lock();
                        sig_i.si_pid = task_tgid_nr_ns(current,
                                                ns_of_pid(info->notify_owner));
                        sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
                        rcu_read_unlock();

                        kill_pid_info(info->notify.sigev_signo,
                                      &sig_i, info->notify_owner);
                        break;
                case SIGEV_THREAD:
                        set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
                        netlink_sendskb(info->notify_sock, info->notify_cookie);
                        break;
                }
                /* after notification unregisters process */
                put_pid(info->notify_owner);
                put_user_ns(info->notify_user_ns);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
        }
        wake_up(&info->wait_q);
}

static int prepare_timeout(const struct timespec __user *u_abs_timeout,
                           ktime_t *expires, struct timespec *ts)
{
        if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
                return -EFAULT;
        if (!timespec_valid(ts))
                return -EINVAL;

        *expires = timespec_to_ktime(*ts);
        return 0;
}
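
/*
 * Illustrative userspace sketch: the timespec copied in above is an
 * *absolute* CLOCK_REALTIME expiry, matching the clock that wq_sleep()
 * arms its hrtimer with. Building "now + 5 seconds" for mq_timedsend()/
 * mq_timedreceive():
 *
 *	struct timespec abs_timeout;
 *	clock_gettime(CLOCK_REALTIME, &abs_timeout);
 *	abs_timeout.tv_sec += 5;
 */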

static void remove_notification(struct mqueue_inode_info *info)
{
        if (info->notify_owner != NULL &&
            info->notify.sigev_notify == SIGEV_THREAD) {
                set_cookie(info->notify_cookie, NOTIFY_REMOVED);
                netlink_sendskb(info->notify_sock, info->notify_cookie);
        }
        put_pid(info->notify_owner);
        put_user_ns(info->notify_user_ns);
        info->notify_owner = NULL;
        info->notify_user_ns = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
        int mq_treesize;
        unsigned long total_size;

        if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
                return -EINVAL;
        if (capable(CAP_SYS_RESOURCE)) {
                if (attr->mq_maxmsg > HARD_MSGMAX ||
                    attr->mq_msgsize > HARD_MSGSIZEMAX)
                        return -EINVAL;
        } else {
                if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
                    attr->mq_msgsize > ipc_ns->mq_msgsize_max)
                        return -EINVAL;
        }
        /* check for overflow */
        if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
                return -EOVERFLOW;
        mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
                min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
                sizeof(struct posix_msg_tree_node);
        total_size = attr->mq_maxmsg * attr->mq_msgsize;
        if (total_size + mq_treesize < total_size)
                return -EOVERFLOW;
        return 0;
}
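
/*
 * Worked example of the checks above (illustrative; the exact sizeof()
 * values are architecture dependent): with mq_maxmsg = 10 and
 * mq_msgsize = 8192, total_size = 10 * 8192 = 81920 bytes, and
 * mq_treesize accounts for 10 message headers plus min(10, MQ_PRIO_MAX)
 * = 10 per-priority tree nodes. The first overflow check guards the
 * multiplication, the second guards the final addition.
 */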

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
                              struct dentry *dentry, int oflag, umode_t mode,
                              struct mq_attr *attr)
{
        const struct cred *cred = current_cred();
        struct file *result;
        int ret;

        if (attr) {
                ret = mq_attr_ok(ipc_ns, attr);
                if (ret)
                        goto out;
                /* store for use during create */
                dentry->d_fsdata = attr;
        } else {
                struct mq_attr def_attr;

                def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
                                         ipc_ns->mq_msg_default);
                def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
                                          ipc_ns->mq_msgsize_default);
                ret = mq_attr_ok(ipc_ns, &def_attr);
                if (ret)
                        goto out;
        }

        mode &= ~current_umask();
        ret = mnt_want_write(ipc_ns->mq_mnt);
        if (ret)
                goto out;
        ret = vfs_create(dir->d_inode, dentry, mode, NULL);
        dentry->d_fsdata = NULL;
        if (ret)
                goto out_drop_write;

        result = dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
        /*
         * dentry_open() took a persistent mnt_want_write(),
         * so we can now drop this one.
         */
        mnt_drop_write(ipc_ns->mq_mnt);
        return result;

out_drop_write:
        mnt_drop_write(ipc_ns->mq_mnt);
out:
        dput(dentry);
        mntput(ipc_ns->mq_mnt);
        return ERR_PTR(ret);
}

/* Opens existing queue */
static struct file *do_open(struct ipc_namespace *ipc_ns,
                            struct dentry *dentry, int oflag)
{
        int ret;
        const struct cred *cred = current_cred();

        static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
                                                  MAY_READ | MAY_WRITE };

        if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
                ret = -EINVAL;
                goto err;
        }

        if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
                ret = -EACCES;
                goto err;
        }

        return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);

err:
        dput(dentry);
        mntput(ipc_ns->mq_mnt);
        return ERR_PTR(ret);
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
                struct mq_attr __user *, u_attr)
{
        struct dentry *dentry;
        struct file *filp;
        char *name;
        struct mq_attr attr;
        int fd, error;
        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

        if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
                return -EFAULT;

        audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

        name = getname(u_name);
        if (IS_ERR(name))
                return PTR_ERR(name);

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                goto out_putname;

        mutex_lock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
        dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
        if (IS_ERR(dentry)) {
                error = PTR_ERR(dentry);
                goto out_putfd;
        }
        mntget(ipc_ns->mq_mnt);

        if (oflag & O_CREAT) {
                if (dentry->d_inode) {  /* entry already exists */
                        audit_inode(name, dentry);
                        if (oflag & O_EXCL) {
                                error = -EEXIST;
                                goto out;
                        }
                        filp = do_open(ipc_ns, dentry, oflag);
                } else {
                        filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
                                         dentry, oflag, mode,
                                         u_attr ? &attr : NULL);
                }
        } else {
                if (!dentry->d_inode) {
                        error = -ENOENT;
                        goto out;
                }
                audit_inode(name, dentry);
                filp = do_open(ipc_ns, dentry, oflag);
        }

        if (IS_ERR(filp)) {
                error = PTR_ERR(filp);
                goto out_putfd;
        }

        fd_install(fd, filp);
        goto out_upsem;

out:
        dput(dentry);
        mntput(ipc_ns->mq_mnt);
out_putfd:
        put_unused_fd(fd);
        fd = error;
out_upsem:
        mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
out_putname:
        putname(name);
        return fd;
}
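
/*
 * Illustrative userspace sketch of the syscall above (link with -lrt;
 * the queue name and attributes are placeholders):
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 256 };
 *	mqd_t mqd = mq_open("/myq", O_CREAT | O_RDWR, 0600, &attr);
 *	if (mqd == (mqd_t) -1)
 *		perror("mq_open");
 *
 * Passing NULL instead of &attr picks the per-namespace defaults computed
 * in do_create(); the descriptor comes back with O_CLOEXEC set, courtesy
 * of get_unused_fd_flags(O_CLOEXEC) above.
 */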

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
        int err;
        char *name;
        struct dentry *dentry;
        struct inode *inode = NULL;
        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

        name = getname(u_name);
        if (IS_ERR(name))
                return PTR_ERR(name);

        mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex,
                          I_MUTEX_PARENT);
        dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                goto out_unlock;
        }

        if (!dentry->d_inode) {
                err = -ENOENT;
                goto out_err;
        }

        inode = dentry->d_inode;
        if (inode)
                ihold(inode);
        err = mnt_want_write(ipc_ns->mq_mnt);
        if (err)
                goto out_err;
        err = vfs_unlink(dentry->d_parent->d_inode, dentry);
        mnt_drop_write(ipc_ns->mq_mnt);
out_err:
        dput(dentry);

out_unlock:
        mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
        putname(name);
        if (inode)
                iput(inode);

        return err;
}
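
/*
 * Illustrative userspace sketch: as with unlink(2) on a regular file,
 * only the name is removed immediately; the queue itself is destroyed
 * once the last descriptor referring to it is closed:
 *
 *	mq_unlink("/myq");	// name gone; open descriptors still work
 */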

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message store. If there is a waiting receiver, then it
 * bypasses the message store and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
                                  struct msg_msg *message,
                                  struct ext_wait_queue *receiver)
{
        receiver->msg = message;
        list_del(&receiver->list);
        receiver->state = STATE_PENDING;
        wake_up_process(receiver->task);
        /* order the msg store above against setting STATE_READY */
        smp_wmb();
        receiver->state = STATE_READY;
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (we hold info->lock
 * and just freed a slot, so there is room for it). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
        struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

        if (!sender) {
                /* for poll */
                wake_up_interruptible(&info->wait_q);
                return;
        }
        if (msg_insert(sender->msg, info))
                return;
        list_del(&sender->list);
        sender->state = STATE_PENDING;
        wake_up_process(sender->task);
        smp_wmb();
        sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int, msg_prio,
                const struct timespec __user *, u_abs_timeout)
{
        struct file *filp;
        struct inode *inode;
        struct ext_wait_queue wait;
        struct ext_wait_queue *receiver;
        struct msg_msg *msg_ptr;
        struct mqueue_inode_info *info;
        ktime_t expires, *timeout = NULL;
        struct timespec ts;
        struct posix_msg_tree_node *new_leaf = NULL;
        int ret = 0;

        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &expires, &ts);
                if (res)
                        return res;
                timeout = &expires;
        }

        if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
                return -EINVAL;

        audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);

        filp = fget(mqdes);
        if (unlikely(!filp)) {
                ret = -EBADF;
                goto out;
        }

        inode = filp->f_path.dentry->d_inode;
        if (unlikely(filp->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
        audit_inode(NULL, filp->f_path.dentry);

        if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
                ret = -EBADF;
                goto out_fput;
        }

        if (unlikely(msg_len > info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /* First try to allocate memory, before doing anything with
         * existing queues. */
        msg_ptr = load_msg(u_msg_ptr, msg_len);
        if (IS_ERR(msg_ptr)) {
                ret = PTR_ERR(msg_ptr);
                goto out_fput;
        }
        msg_ptr->m_ts = msg_len;
        msg_ptr->m_type = msg_prio;

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                rb_init_node(&new_leaf->rb_node);
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
                info->qsize += sizeof(*new_leaf);
                new_leaf = NULL;
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
                if (filp->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
                        wait.msg = (void *) msg_ptr;
                        wait.state = STATE_NONE;
                        ret = wq_sleep(info, SEND, timeout, &wait);
                        /*
                         * wq_sleep must be called with info->lock held, and
                         * returns with the lock released
                         */
                        goto out_free;
                }
        } else {
                receiver = wq_get_first_waiter(info, RECV);
                if (receiver) {
                        pipelined_send(info, msg_ptr, receiver);
                } else {
                        /* adds message to the queue */
                        ret = msg_insert(msg_ptr, info);
                        if (ret)
                                goto out_unlock;
                        __do_notify(info);
                }
                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                CURRENT_TIME;
        }
out_unlock:
        spin_unlock(&info->lock);
out_free:
        if (ret)
                free_msg(msg_ptr);
out_fput:
        fput(filp);
out:
        return ret;
}
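
/*
 * Illustrative userspace sketch of the send path (placeholder names;
 * abs_timeout built as in the prepare_timeout() example above):
 *
 *	const char msg[] = "hello";
 *	if (mq_timedsend(mqd, msg, sizeof(msg), 1, &abs_timeout) == -1 &&
 *	    errno == ETIMEDOUT)
 *		fprintf(stderr, "queue stayed full\n");
 *
 * A full queue blocks the caller (or fails with EAGAIN under O_NONBLOCK);
 * if a receiver is already waiting, pipelined_send() hands the message
 * over without it ever touching the rbtree.
 */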

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int __user *, u_msg_prio,
                const struct timespec __user *, u_abs_timeout)
{
        ssize_t ret;
        struct msg_msg *msg_ptr;
        struct file *filp;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct ext_wait_queue wait;
        ktime_t expires, *timeout = NULL;
        struct timespec ts;
        struct posix_msg_tree_node *new_leaf = NULL;

        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &expires, &ts);
                if (res)
                        return res;
                timeout = &expires;
        }

        audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);

        filp = fget(mqdes);
        if (unlikely(!filp)) {
                ret = -EBADF;
                goto out;
        }

        inode = filp->f_path.dentry->d_inode;
        if (unlikely(filp->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
        audit_inode(NULL, filp->f_path.dentry);

        if (unlikely(!(filp->f_mode & FMODE_READ))) {
                ret = -EBADF;
                goto out_fput;
        }

        /* checks if buffer is big enough */
        if (unlikely(msg_len < info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                rb_init_node(&new_leaf->rb_node);
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
                info->qsize += sizeof(*new_leaf);
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == 0) {
                if (filp->f_flags & O_NONBLOCK) {
                        spin_unlock(&info->lock);
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
                        wait.state = STATE_NONE;
                        ret = wq_sleep(info, RECV, timeout, &wait);
                        msg_ptr = wait.msg;
                }
        } else {
                msg_ptr = msg_get(info);

                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                CURRENT_TIME;

                /* There is now free space in queue. */
                pipelined_receive(info);
                spin_unlock(&info->lock);
                ret = 0;
        }
        if (ret == 0) {
                ret = msg_ptr->m_ts;

                if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
                        store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
                        ret = -EFAULT;
                }
                free_msg(msg_ptr);
        }
out_fput:
        fput(filp);
out:
        return ret;
}
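
/*
 * Illustrative userspace sketch of the receive path: the buffer must be
 * at least mq_msgsize bytes or the call fails with EMSGSIZE, per the
 * check above (placeholder names):
 *
 *	struct mq_attr attr;
 *	unsigned int prio;
 *	mq_getattr(mqd, &attr);
 *	char *buf = malloc(attr.mq_msgsize);
 *	ssize_t n = mq_timedreceive(mqd, buf, attr.mq_msgsize, &prio,
 *				    &abs_timeout);
 */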

/*
 * Note: a request to deregister (u_notification == NULL) from a caller
 * that is not the current owner of the notification is silently ignored.
 * POSIX does not explicitly define this case.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                const struct sigevent __user *, u_notification)
{
        int ret;
        struct file *filp;
        struct sock *sock;
        struct inode *inode;
        struct sigevent notification;
        struct mqueue_inode_info *info;
        struct sk_buff *nc;

        if (u_notification) {
                if (copy_from_user(&notification, u_notification,
                                        sizeof(struct sigevent)))
                        return -EFAULT;
        }

        audit_mq_notify(mqdes, u_notification ? &notification : NULL);

        nc = NULL;
        sock = NULL;
        if (u_notification != NULL) {
                if (unlikely(notification.sigev_notify != SIGEV_NONE &&
                             notification.sigev_notify != SIGEV_SIGNAL &&
                             notification.sigev_notify != SIGEV_THREAD))
                        return -EINVAL;
                if (notification.sigev_notify == SIGEV_SIGNAL &&
                        !valid_signal(notification.sigev_signo)) {
                        return -EINVAL;
                }
                if (notification.sigev_notify == SIGEV_THREAD) {
                        long timeo;

                        /* create the notify skb */
                        nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
                        if (!nc) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        if (copy_from_user(nc->data,
                                        notification.sigev_value.sival_ptr,
                                        NOTIFY_COOKIE_LEN)) {
                                ret = -EFAULT;
                                goto out;
                        }

                        /* TODO: add a header? */
                        skb_put(nc, NOTIFY_COOKIE_LEN);
                        /* and attach it to the socket */
retry:
                        /*
                         * For SIGEV_THREAD the C library passes the fd of
                         * its netlink socket in sigev_signo.
                         */
                        filp = fget(notification.sigev_signo);
                        if (!filp) {
                                ret = -EBADF;
                                goto out;
                        }
                        sock = netlink_getsockbyfilp(filp);
                        fput(filp);
                        if (IS_ERR(sock)) {
                                ret = PTR_ERR(sock);
                                sock = NULL;
                                goto out;
                        }

                        timeo = MAX_SCHEDULE_TIMEOUT;
                        ret = netlink_attachskb(sock, nc, &timeo, NULL);
                        if (ret == 1)
                                goto retry;
                        if (ret) {
                                sock = NULL;
                                nc = NULL;
                                goto out;
                        }
                }
        }

        filp = fget(mqdes);
        if (!filp) {
                ret = -EBADF;
                goto out;
        }

        inode = filp->f_path.dentry->d_inode;
        if (unlikely(filp->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);

        ret = 0;
        spin_lock(&info->lock);
        if (u_notification == NULL) {
                if (info->notify_owner == task_tgid(current)) {
                        remove_notification(info);
                        inode->i_atime = inode->i_ctime = CURRENT_TIME;
                }
        } else if (info->notify_owner != NULL) {
                ret = -EBUSY;
        } else {
                switch (notification.sigev_notify) {
                case SIGEV_NONE:
                        info->notify.sigev_notify = SIGEV_NONE;
                        break;
                case SIGEV_THREAD:
                        info->notify_sock = sock;
                        info->notify_cookie = nc;
                        sock = NULL;
                        nc = NULL;
                        info->notify.sigev_notify = SIGEV_THREAD;
                        break;
                case SIGEV_SIGNAL:
                        info->notify.sigev_signo = notification.sigev_signo;
                        info->notify.sigev_value = notification.sigev_value;
                        info->notify.sigev_notify = SIGEV_SIGNAL;
                        break;
                }

                info->notify_owner = get_pid(task_tgid(current));
                info->notify_user_ns = get_user_ns(current_user_ns());
                inode->i_atime = inode->i_ctime = CURRENT_TIME;
        }
        spin_unlock(&info->lock);
out_fput:
        fput(filp);
out:
        if (sock) {
                netlink_detachskb(sock, nc);
        } else if (nc) {
                dev_kfree_skb(nc);
        }
        return ret;
}
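
/*
 * Illustrative userspace sketch of SIGEV_SIGNAL registration (the signal
 * choice is a placeholder). Registration is one-shot: once __do_notify()
 * fires it clears notify_owner, so the process must call mq_notify()
 * again before draining the queue if it wants further notifications:
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	if (mq_notify(mqd, &sev) == -1)
 *		perror("mq_notify");	// EBUSY: another process owns it
 */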

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
                const struct mq_attr __user *, u_mqstat,
                struct mq_attr __user *, u_omqstat)
{
        int ret;
        struct mq_attr mqstat, omqstat;
        struct file *filp;
        struct inode *inode;
        struct mqueue_inode_info *info;

        if (u_mqstat != NULL) {
                if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
                        return -EFAULT;
                if (mqstat.mq_flags & (~O_NONBLOCK))
                        return -EINVAL;
        }

        filp = fget(mqdes);
        if (!filp) {
                ret = -EBADF;
                goto out;
        }

        inode = filp->f_path.dentry->d_inode;
        if (unlikely(filp->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);

        spin_lock(&info->lock);

        omqstat = info->attr;
        omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
        if (u_mqstat) {
                audit_mq_getsetattr(mqdes, &mqstat);
                spin_lock(&filp->f_lock);
                if (mqstat.mq_flags & O_NONBLOCK)
                        filp->f_flags |= O_NONBLOCK;
                else
                        filp->f_flags &= ~O_NONBLOCK;
                spin_unlock(&filp->f_lock);

                inode->i_atime = inode->i_ctime = CURRENT_TIME;
        }

        spin_unlock(&info->lock);

        ret = 0;
        if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
                                              sizeof(struct mq_attr)))
                ret = -EFAULT;

out_fput:
        fput(filp);
out:
        return ret;
}
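
/*
 * Illustrative userspace sketch: the only attribute mq_setattr() can
 * change is the O_NONBLOCK flag, as enforced above; mq_maxmsg and
 * mq_msgsize are fixed at creation time:
 *
 *	struct mq_attr newattr = { .mq_flags = O_NONBLOCK }, oldattr;
 *	mq_setattr(mqd, &newattr, &oldattr);	// enable non-blocking I/O
 */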

static const struct inode_operations mqueue_dir_inode_operations = {
        .lookup = simple_lookup,
        .create = mqueue_create,
        .unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
        .flush = mqueue_flush_file,
        .poll = mqueue_poll_file,
        .read = mqueue_read_file,
        .llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
        .alloc_inode = mqueue_alloc_inode,
        .destroy_inode = mqueue_destroy_inode,
        .evict_inode = mqueue_evict_inode,
        .statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
        .name = "mqueue",
        .mount = mqueue_mount,
        .kill_sb = kill_litter_super,
};

int mq_init_ns(struct ipc_namespace *ns)
{
        ns->mq_queues_count = 0;
        ns->mq_queues_max = DFLT_QUEUESMAX;
        ns->mq_msg_max = DFLT_MSGMAX;
        ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
        ns->mq_msg_default = DFLT_MSG;
        ns->mq_msgsize_default = DFLT_MSGSIZE;

        ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
        if (IS_ERR(ns->mq_mnt)) {
                int err = PTR_ERR(ns->mq_mnt);
                ns->mq_mnt = NULL;
                return err;
        }
        return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
        ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
        kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
        int error;

        mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
                                sizeof(struct mqueue_inode_info), 0,
                                SLAB_HWCACHE_ALIGN, init_once);
        if (mqueue_inode_cachep == NULL)
                return -ENOMEM;

        /* ignore failures - they are not fatal */
        mq_sysctl_table = mq_register_sysctl_table();

        error = register_filesystem(&mqueue_fs_type);
        if (error)
                goto out_sysctl;

        spin_lock_init(&mq_lock);

        error = mq_init_ns(&init_ipc_ns);
        if (error)
                goto out_filesystem;

        return 0;

out_filesystem:
        unregister_filesystem(&mqueue_fs_type);
out_sysctl:
        if (mq_sysctl_table)
                unregister_sysctl_table(mq_sysctl_table);
        kmem_cache_destroy(mqueue_inode_cachep);
        return error;
}

__initcall(init_mqueue_fs);