Linux Audio

Check our new training course

Loading...
v5.14.15
   1/*
   2 * POSIX message queues filesystem for Linux.
   3 *
   4 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
   5 *                          Michal Wronski          (michal.wronski@gmail.com)
   6 *
   7 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
   8 * Lockless receive & send, fd based notify:
   9 *			    Manfred Spraul	    (manfred@colorfullife.com)
  10 *
  11 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
  12 *
  13 * This file is released under the GPL.
  14 */
  15
  16#include <linux/capability.h>
  17#include <linux/init.h>
  18#include <linux/pagemap.h>
  19#include <linux/file.h>
  20#include <linux/mount.h>
  21#include <linux/fs_context.h>
  22#include <linux/namei.h>
  23#include <linux/sysctl.h>
  24#include <linux/poll.h>
  25#include <linux/mqueue.h>
  26#include <linux/msg.h>
  27#include <linux/skbuff.h>
  28#include <linux/vmalloc.h>
  29#include <linux/netlink.h>
  30#include <linux/syscalls.h>
  31#include <linux/audit.h>
  32#include <linux/signal.h>
  33#include <linux/mutex.h>
  34#include <linux/nsproxy.h>
  35#include <linux/pid.h>
  36#include <linux/ipc_namespace.h>
  37#include <linux/user_namespace.h>
  38#include <linux/slab.h>
  39#include <linux/sched/wake_q.h>
  40#include <linux/sched/signal.h>
  41#include <linux/sched/user.h>
  42
  43#include <net/sock.h>
  44#include "util.h"
  45
  46struct mqueue_fs_context {
  47	struct ipc_namespace	*ipc_ns;
  48};
  49
  50#define MQUEUE_MAGIC	0x19800202
  51#define DIRENT_SIZE	20
  52#define FILENT_SIZE	80
  53
  54#define SEND		0
  55#define RECV		1
  56
  57#define STATE_NONE	0
  58#define STATE_READY	1
  59
  60struct posix_msg_tree_node {
  61	struct rb_node		rb_node;
  62	struct list_head	msg_list;
  63	int			priority;
  64};
  65
  66/*
  67 * Locking:
  68 *
  69 * Accesses to a message queue are synchronized by acquiring info->lock.
  70 *
  71 * There are two notable exceptions:
  72 * - The actual wakeup of a sleeping task is performed using the wake_q
  73 *   framework. info->lock is already released when wake_up_q is called.
  74 * - The exit codepaths after sleeping check ext_wait_queue->state without
  75 *   any locks. If it is STATE_READY, then the syscall is completed without
  76 *   acquiring info->lock.
  77 *
  78 * MQ_BARRIER:
  79 * To achieve proper release/acquire memory barrier pairing, the state is set to
  80 * STATE_READY with smp_store_release(), and it is read with READ_ONCE followed
  81 * by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe() is used.
  82 *
  83 * This prevents the following races:
  84 *
  85 * 1) With the simple wake_q_add(), the task could be gone already before
  86 *    the increase of the reference happens
  87 * Thread A
  88 *				Thread B
  89 * WRITE_ONCE(wait.state, STATE_NONE);
  90 * schedule_hrtimeout()
  91 *				wake_q_add(A)
  92 *				if (cmpxchg()) // success
  93 *				   ->state = STATE_READY (reordered)
  94 * <timeout returns>
  95 * if (wait.state == STATE_READY) return;
  96 * sysret to user space
  97 * sys_exit()
  98 *				get_task_struct() // UaF
  99 *
 100 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 101 * the smp_store_release() that does ->state = STATE_READY.
 102 *
 103 * 2) Without proper _release/_acquire barriers, the woken up task
 104 *    could read stale data
 105 *
 106 * Thread A
 107 *				Thread B
 108 * do_mq_timedreceive
 109 * WRITE_ONCE(wait.state, STATE_NONE);
 110 * schedule_hrtimeout()
 111 *				state = STATE_READY;
 112 * <timeout returns>
 113 * if (wait.state == STATE_READY) return;
 114 * msg_ptr = wait.msg;		// Access to stale data!
 115 *				receiver->msg = message; (reordered)
 116 *
 117 * Solution: use _release and _acquire barriers.
 118 *
 119 * 3) There is intentionally no barrier when setting current->state
 120 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 121 *    release memory barrier, and the wakeup is triggered when holding
 122 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 123 *    acquire memory barrier.
 124 */
 125
 126struct ext_wait_queue {		/* queue of sleeping tasks */
 127	struct task_struct *task;
 128	struct list_head list;
 129	struct msg_msg *msg;	/* ptr of loaded message */
 130	int state;		/* one of STATE_* values */
 131};
 132
 133struct mqueue_inode_info {
 134	spinlock_t lock;
 135	struct inode vfs_inode;
 136	wait_queue_head_t wait_q;
 137
 138	struct rb_root msg_tree;
 139	struct rb_node *msg_tree_rightmost;
 140	struct posix_msg_tree_node *node_cache;
 141	struct mq_attr attr;
 142
 143	struct sigevent notify;
 144	struct pid *notify_owner;
 145	u32 notify_self_exec_id;
 146	struct user_namespace *notify_user_ns;
 147	struct ucounts *ucounts;	/* user who created, for accounting */
 148	struct sock *notify_sock;
 149	struct sk_buff *notify_cookie;
 150
 151	/* for tasks waiting for free space and messages, respectively */
 152	struct ext_wait_queue e_wait_q[2];
 153
 154	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
 155};
 156
 157static struct file_system_type mqueue_fs_type;
 158static const struct inode_operations mqueue_dir_inode_operations;
 159static const struct file_operations mqueue_file_operations;
 160static const struct super_operations mqueue_super_ops;
 161static const struct fs_context_operations mqueue_fs_context_ops;
 162static void remove_notification(struct mqueue_inode_info *info);
 163
 164static struct kmem_cache *mqueue_inode_cachep;
 165
 166static struct ctl_table_header *mq_sysctl_table;
 167
 168static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
 169{
 170	return container_of(inode, struct mqueue_inode_info, vfs_inode);
 171}
 172
 173/*
 174 * This routine should be called with the mq_lock held.
 175 */
 176static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
 177{
 178	return get_ipc_ns(inode->i_sb->s_fs_info);
 179}
 180
 181static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
 182{
 183	struct ipc_namespace *ns;
 184
 185	spin_lock(&mq_lock);
 186	ns = __get_ns_from_inode(inode);
 187	spin_unlock(&mq_lock);
 188	return ns;
 189}
 190
 191/* Auxiliary functions to manipulate messages' list */
 192static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
 193{
 194	struct rb_node **p, *parent = NULL;
 195	struct posix_msg_tree_node *leaf;
 196	bool rightmost = true;
 197
 198	p = &info->msg_tree.rb_node;
 199	while (*p) {
 200		parent = *p;
 201		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
 202
 203		if (likely(leaf->priority == msg->m_type))
 204			goto insert_msg;
 205		else if (msg->m_type < leaf->priority) {
 206			p = &(*p)->rb_left;
 207			rightmost = false;
 208		} else
 209			p = &(*p)->rb_right;
 210	}
 211	if (info->node_cache) {
 212		leaf = info->node_cache;
 213		info->node_cache = NULL;
 214	} else {
 215		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
 216		if (!leaf)
 217			return -ENOMEM;
 218		INIT_LIST_HEAD(&leaf->msg_list);
 219	}
 220	leaf->priority = msg->m_type;
 221
 222	if (rightmost)
 223		info->msg_tree_rightmost = &leaf->rb_node;
 224
 225	rb_link_node(&leaf->rb_node, parent, p);
 226	rb_insert_color(&leaf->rb_node, &info->msg_tree);
 227insert_msg:
 228	info->attr.mq_curmsgs++;
 229	info->qsize += msg->m_ts;
 230	list_add_tail(&msg->m_list, &leaf->msg_list);
 231	return 0;
 232}
 233
 234static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
 235				  struct mqueue_inode_info *info)
 236{
 237	struct rb_node *node = &leaf->rb_node;
 238
 239	if (info->msg_tree_rightmost == node)
 240		info->msg_tree_rightmost = rb_prev(node);
 241
 242	rb_erase(node, &info->msg_tree);
 243	if (info->node_cache)
 244		kfree(leaf);
 245	else
 246		info->node_cache = leaf;
 
 247}
 248
 249static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
 250{
 251	struct rb_node *parent = NULL;
 252	struct posix_msg_tree_node *leaf;
 253	struct msg_msg *msg;
 254
 255try_again:
 256	/*
 257	 * During insert, low priorities go to the left and high to the
 258	 * right.  On receive, we want the highest priorities first, so
 259	 * walk all the way to the right.
 260	 */
 261	parent = info->msg_tree_rightmost;
 262	if (!parent) {
 263		if (info->attr.mq_curmsgs) {
 264			pr_warn_once("Inconsistency in POSIX message queue, "
 265				     "no tree element, but supposedly messages "
 266				     "should exist!\n");
 267			info->attr.mq_curmsgs = 0;
 268		}
 269		return NULL;
 270	}
 271	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
 272	if (unlikely(list_empty(&leaf->msg_list))) {
 273		pr_warn_once("Inconsistency in POSIX message queue, "
 274			     "empty leaf node but we haven't implemented "
 275			     "lazy leaf delete!\n");
 276		msg_tree_erase(leaf, info);
 277		goto try_again;
 278	} else {
 279		msg = list_first_entry(&leaf->msg_list,
 280				       struct msg_msg, m_list);
 281		list_del(&msg->m_list);
 282		if (list_empty(&leaf->msg_list)) {
 283			msg_tree_erase(leaf, info);
 284		}
 285	}
 286	info->attr.mq_curmsgs--;
 287	info->qsize -= msg->m_ts;
 288	return msg;
 289}
 290
 291static struct inode *mqueue_get_inode(struct super_block *sb,
 292		struct ipc_namespace *ipc_ns, umode_t mode,
 293		struct mq_attr *attr)
 294{
 
 295	struct inode *inode;
 296	int ret = -ENOMEM;
 297
 298	inode = new_inode(sb);
 299	if (!inode)
 300		goto err;
 301
 302	inode->i_ino = get_next_ino();
 303	inode->i_mode = mode;
 304	inode->i_uid = current_fsuid();
 305	inode->i_gid = current_fsgid();
 306	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);
 307
 308	if (S_ISREG(mode)) {
 309		struct mqueue_inode_info *info;
 310		unsigned long mq_bytes, mq_treesize;
 311
 312		inode->i_fop = &mqueue_file_operations;
 313		inode->i_size = FILENT_SIZE;
 314		/* mqueue specific info */
 315		info = MQUEUE_I(inode);
 316		spin_lock_init(&info->lock);
 317		init_waitqueue_head(&info->wait_q);
 318		INIT_LIST_HEAD(&info->e_wait_q[0].list);
 319		INIT_LIST_HEAD(&info->e_wait_q[1].list);
 320		info->notify_owner = NULL;
 321		info->notify_user_ns = NULL;
 322		info->qsize = 0;
 323		info->ucounts = NULL;	/* set when all is ok */
 324		info->msg_tree = RB_ROOT;
 325		info->msg_tree_rightmost = NULL;
 326		info->node_cache = NULL;
 327		memset(&info->attr, 0, sizeof(info->attr));
 328		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
 329					   ipc_ns->mq_msg_default);
 330		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
 331					    ipc_ns->mq_msgsize_default);
 332		if (attr) {
 333			info->attr.mq_maxmsg = attr->mq_maxmsg;
 334			info->attr.mq_msgsize = attr->mq_msgsize;
 335		}
 336		/*
 337		 * We used to allocate a static array of pointers and account
 338		 * the size of that array as well as one msg_msg struct per
 339		 * possible message into the queue size. That's no longer
 340		 * accurate as the queue is now an rbtree and will grow and
 341		 * shrink depending on usage patterns.  We can, however, still
 342		 * account one msg_msg struct per message, but the nodes are
 343		 * allocated depending on priority usage, and most programs
 344		 * only use one, or a handful, of priorities.  However, since
 345		 * this is pinned memory, we need to assume worst case, so
 346		 * that means the min(mq_maxmsg, max_priorities) * struct
 347		 * posix_msg_tree_node.
 348		 */
 349
 350		ret = -EINVAL;
 351		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
 352			goto out_inode;
 353		if (capable(CAP_SYS_RESOURCE)) {
 354			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
 355			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
 356				goto out_inode;
 357		} else {
 358			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
 359					info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
 360				goto out_inode;
 361		}
 362		ret = -EOVERFLOW;
 363		/* check for overflow */
 364		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
 365			goto out_inode;
 366		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
 367			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
 368			sizeof(struct posix_msg_tree_node);
 369		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
 370		if (mq_bytes + mq_treesize < mq_bytes)
 371			goto out_inode;
 372		mq_bytes += mq_treesize;
 373		info->ucounts = get_ucounts(current_ucounts());
 374		if (info->ucounts) {
 375			long msgqueue;
 376
 377			spin_lock(&mq_lock);
 378			msgqueue = inc_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
 379			if (msgqueue == LONG_MAX || msgqueue > rlimit(RLIMIT_MSGQUEUE)) {
 380				dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
 381				spin_unlock(&mq_lock);
 382				put_ucounts(info->ucounts);
 383				info->ucounts = NULL;
 384				/* mqueue_evict_inode() releases info->messages */
 385				ret = -EMFILE;
 386				goto out_inode;
 387			}
 388			spin_unlock(&mq_lock);
 
 
 
 389		}
 
 
 
 
 
 390	} else if (S_ISDIR(mode)) {
 391		inc_nlink(inode);
 392		/* Some things misbehave if size == 0 on a directory */
 393		inode->i_size = 2 * DIRENT_SIZE;
 394		inode->i_op = &mqueue_dir_inode_operations;
 395		inode->i_fop = &simple_dir_operations;
 396	}
 397
 398	return inode;
 399out_inode:
 400	iput(inode);
 401err:
 402	return ERR_PTR(ret);
 403}
 404
 405static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
 406{
 407	struct inode *inode;
 408	struct ipc_namespace *ns = sb->s_fs_info;
 409
 410	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
 411	sb->s_blocksize = PAGE_SIZE;
 412	sb->s_blocksize_bits = PAGE_SHIFT;
 413	sb->s_magic = MQUEUE_MAGIC;
 414	sb->s_op = &mqueue_super_ops;
 415
 416	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
 417	if (IS_ERR(inode))
 418		return PTR_ERR(inode);
 419
 420	sb->s_root = d_make_root(inode);
 421	if (!sb->s_root)
 422		return -ENOMEM;
 423	return 0;
 424}
 425
 426static int mqueue_get_tree(struct fs_context *fc)
 427{
 428	struct mqueue_fs_context *ctx = fc->fs_private;
 429
 430	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
 431}
 432
 433static void mqueue_fs_context_free(struct fs_context *fc)
 434{
 435	struct mqueue_fs_context *ctx = fc->fs_private;
 436
 437	put_ipc_ns(ctx->ipc_ns);
 438	kfree(ctx);
 439}
 440
 441static int mqueue_init_fs_context(struct fs_context *fc)
 442{
 443	struct mqueue_fs_context *ctx;
 444
 445	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
 446	if (!ctx)
 447		return -ENOMEM;
 448
 449	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
 450	put_user_ns(fc->user_ns);
 451	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
 452	fc->fs_private = ctx;
 453	fc->ops = &mqueue_fs_context_ops;
 454	return 0;
 455}
 456
 457static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
 458{
 459	struct mqueue_fs_context *ctx;
 460	struct fs_context *fc;
 461	struct vfsmount *mnt;
 462
 463	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
 464	if (IS_ERR(fc))
 465		return ERR_CAST(fc);
 466
 467	ctx = fc->fs_private;
 468	put_ipc_ns(ctx->ipc_ns);
 469	ctx->ipc_ns = get_ipc_ns(ns);
 470	put_user_ns(fc->user_ns);
 471	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
 472
 473	mnt = fc_mount(fc);
 474	put_fs_context(fc);
 475	return mnt;
 476}
 477
 478static void init_once(void *foo)
 479{
 480	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
 481
 482	inode_init_once(&p->vfs_inode);
 483}
 484
 485static struct inode *mqueue_alloc_inode(struct super_block *sb)
 486{
 487	struct mqueue_inode_info *ei;
 488
 489	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
 490	if (!ei)
 491		return NULL;
 492	return &ei->vfs_inode;
 493}
 494
 495static void mqueue_free_inode(struct inode *inode)
 496{
 497	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
 498}
 499
 500static void mqueue_evict_inode(struct inode *inode)
 501{
 502	struct mqueue_inode_info *info;
 
 503	struct ipc_namespace *ipc_ns;
 504	struct msg_msg *msg, *nmsg;
 505	LIST_HEAD(tmp_msg);
 506
 507	clear_inode(inode);
 508
 509	if (S_ISDIR(inode->i_mode))
 510		return;
 511
 512	ipc_ns = get_ns_from_inode(inode);
 513	info = MQUEUE_I(inode);
 514	spin_lock(&info->lock);
 515	while ((msg = msg_get(info)) != NULL)
 516		list_add_tail(&msg->m_list, &tmp_msg);
 517	kfree(info->node_cache);
 518	spin_unlock(&info->lock);
 519
 520	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
 521		list_del(&msg->m_list);
 522		free_msg(msg);
 523	}
 524
 525	if (info->ucounts) {
 
 526		unsigned long mq_bytes, mq_treesize;
 527
 528		/* Total amount of bytes accounted for the mqueue */
 529		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
 530			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
 531			sizeof(struct posix_msg_tree_node);
 532
 533		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
 534					  info->attr.mq_msgsize);
 535
 536		spin_lock(&mq_lock);
 537		dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
 538		/*
 539		 * get_ns_from_inode() ensures that the
 540		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
 541		 * to which we now hold a reference, or it is NULL.
 542		 * We can't put it here under mq_lock, though.
 543		 */
 544		if (ipc_ns)
 545			ipc_ns->mq_queues_count--;
 546		spin_unlock(&mq_lock);
 547		put_ucounts(info->ucounts);
 548		info->ucounts = NULL;
 549	}
 550	if (ipc_ns)
 551		put_ipc_ns(ipc_ns);
 552}
 553
 554static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
 555{
 556	struct inode *dir = dentry->d_parent->d_inode;
 557	struct inode *inode;
 558	struct mq_attr *attr = arg;
 559	int error;
 560	struct ipc_namespace *ipc_ns;
 561
 562	spin_lock(&mq_lock);
 563	ipc_ns = __get_ns_from_inode(dir);
 564	if (!ipc_ns) {
 565		error = -EACCES;
 566		goto out_unlock;
 567	}
 568
 569	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
 570	    !capable(CAP_SYS_RESOURCE)) {
 571		error = -ENOSPC;
 572		goto out_unlock;
 573	}
 574	ipc_ns->mq_queues_count++;
 575	spin_unlock(&mq_lock);
 576
 577	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
 578	if (IS_ERR(inode)) {
 579		error = PTR_ERR(inode);
 580		spin_lock(&mq_lock);
 581		ipc_ns->mq_queues_count--;
 582		goto out_unlock;
 583	}
 584
 585	put_ipc_ns(ipc_ns);
 586	dir->i_size += DIRENT_SIZE;
 587	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
 588
 589	d_instantiate(dentry, inode);
 590	dget(dentry);
 591	return 0;
 592out_unlock:
 593	spin_unlock(&mq_lock);
 594	if (ipc_ns)
 595		put_ipc_ns(ipc_ns);
 596	return error;
 597}
 598
 599static int mqueue_create(struct user_namespace *mnt_userns, struct inode *dir,
 600			 struct dentry *dentry, umode_t mode, bool excl)
 601{
 602	return mqueue_create_attr(dentry, mode, NULL);
 603}
 604
 605static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
 606{
 607	struct inode *inode = d_inode(dentry);
 608
 609	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
 610	dir->i_size -= DIRENT_SIZE;
 611	drop_nlink(inode);
 612	dput(dentry);
 613	return 0;
 614}
 615
 616/*
 617*	This is routine for system read from queue file.
 618*	To avoid mess with doing here some sort of mq_receive we allow
 619*	to read only queue size & notification info (the only values
 620*	that are interesting from user point of view and aren't accessible
 621*	through std routines)
 622*/
 623static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
 624				size_t count, loff_t *off)
 625{
 626	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
 627	char buffer[FILENT_SIZE];
 628	ssize_t ret;
 629
 630	spin_lock(&info->lock);
 631	snprintf(buffer, sizeof(buffer),
 632			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
 633			info->qsize,
 634			info->notify_owner ? info->notify.sigev_notify : 0,
 635			(info->notify_owner &&
 636			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
 637				info->notify.sigev_signo : 0,
 638			pid_vnr(info->notify_owner));
 639	spin_unlock(&info->lock);
 640	buffer[sizeof(buffer)-1] = '\0';
 641
 642	ret = simple_read_from_buffer(u_data, count, off, buffer,
 643				strlen(buffer));
 644	if (ret <= 0)
 645		return ret;
 646
 647	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
 648	return ret;
 649}
 650
 651static int mqueue_flush_file(struct file *filp, fl_owner_t id)
 652{
 653	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
 654
 655	spin_lock(&info->lock);
 656	if (task_tgid(current) == info->notify_owner)
 657		remove_notification(info);
 658
 659	spin_unlock(&info->lock);
 660	return 0;
 661}
 662
 663static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
 664{
 665	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
 666	__poll_t retval = 0;
 667
 668	poll_wait(filp, &info->wait_q, poll_tab);
 669
 670	spin_lock(&info->lock);
 671	if (info->attr.mq_curmsgs)
 672		retval = EPOLLIN | EPOLLRDNORM;
 673
 674	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
 675		retval |= EPOLLOUT | EPOLLWRNORM;
 676	spin_unlock(&info->lock);
 677
 678	return retval;
 679}
 680
 681/* Adds current to info->e_wait_q[sr] before element with smaller prio */
 682static void wq_add(struct mqueue_inode_info *info, int sr,
 683			struct ext_wait_queue *ewp)
 684{
 685	struct ext_wait_queue *walk;
 686
 687	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
 688		if (walk->task->prio <= current->prio) {
 689			list_add_tail(&ewp->list, &walk->list);
 690			return;
 691		}
 692	}
 693	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
 694}
 695
 696/*
 697 * Puts current task to sleep. Caller must hold queue lock. After return
 698 * lock isn't held.
 699 * sr: SEND or RECV
 700 */
 701static int wq_sleep(struct mqueue_inode_info *info, int sr,
 702		    ktime_t *timeout, struct ext_wait_queue *ewp)
 703	__releases(&info->lock)
 704{
 705	int retval;
 706	signed long time;
 707
 708	wq_add(info, sr, ewp);
 709
 710	for (;;) {
 711		/* memory barrier not required, we hold info->lock */
 712		__set_current_state(TASK_INTERRUPTIBLE);
 713
 714		spin_unlock(&info->lock);
 715		time = schedule_hrtimeout_range_clock(timeout, 0,
 716			HRTIMER_MODE_ABS, CLOCK_REALTIME);
 717
 718		if (READ_ONCE(ewp->state) == STATE_READY) {
 719			/* see MQ_BARRIER for purpose/pairing */
 720			smp_acquire__after_ctrl_dep();
 721			retval = 0;
 722			goto out;
 723		}
 724		spin_lock(&info->lock);
 725
 726		/* we hold info->lock, so no memory barrier required */
 727		if (READ_ONCE(ewp->state) == STATE_READY) {
 728			retval = 0;
 729			goto out_unlock;
 730		}
 731		if (signal_pending(current)) {
 732			retval = -ERESTARTSYS;
 733			break;
 734		}
 735		if (time == 0) {
 736			retval = -ETIMEDOUT;
 737			break;
 738		}
 739	}
 740	list_del(&ewp->list);
 741out_unlock:
 742	spin_unlock(&info->lock);
 743out:
 744	return retval;
 745}
 746
 747/*
 748 * Returns waiting task that should be serviced first or NULL if none exists
 749 */
 750static struct ext_wait_queue *wq_get_first_waiter(
 751		struct mqueue_inode_info *info, int sr)
 752{
 753	struct list_head *ptr;
 754
 755	ptr = info->e_wait_q[sr].list.prev;
 756	if (ptr == &info->e_wait_q[sr].list)
 757		return NULL;
 758	return list_entry(ptr, struct ext_wait_queue, list);
 759}
 760
 761
 762static inline void set_cookie(struct sk_buff *skb, char code)
 763{
 764	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
 765}
 766
 767/*
 768 * The next function is only to split too long sys_mq_timedsend
 769 */
 770static void __do_notify(struct mqueue_inode_info *info)
 771{
 772	/* notification
 773	 * invoked when there is registered process and there isn't process
 774	 * waiting synchronously for message AND state of queue changed from
 775	 * empty to not empty. Here we are sure that no one is waiting
 776	 * synchronously. */
 777	if (info->notify_owner &&
 778	    info->attr.mq_curmsgs == 1) {
 
 779		switch (info->notify.sigev_notify) {
 780		case SIGEV_NONE:
 781			break;
 782		case SIGEV_SIGNAL: {
 783			struct kernel_siginfo sig_i;
 784			struct task_struct *task;
 785
 786			/* do_mq_notify() accepts sigev_signo == 0, why?? */
 787			if (!info->notify.sigev_signo)
 788				break;
 789
 790			clear_siginfo(&sig_i);
 791			sig_i.si_signo = info->notify.sigev_signo;
 792			sig_i.si_errno = 0;
 793			sig_i.si_code = SI_MESGQ;
 794			sig_i.si_value = info->notify.sigev_value;
 795			rcu_read_lock();
 796			/* map current pid/uid into info->owner's namespaces */
 
 797			sig_i.si_pid = task_tgid_nr_ns(current,
 798						ns_of_pid(info->notify_owner));
 799			sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
 800						current_uid());
 801			/*
 802			 * We can't use kill_pid_info(), this signal should
 803			 * bypass check_kill_permission(). It is from kernel
 804			 * but si_fromuser() can't know this.
 805			 * We do check the self_exec_id, to avoid sending
 806			 * signals to programs that don't expect them.
 807			 */
 808			task = pid_task(info->notify_owner, PIDTYPE_TGID);
 809			if (task && task->self_exec_id ==
 810						info->notify_self_exec_id) {
 811				do_send_sig_info(info->notify.sigev_signo,
 812						&sig_i, task, PIDTYPE_TGID);
 813			}
 814			rcu_read_unlock();
 
 
 
 815			break;
 816		}
 817		case SIGEV_THREAD:
 818			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
 819			netlink_sendskb(info->notify_sock, info->notify_cookie);
 820			break;
 821		}
 822		/* after notification unregisters process */
 823		put_pid(info->notify_owner);
 824		put_user_ns(info->notify_user_ns);
 825		info->notify_owner = NULL;
 826		info->notify_user_ns = NULL;
 827	}
 828	wake_up(&info->wait_q);
 829}
 830
 831static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
 832			   struct timespec64 *ts)
 833{
 834	if (get_timespec64(ts, u_abs_timeout))
 835		return -EFAULT;
 836	if (!timespec64_valid(ts))
 837		return -EINVAL;
 838	return 0;
 839}
 840
 841static void remove_notification(struct mqueue_inode_info *info)
 842{
 843	if (info->notify_owner != NULL &&
 844	    info->notify.sigev_notify == SIGEV_THREAD) {
 845		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
 846		netlink_sendskb(info->notify_sock, info->notify_cookie);
 847	}
 848	put_pid(info->notify_owner);
 849	put_user_ns(info->notify_user_ns);
 850	info->notify_owner = NULL;
 851	info->notify_user_ns = NULL;
 852}
 853
 854static int prepare_open(struct dentry *dentry, int oflag, int ro,
 855			umode_t mode, struct filename *name,
 856			struct mq_attr *attr)
 857{
 858	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
 859						  MAY_READ | MAY_WRITE };
 860	int acc;
 861
 862	if (d_really_is_negative(dentry)) {
 863		if (!(oflag & O_CREAT))
 864			return -ENOENT;
 865		if (ro)
 866			return ro;
 867		audit_inode_parent_hidden(name, dentry->d_parent);
 868		return vfs_mkobj(dentry, mode & ~current_umask(),
 869				  mqueue_create_attr, attr);
 870	}
 871	/* it already existed */
 872	audit_inode(name, dentry, 0);
 873	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
 874		return -EEXIST;
 875	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
 876		return -EINVAL;
 877	acc = oflag2acc[oflag & O_ACCMODE];
 878	return inode_permission(&init_user_ns, d_inode(dentry), acc);
 879}
 880
 881static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
 882		      struct mq_attr *attr)
 883{
 884	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
 885	struct dentry *root = mnt->mnt_root;
 886	struct filename *name;
 887	struct path path;
 888	int fd, error;
 889	int ro;
 890
 891	audit_mq_open(oflag, mode, attr);
 892
 893	if (IS_ERR(name = getname(u_name)))
 894		return PTR_ERR(name);
 895
 896	fd = get_unused_fd_flags(O_CLOEXEC);
 897	if (fd < 0)
 898		goto out_putname;
 899
 900	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
 901	inode_lock(d_inode(root));
 902	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
 903	if (IS_ERR(path.dentry)) {
 904		error = PTR_ERR(path.dentry);
 905		goto out_putfd;
 906	}
 907	path.mnt = mntget(mnt);
 908	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
 909	if (!error) {
 910		struct file *file = dentry_open(&path, oflag, current_cred());
 911		if (!IS_ERR(file))
 912			fd_install(fd, file);
 913		else
 914			error = PTR_ERR(file);
 915	}
 916	path_put(&path);
 917out_putfd:
 918	if (error) {
 919		put_unused_fd(fd);
 920		fd = error;
 921	}
 922	inode_unlock(d_inode(root));
 923	if (!ro)
 924		mnt_drop_write(mnt);
 925out_putname:
 926	putname(name);
 927	return fd;
 928}
 929
 930SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
 931		struct mq_attr __user *, u_attr)
 932{
 933	struct mq_attr attr;
 934	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
 935		return -EFAULT;
 936
 937	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
 938}
 939
 940SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
 941{
 942	int err;
 943	struct filename *name;
 944	struct dentry *dentry;
 945	struct inode *inode = NULL;
 946	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
 947	struct vfsmount *mnt = ipc_ns->mq_mnt;
 948
 949	name = getname(u_name);
 950	if (IS_ERR(name))
 951		return PTR_ERR(name);
 952
 953	audit_inode_parent_hidden(name, mnt->mnt_root);
 954	err = mnt_want_write(mnt);
 955	if (err)
 956		goto out_name;
 957	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
 958	dentry = lookup_one_len(name->name, mnt->mnt_root,
 959				strlen(name->name));
 960	if (IS_ERR(dentry)) {
 961		err = PTR_ERR(dentry);
 962		goto out_unlock;
 963	}
 964
 965	inode = d_inode(dentry);
 966	if (!inode) {
 967		err = -ENOENT;
 968	} else {
 969		ihold(inode);
 970		err = vfs_unlink(&init_user_ns, d_inode(dentry->d_parent),
 971				 dentry, NULL);
 972	}
 973	dput(dentry);
 974
 975out_unlock:
 976	inode_unlock(d_inode(mnt->mnt_root));
 977	if (inode)
 978		iput(inode);
 979	mnt_drop_write(mnt);
 980out_name:
 981	putname(name);
 982
 983	return err;
 984}
 985
 986/* Pipelined send and receive functions.
 987 *
 988 * If a receiver finds no waiting message, then it registers itself in the
 989 * list of waiting receivers. A sender checks that list before adding the new
 990 * message into the message array. If there is a waiting receiver, then it
 991 * bypasses the message array and directly hands the message over to the
 992 * receiver. The receiver accepts the message and returns without grabbing the
 993 * queue spinlock:
 994 *
 995 * - Set pointer to message.
 996 * - Queue the receiver task for later wakeup (without the info->lock).
 997 * - Update its state to STATE_READY. Now the receiver can continue.
 998 * - Wake up the process after the lock is dropped. Should the process wake up
 999 *   before this wakeup (due to a timeout or a signal) it will either see
1000 *   STATE_READY and continue or acquire the lock to check the state again.
1001 *
1002 * The same algorithm is used for senders.
1003 */
1004
1005static inline void __pipelined_op(struct wake_q_head *wake_q,
1006				  struct mqueue_inode_info *info,
1007				  struct ext_wait_queue *this)
1008{
1009	struct task_struct *task;
1010
1011	list_del(&this->list);
1012	task = get_task_struct(this->task);
1013
1014	/* see MQ_BARRIER for purpose/pairing */
1015	smp_store_release(&this->state, STATE_READY);
1016	wake_q_add_safe(wake_q, task);
1017}
1018
1019/* pipelined_send() - send a message directly to the task waiting in
1020 * sys_mq_timedreceive() (without inserting message into a queue).
1021 */
1022static inline void pipelined_send(struct wake_q_head *wake_q,
1023				  struct mqueue_inode_info *info,
1024				  struct msg_msg *message,
1025				  struct ext_wait_queue *receiver)
1026{
1027	receiver->msg = message;
1028	__pipelined_op(wake_q, info, receiver);
 
 
 
 
 
 
 
 
 
 
1029}
1030
1031/* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
1032 * gets its message and put to the queue (we have one free place for sure). */
1033static inline void pipelined_receive(struct wake_q_head *wake_q,
1034				     struct mqueue_inode_info *info)
1035{
1036	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);
1037
1038	if (!sender) {
1039		/* for poll */
1040		wake_up_interruptible(&info->wait_q);
1041		return;
1042	}
1043	if (msg_insert(sender->msg, info))
1044		return;
1045
1046	__pipelined_op(wake_q, info, sender);
 
 
1047}
1048
1049static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
1050		size_t msg_len, unsigned int msg_prio,
1051		struct timespec64 *ts)
1052{
1053	struct fd f;
1054	struct inode *inode;
1055	struct ext_wait_queue wait;
1056	struct ext_wait_queue *receiver;
1057	struct msg_msg *msg_ptr;
1058	struct mqueue_inode_info *info;
1059	ktime_t expires, *timeout = NULL;
1060	struct posix_msg_tree_node *new_leaf = NULL;
1061	int ret = 0;
1062	DEFINE_WAKE_Q(wake_q);
1063
1064	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
1065		return -EINVAL;
1066
1067	if (ts) {
1068		expires = timespec64_to_ktime(*ts);
1069		timeout = &expires;
1070	}
1071
1072	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);
1073
1074	f = fdget(mqdes);
1075	if (unlikely(!f.file)) {
1076		ret = -EBADF;
1077		goto out;
1078	}
1079
1080	inode = file_inode(f.file);
1081	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1082		ret = -EBADF;
1083		goto out_fput;
1084	}
1085	info = MQUEUE_I(inode);
1086	audit_file(f.file);
1087
1088	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
1089		ret = -EBADF;
1090		goto out_fput;
1091	}
1092
1093	if (unlikely(msg_len > info->attr.mq_msgsize)) {
1094		ret = -EMSGSIZE;
1095		goto out_fput;
1096	}
1097
1098	/* First try to allocate memory, before doing anything with
1099	 * existing queues. */
1100	msg_ptr = load_msg(u_msg_ptr, msg_len);
1101	if (IS_ERR(msg_ptr)) {
1102		ret = PTR_ERR(msg_ptr);
1103		goto out_fput;
1104	}
1105	msg_ptr->m_ts = msg_len;
1106	msg_ptr->m_type = msg_prio;
1107
1108	/*
1109	 * msg_insert really wants us to have a valid, spare node struct so
1110	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
1111	 * fall back to that if necessary.
1112	 */
1113	if (!info->node_cache)
1114		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
1115
1116	spin_lock(&info->lock);
1117
1118	if (!info->node_cache && new_leaf) {
1119		/* Save our speculative allocation into the cache */
1120		INIT_LIST_HEAD(&new_leaf->msg_list);
1121		info->node_cache = new_leaf;
1122		new_leaf = NULL;
1123	} else {
1124		kfree(new_leaf);
1125	}
1126
1127	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
1128		if (f.file->f_flags & O_NONBLOCK) {
1129			ret = -EAGAIN;
1130		} else {
1131			wait.task = current;
1132			wait.msg = (void *) msg_ptr;
1133
1134			/* memory barrier not required, we hold info->lock */
1135			WRITE_ONCE(wait.state, STATE_NONE);
1136			ret = wq_sleep(info, SEND, timeout, &wait);
1137			/*
1138			 * wq_sleep must be called with info->lock held, and
1139			 * returns with the lock released
1140			 */
1141			goto out_free;
1142		}
1143	} else {
1144		receiver = wq_get_first_waiter(info, RECV);
1145		if (receiver) {
1146			pipelined_send(&wake_q, info, msg_ptr, receiver);
1147		} else {
1148			/* adds message to the queue */
1149			ret = msg_insert(msg_ptr, info);
1150			if (ret)
1151				goto out_unlock;
1152			__do_notify(info);
1153		}
1154		inode->i_atime = inode->i_mtime = inode->i_ctime =
1155				current_time(inode);
1156	}
1157out_unlock:
1158	spin_unlock(&info->lock);
1159	wake_up_q(&wake_q);
1160out_free:
1161	if (ret)
1162		free_msg(msg_ptr);
1163out_fput:
1164	fdput(f);
1165out:
1166	return ret;
1167}
1168
1169static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
1170		size_t msg_len, unsigned int __user *u_msg_prio,
1171		struct timespec64 *ts)
1172{
1173	ssize_t ret;
1174	struct msg_msg *msg_ptr;
1175	struct fd f;
1176	struct inode *inode;
1177	struct mqueue_inode_info *info;
1178	struct ext_wait_queue wait;
1179	ktime_t expires, *timeout = NULL;
1180	struct posix_msg_tree_node *new_leaf = NULL;
1181
1182	if (ts) {
1183		expires = timespec64_to_ktime(*ts);
1184		timeout = &expires;
1185	}
1186
1187	audit_mq_sendrecv(mqdes, msg_len, 0, ts);
1188
1189	f = fdget(mqdes);
1190	if (unlikely(!f.file)) {
1191		ret = -EBADF;
1192		goto out;
1193	}
1194
1195	inode = file_inode(f.file);
1196	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1197		ret = -EBADF;
1198		goto out_fput;
1199	}
1200	info = MQUEUE_I(inode);
1201	audit_file(f.file);
1202
1203	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
1204		ret = -EBADF;
1205		goto out_fput;
1206	}
1207
1208	/* checks if buffer is big enough */
1209	if (unlikely(msg_len < info->attr.mq_msgsize)) {
1210		ret = -EMSGSIZE;
1211		goto out_fput;
1212	}
1213
1214	/*
1215	 * msg_insert really wants us to have a valid, spare node struct so
1216	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
1217	 * fall back to that if necessary.
1218	 */
1219	if (!info->node_cache)
1220		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
1221
1222	spin_lock(&info->lock);
1223
1224	if (!info->node_cache && new_leaf) {
1225		/* Save our speculative allocation into the cache */
1226		INIT_LIST_HEAD(&new_leaf->msg_list);
1227		info->node_cache = new_leaf;
1228	} else {
1229		kfree(new_leaf);
1230	}
1231
1232	if (info->attr.mq_curmsgs == 0) {
1233		if (f.file->f_flags & O_NONBLOCK) {
1234			spin_unlock(&info->lock);
1235			ret = -EAGAIN;
1236		} else {
1237			wait.task = current;
1238
1239			/* memory barrier not required, we hold info->lock */
1240			WRITE_ONCE(wait.state, STATE_NONE);
1241			ret = wq_sleep(info, RECV, timeout, &wait);
1242			msg_ptr = wait.msg;
1243		}
1244	} else {
1245		DEFINE_WAKE_Q(wake_q);
1246
1247		msg_ptr = msg_get(info);
1248
1249		inode->i_atime = inode->i_mtime = inode->i_ctime =
1250				current_time(inode);
1251
1252		/* There is now free space in queue. */
1253		pipelined_receive(&wake_q, info);
1254		spin_unlock(&info->lock);
1255		wake_up_q(&wake_q);
1256		ret = 0;
1257	}
1258	if (ret == 0) {
1259		ret = msg_ptr->m_ts;
1260
1261		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
1262			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
1263			ret = -EFAULT;
1264		}
1265		free_msg(msg_ptr);
1266	}
1267out_fput:
1268	fdput(f);
1269out:
1270	return ret;
1271}
1272
1273SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
1274		size_t, msg_len, unsigned int, msg_prio,
1275		const struct __kernel_timespec __user *, u_abs_timeout)
1276{
1277	struct timespec64 ts, *p = NULL;
1278	if (u_abs_timeout) {
1279		int res = prepare_timeout(u_abs_timeout, &ts);
1280		if (res)
1281			return res;
1282		p = &ts;
1283	}
1284	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
1285}
1286
1287SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
1288		size_t, msg_len, unsigned int __user *, u_msg_prio,
1289		const struct __kernel_timespec __user *, u_abs_timeout)
1290{
1291	struct timespec64 ts, *p = NULL;
1292	if (u_abs_timeout) {
1293		int res = prepare_timeout(u_abs_timeout, &ts);
1294		if (res)
1295			return res;
1296		p = &ts;
1297	}
1298	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
1299}
1300
1301/*
1302 * Notes: the case when user wants us to deregister (with NULL as pointer)
1303 * and he isn't currently owner of notification, will be silently discarded.
1304 * It isn't explicitly defined in the POSIX.
1305 */
1306static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
1307{
1308	int ret;
1309	struct fd f;
1310	struct sock *sock;
1311	struct inode *inode;
1312	struct mqueue_inode_info *info;
1313	struct sk_buff *nc;
1314
1315	audit_mq_notify(mqdes, notification);
1316
1317	nc = NULL;
1318	sock = NULL;
1319	if (notification != NULL) {
1320		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
1321			     notification->sigev_notify != SIGEV_SIGNAL &&
1322			     notification->sigev_notify != SIGEV_THREAD))
1323			return -EINVAL;
1324		if (notification->sigev_notify == SIGEV_SIGNAL &&
1325			!valid_signal(notification->sigev_signo)) {
1326			return -EINVAL;
1327		}
1328		if (notification->sigev_notify == SIGEV_THREAD) {
1329			long timeo;
1330
1331			/* create the notify skb */
1332			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
1333			if (!nc)
1334				return -ENOMEM;
1335
1336			if (copy_from_user(nc->data,
1337					notification->sigev_value.sival_ptr,
1338					NOTIFY_COOKIE_LEN)) {
1339				ret = -EFAULT;
1340				goto free_skb;
1341			}
1342
1343			/* TODO: add a header? */
1344			skb_put(nc, NOTIFY_COOKIE_LEN);
1345			/* and attach it to the socket */
1346retry:
1347			f = fdget(notification->sigev_signo);
1348			if (!f.file) {
1349				ret = -EBADF;
1350				goto out;
1351			}
1352			sock = netlink_getsockbyfilp(f.file);
1353			fdput(f);
1354			if (IS_ERR(sock)) {
1355				ret = PTR_ERR(sock);
1356				goto free_skb;
1357			}
1358
1359			timeo = MAX_SCHEDULE_TIMEOUT;
1360			ret = netlink_attachskb(sock, nc, &timeo, NULL);
1361			if (ret == 1) {
1362				sock = NULL;
1363				goto retry;
1364			}
1365			if (ret)
1366				return ret;
1367		}
1368	}
1369
1370	f = fdget(mqdes);
1371	if (!f.file) {
1372		ret = -EBADF;
1373		goto out;
1374	}
1375
1376	inode = file_inode(f.file);
1377	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1378		ret = -EBADF;
1379		goto out_fput;
1380	}
1381	info = MQUEUE_I(inode);
1382
1383	ret = 0;
1384	spin_lock(&info->lock);
1385	if (notification == NULL) {
1386		if (info->notify_owner == task_tgid(current)) {
1387			remove_notification(info);
1388			inode->i_atime = inode->i_ctime = current_time(inode);
1389		}
1390	} else if (info->notify_owner != NULL) {
1391		ret = -EBUSY;
1392	} else {
1393		switch (notification->sigev_notify) {
1394		case SIGEV_NONE:
1395			info->notify.sigev_notify = SIGEV_NONE;
1396			break;
1397		case SIGEV_THREAD:
1398			info->notify_sock = sock;
1399			info->notify_cookie = nc;
1400			sock = NULL;
1401			nc = NULL;
1402			info->notify.sigev_notify = SIGEV_THREAD;
1403			break;
1404		case SIGEV_SIGNAL:
1405			info->notify.sigev_signo = notification->sigev_signo;
1406			info->notify.sigev_value = notification->sigev_value;
1407			info->notify.sigev_notify = SIGEV_SIGNAL;
1408			info->notify_self_exec_id = current->self_exec_id;
1409			break;
1410		}
1411
1412		info->notify_owner = get_pid(task_tgid(current));
1413		info->notify_user_ns = get_user_ns(current_user_ns());
1414		inode->i_atime = inode->i_ctime = current_time(inode);
1415	}
1416	spin_unlock(&info->lock);
1417out_fput:
1418	fdput(f);
1419out:
1420	if (sock)
1421		netlink_detachskb(sock, nc);
1422	else
1423free_skb:
1424		dev_kfree_skb(nc);
1425
1426	return ret;
1427}
1428
1429SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1430		const struct sigevent __user *, u_notification)
1431{
1432	struct sigevent n, *p = NULL;
1433	if (u_notification) {
1434		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
1435			return -EFAULT;
1436		p = &n;
1437	}
1438	return do_mq_notify(mqdes, p);
1439}
1440
1441static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
1442{
1443	struct fd f;
1444	struct inode *inode;
1445	struct mqueue_inode_info *info;
1446
1447	if (new && (new->mq_flags & (~O_NONBLOCK)))
1448		return -EINVAL;
1449
1450	f = fdget(mqdes);
1451	if (!f.file)
1452		return -EBADF;
1453
1454	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1455		fdput(f);
1456		return -EBADF;
1457	}
1458
1459	inode = file_inode(f.file);
1460	info = MQUEUE_I(inode);
1461
1462	spin_lock(&info->lock);
1463
1464	if (old) {
1465		*old = info->attr;
1466		old->mq_flags = f.file->f_flags & O_NONBLOCK;
1467	}
1468	if (new) {
1469		audit_mq_getsetattr(mqdes, new);
1470		spin_lock(&f.file->f_lock);
1471		if (new->mq_flags & O_NONBLOCK)
1472			f.file->f_flags |= O_NONBLOCK;
1473		else
1474			f.file->f_flags &= ~O_NONBLOCK;
1475		spin_unlock(&f.file->f_lock);
1476
1477		inode->i_atime = inode->i_ctime = current_time(inode);
1478	}
1479
1480	spin_unlock(&info->lock);
1481	fdput(f);
1482	return 0;
1483}
1484
1485SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1486		const struct mq_attr __user *, u_mqstat,
1487		struct mq_attr __user *, u_omqstat)
1488{
1489	int ret;
1490	struct mq_attr mqstat, omqstat;
1491	struct mq_attr *new = NULL, *old = NULL;
1492
1493	if (u_mqstat) {
1494		new = &mqstat;
1495		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
1496			return -EFAULT;
1497	}
1498	if (u_omqstat)
1499		old = &omqstat;
1500
1501	ret = do_mq_getsetattr(mqdes, new, old);
1502	if (ret || !old)
1503		return ret;
1504
1505	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
1506		return -EFAULT;
1507	return 0;
1508}
1509
1510#ifdef CONFIG_COMPAT
1511
1512struct compat_mq_attr {
1513	compat_long_t mq_flags;      /* message queue flags		     */
1514	compat_long_t mq_maxmsg;     /* maximum number of messages	     */
1515	compat_long_t mq_msgsize;    /* maximum message size		     */
1516	compat_long_t mq_curmsgs;    /* number of messages currently queued  */
1517	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
1518};
1519
1520static inline int get_compat_mq_attr(struct mq_attr *attr,
1521			const struct compat_mq_attr __user *uattr)
1522{
1523	struct compat_mq_attr v;
1524
1525	if (copy_from_user(&v, uattr, sizeof(*uattr)))
1526		return -EFAULT;
1527
1528	memset(attr, 0, sizeof(*attr));
1529	attr->mq_flags = v.mq_flags;
1530	attr->mq_maxmsg = v.mq_maxmsg;
1531	attr->mq_msgsize = v.mq_msgsize;
1532	attr->mq_curmsgs = v.mq_curmsgs;
1533	return 0;
1534}
1535
1536static inline int put_compat_mq_attr(const struct mq_attr *attr,
1537			struct compat_mq_attr __user *uattr)
1538{
1539	struct compat_mq_attr v;
1540
1541	memset(&v, 0, sizeof(v));
1542	v.mq_flags = attr->mq_flags;
1543	v.mq_maxmsg = attr->mq_maxmsg;
1544	v.mq_msgsize = attr->mq_msgsize;
1545	v.mq_curmsgs = attr->mq_curmsgs;
1546	if (copy_to_user(uattr, &v, sizeof(*uattr)))
1547		return -EFAULT;
1548	return 0;
1549}
1550
1551COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
1552		       int, oflag, compat_mode_t, mode,
1553		       struct compat_mq_attr __user *, u_attr)
1554{
1555	struct mq_attr attr, *p = NULL;
1556	if (u_attr && oflag & O_CREAT) {
1557		p = &attr;
1558		if (get_compat_mq_attr(&attr, u_attr))
1559			return -EFAULT;
1560	}
1561	return do_mq_open(u_name, oflag, mode, p);
1562}
1563
1564COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1565		       const struct compat_sigevent __user *, u_notification)
1566{
1567	struct sigevent n, *p = NULL;
1568	if (u_notification) {
1569		if (get_compat_sigevent(&n, u_notification))
1570			return -EFAULT;
1571		if (n.sigev_notify == SIGEV_THREAD)
1572			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
1573		p = &n;
1574	}
1575	return do_mq_notify(mqdes, p);
1576}
1577
1578COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1579		       const struct compat_mq_attr __user *, u_mqstat,
1580		       struct compat_mq_attr __user *, u_omqstat)
1581{
1582	int ret;
1583	struct mq_attr mqstat, omqstat;
1584	struct mq_attr *new = NULL, *old = NULL;
1585
1586	if (u_mqstat) {
1587		new = &mqstat;
1588		if (get_compat_mq_attr(new, u_mqstat))
1589			return -EFAULT;
1590	}
1591	if (u_omqstat)
1592		old = &omqstat;
1593
1594	ret = do_mq_getsetattr(mqdes, new, old);
1595	if (ret || !old)
1596		return ret;
1597
1598	if (put_compat_mq_attr(old, u_omqstat))
1599		return -EFAULT;
1600	return 0;
1601}
1602#endif
1603
1604#ifdef CONFIG_COMPAT_32BIT_TIME
1605static int compat_prepare_timeout(const struct old_timespec32 __user *p,
1606				   struct timespec64 *ts)
1607{
1608	if (get_old_timespec32(ts, p))
1609		return -EFAULT;
1610	if (!timespec64_valid(ts))
1611		return -EINVAL;
1612	return 0;
1613}
1614
1615SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
1616		const char __user *, u_msg_ptr,
1617		unsigned int, msg_len, unsigned int, msg_prio,
1618		const struct old_timespec32 __user *, u_abs_timeout)
1619{
1620	struct timespec64 ts, *p = NULL;
1621	if (u_abs_timeout) {
1622		int res = compat_prepare_timeout(u_abs_timeout, &ts);
1623		if (res)
1624			return res;
1625		p = &ts;
1626	}
1627	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
1628}
1629
1630SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
1631		char __user *, u_msg_ptr,
1632		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
1633		const struct old_timespec32 __user *, u_abs_timeout)
1634{
1635	struct timespec64 ts, *p = NULL;
1636	if (u_abs_timeout) {
1637		int res = compat_prepare_timeout(u_abs_timeout, &ts);
1638		if (res)
1639			return res;
1640		p = &ts;
1641	}
1642	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
1643}
1644#endif
1645
1646static const struct inode_operations mqueue_dir_inode_operations = {
1647	.lookup = simple_lookup,
1648	.create = mqueue_create,
1649	.unlink = mqueue_unlink,
1650};
1651
1652static const struct file_operations mqueue_file_operations = {
1653	.flush = mqueue_flush_file,
1654	.poll = mqueue_poll_file,
1655	.read = mqueue_read_file,
1656	.llseek = default_llseek,
1657};
1658
1659static const struct super_operations mqueue_super_ops = {
1660	.alloc_inode = mqueue_alloc_inode,
1661	.free_inode = mqueue_free_inode,
1662	.evict_inode = mqueue_evict_inode,
1663	.statfs = simple_statfs,
1664};
1665
1666static const struct fs_context_operations mqueue_fs_context_ops = {
1667	.free		= mqueue_fs_context_free,
1668	.get_tree	= mqueue_get_tree,
1669};
1670
1671static struct file_system_type mqueue_fs_type = {
1672	.name			= "mqueue",
1673	.init_fs_context	= mqueue_init_fs_context,
1674	.kill_sb		= kill_litter_super,
1675	.fs_flags		= FS_USERNS_MOUNT,
1676};
1677
1678int mq_init_ns(struct ipc_namespace *ns)
1679{
1680	struct vfsmount *m;
1681
1682	ns->mq_queues_count  = 0;
1683	ns->mq_queues_max    = DFLT_QUEUESMAX;
1684	ns->mq_msg_max       = DFLT_MSGMAX;
1685	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
1686	ns->mq_msg_default   = DFLT_MSG;
1687	ns->mq_msgsize_default  = DFLT_MSGSIZE;
1688
1689	m = mq_create_mount(ns);
1690	if (IS_ERR(m))
1691		return PTR_ERR(m);
1692	ns->mq_mnt = m;
1693	return 0;
1694}
1695
1696void mq_clear_sbinfo(struct ipc_namespace *ns)
1697{
1698	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
1699}
1700
1701void mq_put_mnt(struct ipc_namespace *ns)
1702{
1703	kern_unmount(ns->mq_mnt);
1704}
1705
1706static int __init init_mqueue_fs(void)
1707{
1708	int error;
1709
1710	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
1711				sizeof(struct mqueue_inode_info), 0,
1712				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
1713	if (mqueue_inode_cachep == NULL)
1714		return -ENOMEM;
1715
1716	/* ignore failures - they are not fatal */
1717	mq_sysctl_table = mq_register_sysctl_table();
1718
1719	error = register_filesystem(&mqueue_fs_type);
1720	if (error)
1721		goto out_sysctl;
1722
1723	spin_lock_init(&mq_lock);
1724
1725	error = mq_init_ns(&init_ipc_ns);
1726	if (error)
1727		goto out_filesystem;
1728
1729	return 0;
1730
1731out_filesystem:
1732	unregister_filesystem(&mqueue_fs_type);
1733out_sysctl:
1734	if (mq_sysctl_table)
1735		unregister_sysctl_table(mq_sysctl_table);
1736	kmem_cache_destroy(mqueue_inode_cachep);
1737	return error;
1738}
1739
1740device_initcall(init_mqueue_fs);
v5.4
   1/*
   2 * POSIX message queues filesystem for Linux.
   3 *
   4 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
   5 *                          Michal Wronski          (michal.wronski@gmail.com)
   6 *
   7 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
   8 * Lockless receive & send, fd based notify:
   9 *			    Manfred Spraul	    (manfred@colorfullife.com)
  10 *
  11 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
  12 *
  13 * This file is released under the GPL.
  14 */
  15
  16#include <linux/capability.h>
  17#include <linux/init.h>
  18#include <linux/pagemap.h>
  19#include <linux/file.h>
  20#include <linux/mount.h>
  21#include <linux/fs_context.h>
  22#include <linux/namei.h>
  23#include <linux/sysctl.h>
  24#include <linux/poll.h>
  25#include <linux/mqueue.h>
  26#include <linux/msg.h>
  27#include <linux/skbuff.h>
  28#include <linux/vmalloc.h>
  29#include <linux/netlink.h>
  30#include <linux/syscalls.h>
  31#include <linux/audit.h>
  32#include <linux/signal.h>
  33#include <linux/mutex.h>
  34#include <linux/nsproxy.h>
  35#include <linux/pid.h>
  36#include <linux/ipc_namespace.h>
  37#include <linux/user_namespace.h>
  38#include <linux/slab.h>
  39#include <linux/sched/wake_q.h>
  40#include <linux/sched/signal.h>
  41#include <linux/sched/user.h>
  42
  43#include <net/sock.h>
  44#include "util.h"
  45
  46struct mqueue_fs_context {
  47	struct ipc_namespace	*ipc_ns;
  48};
  49
  50#define MQUEUE_MAGIC	0x19800202
  51#define DIRENT_SIZE	20
  52#define FILENT_SIZE	80
  53
  54#define SEND		0
  55#define RECV		1
  56
  57#define STATE_NONE	0
  58#define STATE_READY	1
  59
  60struct posix_msg_tree_node {
  61	struct rb_node		rb_node;
  62	struct list_head	msg_list;
  63	int			priority;
  64};
  65
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  66struct ext_wait_queue {		/* queue of sleeping tasks */
  67	struct task_struct *task;
  68	struct list_head list;
  69	struct msg_msg *msg;	/* ptr of loaded message */
  70	int state;		/* one of STATE_* values */
  71};
  72
  73struct mqueue_inode_info {
  74	spinlock_t lock;
  75	struct inode vfs_inode;
  76	wait_queue_head_t wait_q;
  77
  78	struct rb_root msg_tree;
  79	struct rb_node *msg_tree_rightmost;
  80	struct posix_msg_tree_node *node_cache;
  81	struct mq_attr attr;
  82
  83	struct sigevent notify;
  84	struct pid *notify_owner;
 
  85	struct user_namespace *notify_user_ns;
  86	struct user_struct *user;	/* user who created, for accounting */
  87	struct sock *notify_sock;
  88	struct sk_buff *notify_cookie;
  89
  90	/* for tasks waiting for free space and messages, respectively */
  91	struct ext_wait_queue e_wait_q[2];
  92
  93	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
  94};
  95
  96static struct file_system_type mqueue_fs_type;
  97static const struct inode_operations mqueue_dir_inode_operations;
  98static const struct file_operations mqueue_file_operations;
  99static const struct super_operations mqueue_super_ops;
 100static const struct fs_context_operations mqueue_fs_context_ops;
 101static void remove_notification(struct mqueue_inode_info *info);
 102
 103static struct kmem_cache *mqueue_inode_cachep;
 104
 105static struct ctl_table_header *mq_sysctl_table;
 106
 107static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
 108{
 109	return container_of(inode, struct mqueue_inode_info, vfs_inode);
 110}
 111
 112/*
 113 * This routine should be called with the mq_lock held.
 114 */
 115static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
 116{
 117	return get_ipc_ns(inode->i_sb->s_fs_info);
 118}
 119
 120static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
 121{
 122	struct ipc_namespace *ns;
 123
 124	spin_lock(&mq_lock);
 125	ns = __get_ns_from_inode(inode);
 126	spin_unlock(&mq_lock);
 127	return ns;
 128}
 129
 130/* Auxiliary functions to manipulate messages' list */
 131static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
 132{
 133	struct rb_node **p, *parent = NULL;
 134	struct posix_msg_tree_node *leaf;
 135	bool rightmost = true;
 136
 137	p = &info->msg_tree.rb_node;
 138	while (*p) {
 139		parent = *p;
 140		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
 141
 142		if (likely(leaf->priority == msg->m_type))
 143			goto insert_msg;
 144		else if (msg->m_type < leaf->priority) {
 145			p = &(*p)->rb_left;
 146			rightmost = false;
 147		} else
 148			p = &(*p)->rb_right;
 149	}
 150	if (info->node_cache) {
 151		leaf = info->node_cache;
 152		info->node_cache = NULL;
 153	} else {
 154		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
 155		if (!leaf)
 156			return -ENOMEM;
 157		INIT_LIST_HEAD(&leaf->msg_list);
 158	}
 159	leaf->priority = msg->m_type;
 160
 161	if (rightmost)
 162		info->msg_tree_rightmost = &leaf->rb_node;
 163
 164	rb_link_node(&leaf->rb_node, parent, p);
 165	rb_insert_color(&leaf->rb_node, &info->msg_tree);
 166insert_msg:
 167	info->attr.mq_curmsgs++;
 168	info->qsize += msg->m_ts;
 169	list_add_tail(&msg->m_list, &leaf->msg_list);
 170	return 0;
 171}
 172
 173static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
 174				  struct mqueue_inode_info *info)
 175{
 176	struct rb_node *node = &leaf->rb_node;
 177
 178	if (info->msg_tree_rightmost == node)
 179		info->msg_tree_rightmost = rb_prev(node);
 180
 181	rb_erase(node, &info->msg_tree);
 182	if (info->node_cache) {
 183		kfree(leaf);
 184	} else {
 185		info->node_cache = leaf;
 186	}
 187}
 188
 189static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
 190{
 191	struct rb_node *parent = NULL;
 192	struct posix_msg_tree_node *leaf;
 193	struct msg_msg *msg;
 194
 195try_again:
 196	/*
 197	 * During insert, low priorities go to the left and high to the
 198	 * right.  On receive, we want the highest priorities first, so
 199	 * walk all the way to the right.
 200	 */
 201	parent = info->msg_tree_rightmost;
 202	if (!parent) {
 203		if (info->attr.mq_curmsgs) {
 204			pr_warn_once("Inconsistency in POSIX message queue, "
 205				     "no tree element, but supposedly messages "
 206				     "should exist!\n");
 207			info->attr.mq_curmsgs = 0;
 208		}
 209		return NULL;
 210	}
 211	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
 212	if (unlikely(list_empty(&leaf->msg_list))) {
 213		pr_warn_once("Inconsistency in POSIX message queue, "
 214			     "empty leaf node but we haven't implemented "
 215			     "lazy leaf delete!\n");
 216		msg_tree_erase(leaf, info);
 217		goto try_again;
 218	} else {
 219		msg = list_first_entry(&leaf->msg_list,
 220				       struct msg_msg, m_list);
 221		list_del(&msg->m_list);
 222		if (list_empty(&leaf->msg_list)) {
 223			msg_tree_erase(leaf, info);
 224		}
 225	}
 226	info->attr.mq_curmsgs--;
 227	info->qsize -= msg->m_ts;
 228	return msg;
 229}
 230
 231static struct inode *mqueue_get_inode(struct super_block *sb,
 232		struct ipc_namespace *ipc_ns, umode_t mode,
 233		struct mq_attr *attr)
 234{
 235	struct user_struct *u = current_user();
 236	struct inode *inode;
 237	int ret = -ENOMEM;
 238
 239	inode = new_inode(sb);
 240	if (!inode)
 241		goto err;
 242
 243	inode->i_ino = get_next_ino();
 244	inode->i_mode = mode;
 245	inode->i_uid = current_fsuid();
 246	inode->i_gid = current_fsgid();
 247	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);
 248
 249	if (S_ISREG(mode)) {
 250		struct mqueue_inode_info *info;
 251		unsigned long mq_bytes, mq_treesize;
 252
 253		inode->i_fop = &mqueue_file_operations;
 254		inode->i_size = FILENT_SIZE;
 255		/* mqueue specific info */
 256		info = MQUEUE_I(inode);
 257		spin_lock_init(&info->lock);
 258		init_waitqueue_head(&info->wait_q);
 259		INIT_LIST_HEAD(&info->e_wait_q[0].list);
 260		INIT_LIST_HEAD(&info->e_wait_q[1].list);
 261		info->notify_owner = NULL;
 262		info->notify_user_ns = NULL;
 263		info->qsize = 0;
 264		info->user = NULL;	/* set when all is ok */
 265		info->msg_tree = RB_ROOT;
 266		info->msg_tree_rightmost = NULL;
 267		info->node_cache = NULL;
 268		memset(&info->attr, 0, sizeof(info->attr));
 269		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
 270					   ipc_ns->mq_msg_default);
 271		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
 272					    ipc_ns->mq_msgsize_default);
 273		if (attr) {
 274			info->attr.mq_maxmsg = attr->mq_maxmsg;
 275			info->attr.mq_msgsize = attr->mq_msgsize;
 276		}
 277		/*
 278		 * We used to allocate a static array of pointers and account
 279		 * the size of that array as well as one msg_msg struct per
 280		 * possible message into the queue size. That's no longer
 281		 * accurate as the queue is now an rbtree and will grow and
 282		 * shrink depending on usage patterns.  We can, however, still
 283		 * account one msg_msg struct per message, but the nodes are
 284		 * allocated depending on priority usage, and most programs
 285		 * only use one, or a handful, of priorities.  However, since
 286		 * this is pinned memory, we need to assume worst case, so
 287		 * that means the min(mq_maxmsg, max_priorities) * struct
 288		 * posix_msg_tree_node.
 289		 */
 290
 291		ret = -EINVAL;
 292		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
 293			goto out_inode;
 294		if (capable(CAP_SYS_RESOURCE)) {
 295			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
 296			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
 297				goto out_inode;
 298		} else {
 299			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
 300					info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
 301				goto out_inode;
 302		}
 303		ret = -EOVERFLOW;
 304		/* check for overflow */
 305		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
 306			goto out_inode;
 307		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
 308			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
 309			sizeof(struct posix_msg_tree_node);
 310		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
 311		if (mq_bytes + mq_treesize < mq_bytes)
 312			goto out_inode;
 313		mq_bytes += mq_treesize;
 314		spin_lock(&mq_lock);
 315		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
 316		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
 
 
 
 
 
 
 
 
 
 
 
 
 317			spin_unlock(&mq_lock);
 318			/* mqueue_evict_inode() releases info->messages */
 319			ret = -EMFILE;
 320			goto out_inode;
 321		}
 322		u->mq_bytes += mq_bytes;
 323		spin_unlock(&mq_lock);
 324
 325		/* all is ok */
 326		info->user = get_uid(u);
 327	} else if (S_ISDIR(mode)) {
 328		inc_nlink(inode);
 329		/* Some things misbehave if size == 0 on a directory */
 330		inode->i_size = 2 * DIRENT_SIZE;
 331		inode->i_op = &mqueue_dir_inode_operations;
 332		inode->i_fop = &simple_dir_operations;
 333	}
 334
 335	return inode;
 336out_inode:
 337	iput(inode);
 338err:
 339	return ERR_PTR(ret);
 340}
 341
 342static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
 343{
 344	struct inode *inode;
 345	struct ipc_namespace *ns = sb->s_fs_info;
 346
 347	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
 348	sb->s_blocksize = PAGE_SIZE;
 349	sb->s_blocksize_bits = PAGE_SHIFT;
 350	sb->s_magic = MQUEUE_MAGIC;
 351	sb->s_op = &mqueue_super_ops;
 352
 353	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
 354	if (IS_ERR(inode))
 355		return PTR_ERR(inode);
 356
 357	sb->s_root = d_make_root(inode);
 358	if (!sb->s_root)
 359		return -ENOMEM;
 360	return 0;
 361}
 362
 363static int mqueue_get_tree(struct fs_context *fc)
 364{
 365	struct mqueue_fs_context *ctx = fc->fs_private;
 366
 367	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
 368}
 369
 370static void mqueue_fs_context_free(struct fs_context *fc)
 371{
 372	struct mqueue_fs_context *ctx = fc->fs_private;
 373
 374	put_ipc_ns(ctx->ipc_ns);
 375	kfree(ctx);
 376}
 377
 378static int mqueue_init_fs_context(struct fs_context *fc)
 379{
 380	struct mqueue_fs_context *ctx;
 381
 382	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
 383	if (!ctx)
 384		return -ENOMEM;
 385
 386	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
 387	put_user_ns(fc->user_ns);
 388	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
 389	fc->fs_private = ctx;
 390	fc->ops = &mqueue_fs_context_ops;
 391	return 0;
 392}
 393
 394static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
 395{
 396	struct mqueue_fs_context *ctx;
 397	struct fs_context *fc;
 398	struct vfsmount *mnt;
 399
 400	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
 401	if (IS_ERR(fc))
 402		return ERR_CAST(fc);
 403
 404	ctx = fc->fs_private;
 405	put_ipc_ns(ctx->ipc_ns);
 406	ctx->ipc_ns = get_ipc_ns(ns);
 407	put_user_ns(fc->user_ns);
 408	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
 409
 410	mnt = fc_mount(fc);
 411	put_fs_context(fc);
 412	return mnt;
 413}
 414
 415static void init_once(void *foo)
 416{
 417	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
 418
 419	inode_init_once(&p->vfs_inode);
 420}
 421
 422static struct inode *mqueue_alloc_inode(struct super_block *sb)
 423{
 424	struct mqueue_inode_info *ei;
 425
 426	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
 427	if (!ei)
 428		return NULL;
 429	return &ei->vfs_inode;
 430}
 431
 432static void mqueue_free_inode(struct inode *inode)
 433{
 434	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
 435}
 436
 437static void mqueue_evict_inode(struct inode *inode)
 438{
 439	struct mqueue_inode_info *info;
 440	struct user_struct *user;
 441	struct ipc_namespace *ipc_ns;
 442	struct msg_msg *msg, *nmsg;
 443	LIST_HEAD(tmp_msg);
 444
 445	clear_inode(inode);
 446
 447	if (S_ISDIR(inode->i_mode))
 448		return;
 449
 450	ipc_ns = get_ns_from_inode(inode);
 451	info = MQUEUE_I(inode);
 452	spin_lock(&info->lock);
 453	while ((msg = msg_get(info)) != NULL)
 454		list_add_tail(&msg->m_list, &tmp_msg);
 455	kfree(info->node_cache);
 456	spin_unlock(&info->lock);
 457
 458	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
 459		list_del(&msg->m_list);
 460		free_msg(msg);
 461	}
 462
 463	user = info->user;
 464	if (user) {
 465		unsigned long mq_bytes, mq_treesize;
 466
 467		/* Total amount of bytes accounted for the mqueue */
 468		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
 469			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
 470			sizeof(struct posix_msg_tree_node);
 471
 472		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
 473					  info->attr.mq_msgsize);
 474
 475		spin_lock(&mq_lock);
 476		user->mq_bytes -= mq_bytes;
 477		/*
 478		 * get_ns_from_inode() ensures that the
 479		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
 480		 * to which we now hold a reference, or it is NULL.
 481		 * We can't put it here under mq_lock, though.
 482		 */
 483		if (ipc_ns)
 484			ipc_ns->mq_queues_count--;
 485		spin_unlock(&mq_lock);
 486		free_uid(user);
 
 487	}
 488	if (ipc_ns)
 489		put_ipc_ns(ipc_ns);
 490}
 491
 492static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
 493{
 494	struct inode *dir = dentry->d_parent->d_inode;
 495	struct inode *inode;
 496	struct mq_attr *attr = arg;
 497	int error;
 498	struct ipc_namespace *ipc_ns;
 499
 500	spin_lock(&mq_lock);
 501	ipc_ns = __get_ns_from_inode(dir);
 502	if (!ipc_ns) {
 503		error = -EACCES;
 504		goto out_unlock;
 505	}
 506
 507	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
 508	    !capable(CAP_SYS_RESOURCE)) {
 509		error = -ENOSPC;
 510		goto out_unlock;
 511	}
 512	ipc_ns->mq_queues_count++;
 513	spin_unlock(&mq_lock);
 514
 515	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
 516	if (IS_ERR(inode)) {
 517		error = PTR_ERR(inode);
 518		spin_lock(&mq_lock);
 519		ipc_ns->mq_queues_count--;
 520		goto out_unlock;
 521	}
 522
 523	put_ipc_ns(ipc_ns);
 524	dir->i_size += DIRENT_SIZE;
 525	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
 526
 527	d_instantiate(dentry, inode);
 528	dget(dentry);
 529	return 0;
 530out_unlock:
 531	spin_unlock(&mq_lock);
 532	if (ipc_ns)
 533		put_ipc_ns(ipc_ns);
 534	return error;
 535}
 536
 537static int mqueue_create(struct inode *dir, struct dentry *dentry,
 538				umode_t mode, bool excl)
 539{
 540	return mqueue_create_attr(dentry, mode, NULL);
 541}
 542
 543static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
 544{
 545	struct inode *inode = d_inode(dentry);
 546
 547	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
 548	dir->i_size -= DIRENT_SIZE;
 549	drop_nlink(inode);
 550	dput(dentry);
 551	return 0;
 552}
 553
 554/*
 555*	This is routine for system read from queue file.
 556*	To avoid mess with doing here some sort of mq_receive we allow
 557*	to read only queue size & notification info (the only values
 558*	that are interesting from user point of view and aren't accessible
 559*	through std routines)
 560*/
 561static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
 562				size_t count, loff_t *off)
 563{
 564	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
 565	char buffer[FILENT_SIZE];
 566	ssize_t ret;
 567
 568	spin_lock(&info->lock);
 569	snprintf(buffer, sizeof(buffer),
 570			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
 571			info->qsize,
 572			info->notify_owner ? info->notify.sigev_notify : 0,
 573			(info->notify_owner &&
 574			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
 575				info->notify.sigev_signo : 0,
 576			pid_vnr(info->notify_owner));
 577	spin_unlock(&info->lock);
 578	buffer[sizeof(buffer)-1] = '\0';
 579
 580	ret = simple_read_from_buffer(u_data, count, off, buffer,
 581				strlen(buffer));
 582	if (ret <= 0)
 583		return ret;
 584
 585	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
 586	return ret;
 587}
 588
 589static int mqueue_flush_file(struct file *filp, fl_owner_t id)
 590{
 591	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
 592
 593	spin_lock(&info->lock);
 594	if (task_tgid(current) == info->notify_owner)
 595		remove_notification(info);
 596
 597	spin_unlock(&info->lock);
 598	return 0;
 599}
 600
 601static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
 602{
 603	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
 604	__poll_t retval = 0;
 605
 606	poll_wait(filp, &info->wait_q, poll_tab);
 607
 608	spin_lock(&info->lock);
 609	if (info->attr.mq_curmsgs)
 610		retval = EPOLLIN | EPOLLRDNORM;
 611
 612	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
 613		retval |= EPOLLOUT | EPOLLWRNORM;
 614	spin_unlock(&info->lock);
 615
 616	return retval;
 617}
 618
 619/* Adds current to info->e_wait_q[sr] before element with smaller prio */
 620static void wq_add(struct mqueue_inode_info *info, int sr,
 621			struct ext_wait_queue *ewp)
 622{
 623	struct ext_wait_queue *walk;
 624
 625	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
 626		if (walk->task->prio <= current->prio) {
 627			list_add_tail(&ewp->list, &walk->list);
 628			return;
 629		}
 630	}
 631	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
 632}
 633
 634/*
 635 * Puts current task to sleep. Caller must hold queue lock. After return
 636 * lock isn't held.
 637 * sr: SEND or RECV
 638 */
 639static int wq_sleep(struct mqueue_inode_info *info, int sr,
 640		    ktime_t *timeout, struct ext_wait_queue *ewp)
 641	__releases(&info->lock)
 642{
 643	int retval;
 644	signed long time;
 645
 646	wq_add(info, sr, ewp);
 647
 648	for (;;) {
 
 649		__set_current_state(TASK_INTERRUPTIBLE);
 650
 651		spin_unlock(&info->lock);
 652		time = schedule_hrtimeout_range_clock(timeout, 0,
 653			HRTIMER_MODE_ABS, CLOCK_REALTIME);
 654
 655		if (ewp->state == STATE_READY) {
 
 
 656			retval = 0;
 657			goto out;
 658		}
 659		spin_lock(&info->lock);
 660		if (ewp->state == STATE_READY) {
 
 
 661			retval = 0;
 662			goto out_unlock;
 663		}
 664		if (signal_pending(current)) {
 665			retval = -ERESTARTSYS;
 666			break;
 667		}
 668		if (time == 0) {
 669			retval = -ETIMEDOUT;
 670			break;
 671		}
 672	}
 673	list_del(&ewp->list);
 674out_unlock:
 675	spin_unlock(&info->lock);
 676out:
 677	return retval;
 678}
 679
 680/*
 681 * Returns waiting task that should be serviced first or NULL if none exists
 682 */
 683static struct ext_wait_queue *wq_get_first_waiter(
 684		struct mqueue_inode_info *info, int sr)
 685{
 686	struct list_head *ptr;
 687
 688	ptr = info->e_wait_q[sr].list.prev;
 689	if (ptr == &info->e_wait_q[sr].list)
 690		return NULL;
 691	return list_entry(ptr, struct ext_wait_queue, list);
 692}
 693
 694
 695static inline void set_cookie(struct sk_buff *skb, char code)
 696{
 697	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
 698}
 699
 700/*
 701 * The next function is only to split too long sys_mq_timedsend
 702 */
 703static void __do_notify(struct mqueue_inode_info *info)
 704{
 705	/* notification
 706	 * invoked when there is registered process and there isn't process
 707	 * waiting synchronously for message AND state of queue changed from
 708	 * empty to not empty. Here we are sure that no one is waiting
 709	 * synchronously. */
 710	if (info->notify_owner &&
 711	    info->attr.mq_curmsgs == 1) {
 712		struct kernel_siginfo sig_i;
 713		switch (info->notify.sigev_notify) {
 714		case SIGEV_NONE:
 715			break;
 716		case SIGEV_SIGNAL:
 717			/* sends signal */
 
 
 
 
 
 718
 719			clear_siginfo(&sig_i);
 720			sig_i.si_signo = info->notify.sigev_signo;
 721			sig_i.si_errno = 0;
 722			sig_i.si_code = SI_MESGQ;
 723			sig_i.si_value = info->notify.sigev_value;
 
 724			/* map current pid/uid into info->owner's namespaces */
 725			rcu_read_lock();
 726			sig_i.si_pid = task_tgid_nr_ns(current,
 727						ns_of_pid(info->notify_owner));
 728			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 729			rcu_read_unlock();
 730
 731			kill_pid_info(info->notify.sigev_signo,
 732				      &sig_i, info->notify_owner);
 733			break;
 
 734		case SIGEV_THREAD:
 735			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
 736			netlink_sendskb(info->notify_sock, info->notify_cookie);
 737			break;
 738		}
 739		/* after notification unregisters process */
 740		put_pid(info->notify_owner);
 741		put_user_ns(info->notify_user_ns);
 742		info->notify_owner = NULL;
 743		info->notify_user_ns = NULL;
 744	}
 745	wake_up(&info->wait_q);
 746}
 747
 748static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
 749			   struct timespec64 *ts)
 750{
 751	if (get_timespec64(ts, u_abs_timeout))
 752		return -EFAULT;
 753	if (!timespec64_valid(ts))
 754		return -EINVAL;
 755	return 0;
 756}
 757
 758static void remove_notification(struct mqueue_inode_info *info)
 759{
 760	if (info->notify_owner != NULL &&
 761	    info->notify.sigev_notify == SIGEV_THREAD) {
 762		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
 763		netlink_sendskb(info->notify_sock, info->notify_cookie);
 764	}
 765	put_pid(info->notify_owner);
 766	put_user_ns(info->notify_user_ns);
 767	info->notify_owner = NULL;
 768	info->notify_user_ns = NULL;
 769}
 770
 771static int prepare_open(struct dentry *dentry, int oflag, int ro,
 772			umode_t mode, struct filename *name,
 773			struct mq_attr *attr)
 774{
 775	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
 776						  MAY_READ | MAY_WRITE };
 777	int acc;
 778
 779	if (d_really_is_negative(dentry)) {
 780		if (!(oflag & O_CREAT))
 781			return -ENOENT;
 782		if (ro)
 783			return ro;
 784		audit_inode_parent_hidden(name, dentry->d_parent);
 785		return vfs_mkobj(dentry, mode & ~current_umask(),
 786				  mqueue_create_attr, attr);
 787	}
 788	/* it already existed */
 789	audit_inode(name, dentry, 0);
 790	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
 791		return -EEXIST;
 792	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
 793		return -EINVAL;
 794	acc = oflag2acc[oflag & O_ACCMODE];
 795	return inode_permission(d_inode(dentry), acc);
 796}
 797
 798static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
 799		      struct mq_attr *attr)
 800{
 801	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
 802	struct dentry *root = mnt->mnt_root;
 803	struct filename *name;
 804	struct path path;
 805	int fd, error;
 806	int ro;
 807
 808	audit_mq_open(oflag, mode, attr);
 809
 810	if (IS_ERR(name = getname(u_name)))
 811		return PTR_ERR(name);
 812
 813	fd = get_unused_fd_flags(O_CLOEXEC);
 814	if (fd < 0)
 815		goto out_putname;
 816
 817	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
 818	inode_lock(d_inode(root));
 819	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
 820	if (IS_ERR(path.dentry)) {
 821		error = PTR_ERR(path.dentry);
 822		goto out_putfd;
 823	}
 824	path.mnt = mntget(mnt);
 825	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
 826	if (!error) {
 827		struct file *file = dentry_open(&path, oflag, current_cred());
 828		if (!IS_ERR(file))
 829			fd_install(fd, file);
 830		else
 831			error = PTR_ERR(file);
 832	}
 833	path_put(&path);
 834out_putfd:
 835	if (error) {
 836		put_unused_fd(fd);
 837		fd = error;
 838	}
 839	inode_unlock(d_inode(root));
 840	if (!ro)
 841		mnt_drop_write(mnt);
 842out_putname:
 843	putname(name);
 844	return fd;
 845}
 846
 847SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
 848		struct mq_attr __user *, u_attr)
 849{
 850	struct mq_attr attr;
 851	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
 852		return -EFAULT;
 853
 854	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
 855}
 856
 857SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
 858{
 859	int err;
 860	struct filename *name;
 861	struct dentry *dentry;
 862	struct inode *inode = NULL;
 863	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
 864	struct vfsmount *mnt = ipc_ns->mq_mnt;
 865
 866	name = getname(u_name);
 867	if (IS_ERR(name))
 868		return PTR_ERR(name);
 869
 870	audit_inode_parent_hidden(name, mnt->mnt_root);
 871	err = mnt_want_write(mnt);
 872	if (err)
 873		goto out_name;
 874	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
 875	dentry = lookup_one_len(name->name, mnt->mnt_root,
 876				strlen(name->name));
 877	if (IS_ERR(dentry)) {
 878		err = PTR_ERR(dentry);
 879		goto out_unlock;
 880	}
 881
 882	inode = d_inode(dentry);
 883	if (!inode) {
 884		err = -ENOENT;
 885	} else {
 886		ihold(inode);
 887		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
 
 888	}
 889	dput(dentry);
 890
 891out_unlock:
 892	inode_unlock(d_inode(mnt->mnt_root));
 893	if (inode)
 894		iput(inode);
 895	mnt_drop_write(mnt);
 896out_name:
 897	putname(name);
 898
 899	return err;
 900}
 901
 902/* Pipelined send and receive functions.
 903 *
 904 * If a receiver finds no waiting message, then it registers itself in the
 905 * list of waiting receivers. A sender checks that list before adding the new
 906 * message into the message array. If there is a waiting receiver, then it
 907 * bypasses the message array and directly hands the message over to the
 908 * receiver. The receiver accepts the message and returns without grabbing the
 909 * queue spinlock:
 910 *
 911 * - Set pointer to message.
 912 * - Queue the receiver task for later wakeup (without the info->lock).
 913 * - Update its state to STATE_READY. Now the receiver can continue.
 914 * - Wake up the process after the lock is dropped. Should the process wake up
 915 *   before this wakeup (due to a timeout or a signal) it will either see
 916 *   STATE_READY and continue or acquire the lock to check the state again.
 917 *
 918 * The same algorithm is used for senders.
 919 */
 920
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 921/* pipelined_send() - send a message directly to the task waiting in
 922 * sys_mq_timedreceive() (without inserting message into a queue).
 923 */
 924static inline void pipelined_send(struct wake_q_head *wake_q,
 925				  struct mqueue_inode_info *info,
 926				  struct msg_msg *message,
 927				  struct ext_wait_queue *receiver)
 928{
 929	receiver->msg = message;
 930	list_del(&receiver->list);
 931	wake_q_add(wake_q, receiver->task);
 932	/*
 933	 * Rely on the implicit cmpxchg barrier from wake_q_add such
 934	 * that we can ensure that updating receiver->state is the last
 935	 * write operation: As once set, the receiver can continue,
 936	 * and if we don't have the reference count from the wake_q,
 937	 * yet, at that point we can later have a use-after-free
 938	 * condition and bogus wakeup.
 939	 */
 940	receiver->state = STATE_READY;
 941}
 942
 943/* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
 944 * gets its message and put to the queue (we have one free place for sure). */
 945static inline void pipelined_receive(struct wake_q_head *wake_q,
 946				     struct mqueue_inode_info *info)
 947{
 948	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);
 949
 950	if (!sender) {
 951		/* for poll */
 952		wake_up_interruptible(&info->wait_q);
 953		return;
 954	}
 955	if (msg_insert(sender->msg, info))
 956		return;
 957
 958	list_del(&sender->list);
 959	wake_q_add(wake_q, sender->task);
 960	sender->state = STATE_READY;
 961}
 962
 963static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
 964		size_t msg_len, unsigned int msg_prio,
 965		struct timespec64 *ts)
 966{
 967	struct fd f;
 968	struct inode *inode;
 969	struct ext_wait_queue wait;
 970	struct ext_wait_queue *receiver;
 971	struct msg_msg *msg_ptr;
 972	struct mqueue_inode_info *info;
 973	ktime_t expires, *timeout = NULL;
 974	struct posix_msg_tree_node *new_leaf = NULL;
 975	int ret = 0;
 976	DEFINE_WAKE_Q(wake_q);
 977
 978	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
 979		return -EINVAL;
 980
 981	if (ts) {
 982		expires = timespec64_to_ktime(*ts);
 983		timeout = &expires;
 984	}
 985
 986	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);
 987
 988	f = fdget(mqdes);
 989	if (unlikely(!f.file)) {
 990		ret = -EBADF;
 991		goto out;
 992	}
 993
 994	inode = file_inode(f.file);
 995	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
 996		ret = -EBADF;
 997		goto out_fput;
 998	}
 999	info = MQUEUE_I(inode);
1000	audit_file(f.file);
1001
1002	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
1003		ret = -EBADF;
1004		goto out_fput;
1005	}
1006
1007	if (unlikely(msg_len > info->attr.mq_msgsize)) {
1008		ret = -EMSGSIZE;
1009		goto out_fput;
1010	}
1011
1012	/* First try to allocate memory, before doing anything with
1013	 * existing queues. */
1014	msg_ptr = load_msg(u_msg_ptr, msg_len);
1015	if (IS_ERR(msg_ptr)) {
1016		ret = PTR_ERR(msg_ptr);
1017		goto out_fput;
1018	}
1019	msg_ptr->m_ts = msg_len;
1020	msg_ptr->m_type = msg_prio;
1021
1022	/*
1023	 * msg_insert really wants us to have a valid, spare node struct so
1024	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
1025	 * fall back to that if necessary.
1026	 */
1027	if (!info->node_cache)
1028		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
1029
1030	spin_lock(&info->lock);
1031
1032	if (!info->node_cache && new_leaf) {
1033		/* Save our speculative allocation into the cache */
1034		INIT_LIST_HEAD(&new_leaf->msg_list);
1035		info->node_cache = new_leaf;
1036		new_leaf = NULL;
1037	} else {
1038		kfree(new_leaf);
1039	}
1040
1041	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
1042		if (f.file->f_flags & O_NONBLOCK) {
1043			ret = -EAGAIN;
1044		} else {
1045			wait.task = current;
1046			wait.msg = (void *) msg_ptr;
1047			wait.state = STATE_NONE;
 
 
1048			ret = wq_sleep(info, SEND, timeout, &wait);
1049			/*
1050			 * wq_sleep must be called with info->lock held, and
1051			 * returns with the lock released
1052			 */
1053			goto out_free;
1054		}
1055	} else {
1056		receiver = wq_get_first_waiter(info, RECV);
1057		if (receiver) {
1058			pipelined_send(&wake_q, info, msg_ptr, receiver);
1059		} else {
1060			/* adds message to the queue */
1061			ret = msg_insert(msg_ptr, info);
1062			if (ret)
1063				goto out_unlock;
1064			__do_notify(info);
1065		}
1066		inode->i_atime = inode->i_mtime = inode->i_ctime =
1067				current_time(inode);
1068	}
1069out_unlock:
1070	spin_unlock(&info->lock);
1071	wake_up_q(&wake_q);
1072out_free:
1073	if (ret)
1074		free_msg(msg_ptr);
1075out_fput:
1076	fdput(f);
1077out:
1078	return ret;
1079}
1080
1081static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
1082		size_t msg_len, unsigned int __user *u_msg_prio,
1083		struct timespec64 *ts)
1084{
1085	ssize_t ret;
1086	struct msg_msg *msg_ptr;
1087	struct fd f;
1088	struct inode *inode;
1089	struct mqueue_inode_info *info;
1090	struct ext_wait_queue wait;
1091	ktime_t expires, *timeout = NULL;
1092	struct posix_msg_tree_node *new_leaf = NULL;
1093
1094	if (ts) {
1095		expires = timespec64_to_ktime(*ts);
1096		timeout = &expires;
1097	}
1098
1099	audit_mq_sendrecv(mqdes, msg_len, 0, ts);
1100
1101	f = fdget(mqdes);
1102	if (unlikely(!f.file)) {
1103		ret = -EBADF;
1104		goto out;
1105	}
1106
1107	inode = file_inode(f.file);
1108	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1109		ret = -EBADF;
1110		goto out_fput;
1111	}
1112	info = MQUEUE_I(inode);
1113	audit_file(f.file);
1114
1115	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
1116		ret = -EBADF;
1117		goto out_fput;
1118	}
1119
1120	/* checks if buffer is big enough */
1121	if (unlikely(msg_len < info->attr.mq_msgsize)) {
1122		ret = -EMSGSIZE;
1123		goto out_fput;
1124	}
1125
1126	/*
1127	 * msg_insert really wants us to have a valid, spare node struct so
1128	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
1129	 * fall back to that if necessary.
1130	 */
1131	if (!info->node_cache)
1132		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
1133
1134	spin_lock(&info->lock);
1135
1136	if (!info->node_cache && new_leaf) {
1137		/* Save our speculative allocation into the cache */
1138		INIT_LIST_HEAD(&new_leaf->msg_list);
1139		info->node_cache = new_leaf;
1140	} else {
1141		kfree(new_leaf);
1142	}
1143
1144	if (info->attr.mq_curmsgs == 0) {
1145		if (f.file->f_flags & O_NONBLOCK) {
1146			spin_unlock(&info->lock);
1147			ret = -EAGAIN;
1148		} else {
1149			wait.task = current;
1150			wait.state = STATE_NONE;
 
 
1151			ret = wq_sleep(info, RECV, timeout, &wait);
1152			msg_ptr = wait.msg;
1153		}
1154	} else {
1155		DEFINE_WAKE_Q(wake_q);
1156
1157		msg_ptr = msg_get(info);
1158
1159		inode->i_atime = inode->i_mtime = inode->i_ctime =
1160				current_time(inode);
1161
1162		/* There is now free space in queue. */
1163		pipelined_receive(&wake_q, info);
1164		spin_unlock(&info->lock);
1165		wake_up_q(&wake_q);
1166		ret = 0;
1167	}
1168	if (ret == 0) {
1169		ret = msg_ptr->m_ts;
1170
1171		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
1172			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
1173			ret = -EFAULT;
1174		}
1175		free_msg(msg_ptr);
1176	}
1177out_fput:
1178	fdput(f);
1179out:
1180	return ret;
1181}
1182
1183SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
1184		size_t, msg_len, unsigned int, msg_prio,
1185		const struct __kernel_timespec __user *, u_abs_timeout)
1186{
1187	struct timespec64 ts, *p = NULL;
1188	if (u_abs_timeout) {
1189		int res = prepare_timeout(u_abs_timeout, &ts);
1190		if (res)
1191			return res;
1192		p = &ts;
1193	}
1194	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
1195}
1196
1197SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
1198		size_t, msg_len, unsigned int __user *, u_msg_prio,
1199		const struct __kernel_timespec __user *, u_abs_timeout)
1200{
1201	struct timespec64 ts, *p = NULL;
1202	if (u_abs_timeout) {
1203		int res = prepare_timeout(u_abs_timeout, &ts);
1204		if (res)
1205			return res;
1206		p = &ts;
1207	}
1208	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
1209}
1210
1211/*
1212 * Notes: the case when user wants us to deregister (with NULL as pointer)
1213 * and he isn't currently owner of notification, will be silently discarded.
1214 * It isn't explicitly defined in the POSIX.
1215 */
1216static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
1217{
1218	int ret;
1219	struct fd f;
1220	struct sock *sock;
1221	struct inode *inode;
1222	struct mqueue_inode_info *info;
1223	struct sk_buff *nc;
1224
1225	audit_mq_notify(mqdes, notification);
1226
1227	nc = NULL;
1228	sock = NULL;
1229	if (notification != NULL) {
1230		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
1231			     notification->sigev_notify != SIGEV_SIGNAL &&
1232			     notification->sigev_notify != SIGEV_THREAD))
1233			return -EINVAL;
1234		if (notification->sigev_notify == SIGEV_SIGNAL &&
1235			!valid_signal(notification->sigev_signo)) {
1236			return -EINVAL;
1237		}
1238		if (notification->sigev_notify == SIGEV_THREAD) {
1239			long timeo;
1240
1241			/* create the notify skb */
1242			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
1243			if (!nc)
1244				return -ENOMEM;
1245
1246			if (copy_from_user(nc->data,
1247					notification->sigev_value.sival_ptr,
1248					NOTIFY_COOKIE_LEN)) {
1249				ret = -EFAULT;
1250				goto free_skb;
1251			}
1252
1253			/* TODO: add a header? */
1254			skb_put(nc, NOTIFY_COOKIE_LEN);
1255			/* and attach it to the socket */
1256retry:
1257			f = fdget(notification->sigev_signo);
1258			if (!f.file) {
1259				ret = -EBADF;
1260				goto out;
1261			}
1262			sock = netlink_getsockbyfilp(f.file);
1263			fdput(f);
1264			if (IS_ERR(sock)) {
1265				ret = PTR_ERR(sock);
1266				goto free_skb;
1267			}
1268
1269			timeo = MAX_SCHEDULE_TIMEOUT;
1270			ret = netlink_attachskb(sock, nc, &timeo, NULL);
1271			if (ret == 1) {
1272				sock = NULL;
1273				goto retry;
1274			}
1275			if (ret)
1276				return ret;
1277		}
1278	}
1279
1280	f = fdget(mqdes);
1281	if (!f.file) {
1282		ret = -EBADF;
1283		goto out;
1284	}
1285
1286	inode = file_inode(f.file);
1287	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1288		ret = -EBADF;
1289		goto out_fput;
1290	}
1291	info = MQUEUE_I(inode);
1292
1293	ret = 0;
1294	spin_lock(&info->lock);
1295	if (notification == NULL) {
1296		if (info->notify_owner == task_tgid(current)) {
1297			remove_notification(info);
1298			inode->i_atime = inode->i_ctime = current_time(inode);
1299		}
1300	} else if (info->notify_owner != NULL) {
1301		ret = -EBUSY;
1302	} else {
1303		switch (notification->sigev_notify) {
1304		case SIGEV_NONE:
1305			info->notify.sigev_notify = SIGEV_NONE;
1306			break;
1307		case SIGEV_THREAD:
1308			info->notify_sock = sock;
1309			info->notify_cookie = nc;
1310			sock = NULL;
1311			nc = NULL;
1312			info->notify.sigev_notify = SIGEV_THREAD;
1313			break;
1314		case SIGEV_SIGNAL:
1315			info->notify.sigev_signo = notification->sigev_signo;
1316			info->notify.sigev_value = notification->sigev_value;
1317			info->notify.sigev_notify = SIGEV_SIGNAL;
 
1318			break;
1319		}
1320
1321		info->notify_owner = get_pid(task_tgid(current));
1322		info->notify_user_ns = get_user_ns(current_user_ns());
1323		inode->i_atime = inode->i_ctime = current_time(inode);
1324	}
1325	spin_unlock(&info->lock);
1326out_fput:
1327	fdput(f);
1328out:
1329	if (sock)
1330		netlink_detachskb(sock, nc);
1331	else
1332free_skb:
1333		dev_kfree_skb(nc);
1334
1335	return ret;
1336}
1337
1338SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1339		const struct sigevent __user *, u_notification)
1340{
1341	struct sigevent n, *p = NULL;
1342	if (u_notification) {
1343		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
1344			return -EFAULT;
1345		p = &n;
1346	}
1347	return do_mq_notify(mqdes, p);
1348}
1349
1350static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
1351{
1352	struct fd f;
1353	struct inode *inode;
1354	struct mqueue_inode_info *info;
1355
1356	if (new && (new->mq_flags & (~O_NONBLOCK)))
1357		return -EINVAL;
1358
1359	f = fdget(mqdes);
1360	if (!f.file)
1361		return -EBADF;
1362
1363	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1364		fdput(f);
1365		return -EBADF;
1366	}
1367
1368	inode = file_inode(f.file);
1369	info = MQUEUE_I(inode);
1370
1371	spin_lock(&info->lock);
1372
1373	if (old) {
1374		*old = info->attr;
1375		old->mq_flags = f.file->f_flags & O_NONBLOCK;
1376	}
1377	if (new) {
1378		audit_mq_getsetattr(mqdes, new);
1379		spin_lock(&f.file->f_lock);
1380		if (new->mq_flags & O_NONBLOCK)
1381			f.file->f_flags |= O_NONBLOCK;
1382		else
1383			f.file->f_flags &= ~O_NONBLOCK;
1384		spin_unlock(&f.file->f_lock);
1385
1386		inode->i_atime = inode->i_ctime = current_time(inode);
1387	}
1388
1389	spin_unlock(&info->lock);
1390	fdput(f);
1391	return 0;
1392}
1393
1394SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1395		const struct mq_attr __user *, u_mqstat,
1396		struct mq_attr __user *, u_omqstat)
1397{
1398	int ret;
1399	struct mq_attr mqstat, omqstat;
1400	struct mq_attr *new = NULL, *old = NULL;
1401
1402	if (u_mqstat) {
1403		new = &mqstat;
1404		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
1405			return -EFAULT;
1406	}
1407	if (u_omqstat)
1408		old = &omqstat;
1409
1410	ret = do_mq_getsetattr(mqdes, new, old);
1411	if (ret || !old)
1412		return ret;
1413
1414	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
1415		return -EFAULT;
1416	return 0;
1417}
1418
1419#ifdef CONFIG_COMPAT
1420
1421struct compat_mq_attr {
1422	compat_long_t mq_flags;      /* message queue flags		     */
1423	compat_long_t mq_maxmsg;     /* maximum number of messages	     */
1424	compat_long_t mq_msgsize;    /* maximum message size		     */
1425	compat_long_t mq_curmsgs;    /* number of messages currently queued  */
1426	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
1427};
1428
1429static inline int get_compat_mq_attr(struct mq_attr *attr,
1430			const struct compat_mq_attr __user *uattr)
1431{
1432	struct compat_mq_attr v;
1433
1434	if (copy_from_user(&v, uattr, sizeof(*uattr)))
1435		return -EFAULT;
1436
1437	memset(attr, 0, sizeof(*attr));
1438	attr->mq_flags = v.mq_flags;
1439	attr->mq_maxmsg = v.mq_maxmsg;
1440	attr->mq_msgsize = v.mq_msgsize;
1441	attr->mq_curmsgs = v.mq_curmsgs;
1442	return 0;
1443}
1444
1445static inline int put_compat_mq_attr(const struct mq_attr *attr,
1446			struct compat_mq_attr __user *uattr)
1447{
1448	struct compat_mq_attr v;
1449
1450	memset(&v, 0, sizeof(v));
1451	v.mq_flags = attr->mq_flags;
1452	v.mq_maxmsg = attr->mq_maxmsg;
1453	v.mq_msgsize = attr->mq_msgsize;
1454	v.mq_curmsgs = attr->mq_curmsgs;
1455	if (copy_to_user(uattr, &v, sizeof(*uattr)))
1456		return -EFAULT;
1457	return 0;
1458}
1459
1460COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
1461		       int, oflag, compat_mode_t, mode,
1462		       struct compat_mq_attr __user *, u_attr)
1463{
1464	struct mq_attr attr, *p = NULL;
1465	if (u_attr && oflag & O_CREAT) {
1466		p = &attr;
1467		if (get_compat_mq_attr(&attr, u_attr))
1468			return -EFAULT;
1469	}
1470	return do_mq_open(u_name, oflag, mode, p);
1471}
1472
1473COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1474		       const struct compat_sigevent __user *, u_notification)
1475{
1476	struct sigevent n, *p = NULL;
1477	if (u_notification) {
1478		if (get_compat_sigevent(&n, u_notification))
1479			return -EFAULT;
1480		if (n.sigev_notify == SIGEV_THREAD)
1481			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
1482		p = &n;
1483	}
1484	return do_mq_notify(mqdes, p);
1485}
1486
1487COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1488		       const struct compat_mq_attr __user *, u_mqstat,
1489		       struct compat_mq_attr __user *, u_omqstat)
1490{
1491	int ret;
1492	struct mq_attr mqstat, omqstat;
1493	struct mq_attr *new = NULL, *old = NULL;
1494
1495	if (u_mqstat) {
1496		new = &mqstat;
1497		if (get_compat_mq_attr(new, u_mqstat))
1498			return -EFAULT;
1499	}
1500	if (u_omqstat)
1501		old = &omqstat;
1502
1503	ret = do_mq_getsetattr(mqdes, new, old);
1504	if (ret || !old)
1505		return ret;
1506
1507	if (put_compat_mq_attr(old, u_omqstat))
1508		return -EFAULT;
1509	return 0;
1510}
1511#endif
1512
1513#ifdef CONFIG_COMPAT_32BIT_TIME
1514static int compat_prepare_timeout(const struct old_timespec32 __user *p,
1515				   struct timespec64 *ts)
1516{
1517	if (get_old_timespec32(ts, p))
1518		return -EFAULT;
1519	if (!timespec64_valid(ts))
1520		return -EINVAL;
1521	return 0;
1522}
1523
1524SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
1525		const char __user *, u_msg_ptr,
1526		unsigned int, msg_len, unsigned int, msg_prio,
1527		const struct old_timespec32 __user *, u_abs_timeout)
1528{
1529	struct timespec64 ts, *p = NULL;
1530	if (u_abs_timeout) {
1531		int res = compat_prepare_timeout(u_abs_timeout, &ts);
1532		if (res)
1533			return res;
1534		p = &ts;
1535	}
1536	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
1537}
1538
1539SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
1540		char __user *, u_msg_ptr,
1541		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
1542		const struct old_timespec32 __user *, u_abs_timeout)
1543{
1544	struct timespec64 ts, *p = NULL;
1545	if (u_abs_timeout) {
1546		int res = compat_prepare_timeout(u_abs_timeout, &ts);
1547		if (res)
1548			return res;
1549		p = &ts;
1550	}
1551	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
1552}
1553#endif
1554
1555static const struct inode_operations mqueue_dir_inode_operations = {
1556	.lookup = simple_lookup,
1557	.create = mqueue_create,
1558	.unlink = mqueue_unlink,
1559};
1560
1561static const struct file_operations mqueue_file_operations = {
1562	.flush = mqueue_flush_file,
1563	.poll = mqueue_poll_file,
1564	.read = mqueue_read_file,
1565	.llseek = default_llseek,
1566};
1567
1568static const struct super_operations mqueue_super_ops = {
1569	.alloc_inode = mqueue_alloc_inode,
1570	.free_inode = mqueue_free_inode,
1571	.evict_inode = mqueue_evict_inode,
1572	.statfs = simple_statfs,
1573};
1574
1575static const struct fs_context_operations mqueue_fs_context_ops = {
1576	.free		= mqueue_fs_context_free,
1577	.get_tree	= mqueue_get_tree,
1578};
1579
1580static struct file_system_type mqueue_fs_type = {
1581	.name			= "mqueue",
1582	.init_fs_context	= mqueue_init_fs_context,
1583	.kill_sb		= kill_litter_super,
1584	.fs_flags		= FS_USERNS_MOUNT,
1585};
1586
1587int mq_init_ns(struct ipc_namespace *ns)
1588{
1589	struct vfsmount *m;
1590
1591	ns->mq_queues_count  = 0;
1592	ns->mq_queues_max    = DFLT_QUEUESMAX;
1593	ns->mq_msg_max       = DFLT_MSGMAX;
1594	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
1595	ns->mq_msg_default   = DFLT_MSG;
1596	ns->mq_msgsize_default  = DFLT_MSGSIZE;
1597
1598	m = mq_create_mount(ns);
1599	if (IS_ERR(m))
1600		return PTR_ERR(m);
1601	ns->mq_mnt = m;
1602	return 0;
1603}
1604
1605void mq_clear_sbinfo(struct ipc_namespace *ns)
1606{
1607	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
1608}
1609
1610void mq_put_mnt(struct ipc_namespace *ns)
1611{
1612	kern_unmount(ns->mq_mnt);
1613}
1614
1615static int __init init_mqueue_fs(void)
1616{
1617	int error;
1618
1619	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
1620				sizeof(struct mqueue_inode_info), 0,
1621				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
1622	if (mqueue_inode_cachep == NULL)
1623		return -ENOMEM;
1624
1625	/* ignore failures - they are not fatal */
1626	mq_sysctl_table = mq_register_sysctl_table();
1627
1628	error = register_filesystem(&mqueue_fs_type);
1629	if (error)
1630		goto out_sysctl;
1631
1632	spin_lock_init(&mq_lock);
1633
1634	error = mq_init_ns(&init_ipc_ns);
1635	if (error)
1636		goto out_filesystem;
1637
1638	return 0;
1639
1640out_filesystem:
1641	unregister_filesystem(&mqueue_fs_type);
1642out_sysctl:
1643	if (mq_sysctl_table)
1644		unregister_sysctl_table(mq_sysctl_table);
1645	kmem_cache_destroy(mqueue_inode_cachep);
1646	return error;
1647}
1648
1649device_initcall(init_mqueue_fs);