ipc/mqueue.c (v3.5.6)
   1/*
   2 * POSIX message queues filesystem for Linux.
   3 *
   4 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
   5 *                          Michal Wronski          (michal.wronski@gmail.com)
   6 *
   7 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
   8 * Lockless receive & send, fd based notify:
   9 * 			    Manfred Spraul	    (manfred@colorfullife.com)
  10 *
  11 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
  12 *
  13 * This file is released under the GPL.
  14 */
  15
  16#include <linux/capability.h>
  17#include <linux/init.h>
  18#include <linux/pagemap.h>
  19#include <linux/file.h>
  20#include <linux/mount.h>
  21#include <linux/namei.h>
  22#include <linux/sysctl.h>
  23#include <linux/poll.h>
  24#include <linux/mqueue.h>
  25#include <linux/msg.h>
  26#include <linux/skbuff.h>
  27#include <linux/vmalloc.h>
  28#include <linux/netlink.h>
  29#include <linux/syscalls.h>
  30#include <linux/audit.h>
  31#include <linux/signal.h>
  32#include <linux/mutex.h>
  33#include <linux/nsproxy.h>
  34#include <linux/pid.h>
  35#include <linux/ipc_namespace.h>
  36#include <linux/user_namespace.h>
  37#include <linux/slab.h>
  38
  39#include <net/sock.h>
  40#include "util.h"
  41
  42#define MQUEUE_MAGIC	0x19800202
  43#define DIRENT_SIZE	20
  44#define FILENT_SIZE	80
  45
  46#define SEND		0
  47#define RECV		1
  48
  49#define STATE_NONE	0
  50#define STATE_PENDING	1
  51#define STATE_READY	2
  52
  53struct posix_msg_tree_node {
  54	struct rb_node		rb_node;
  55	struct list_head	msg_list;
  56	int			priority;
  57};
  58
  59struct ext_wait_queue {		/* queue of sleeping tasks */
  60	struct task_struct *task;
  61	struct list_head list;
  62	struct msg_msg *msg;	/* ptr of loaded message */
  63	int state;		/* one of STATE_* values */
  64};
  65
  66struct mqueue_inode_info {
  67	spinlock_t lock;
  68	struct inode vfs_inode;
  69	wait_queue_head_t wait_q;
  70
  71	struct rb_root msg_tree;
  72	struct posix_msg_tree_node *node_cache;
  73	struct mq_attr attr;
  74
  75	struct sigevent notify;
  76	struct pid* notify_owner;
  77	struct user_namespace *notify_user_ns;
  78	struct user_struct *user;	/* user who created, for accounting */
  79	struct sock *notify_sock;
  80	struct sk_buff *notify_cookie;
  81
  82	/* for tasks waiting for free space and messages, respectively */
  83	struct ext_wait_queue e_wait_q[2];
  84
  85	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
  86};
  87
  88static const struct inode_operations mqueue_dir_inode_operations;
  89static const struct file_operations mqueue_file_operations;
  90static const struct super_operations mqueue_super_ops;
  91static void remove_notification(struct mqueue_inode_info *info);
  92
  93static struct kmem_cache *mqueue_inode_cachep;
  94
  95static struct ctl_table_header * mq_sysctl_table;
  96
  97static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
  98{
  99	return container_of(inode, struct mqueue_inode_info, vfs_inode);
 100}
 101
 102/*
 103 * This routine should be called with the mq_lock held.
 104 */
 105static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
 106{
 107	return get_ipc_ns(inode->i_sb->s_fs_info);
 108}
 109
 110static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
 111{
 112	struct ipc_namespace *ns;
 113
 114	spin_lock(&mq_lock);
 115	ns = __get_ns_from_inode(inode);
 116	spin_unlock(&mq_lock);
 117	return ns;
 118}
 119
 120/* Auxiliary functions to manipulate messages' list */
 121static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
 122{
 123	struct rb_node **p, *parent = NULL;
 124	struct posix_msg_tree_node *leaf;
 125
 126	p = &info->msg_tree.rb_node;
 127	while (*p) {
 128		parent = *p;
 129		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
 130
 131		if (likely(leaf->priority == msg->m_type))
 132			goto insert_msg;
 133		else if (msg->m_type < leaf->priority)
 134			p = &(*p)->rb_left;
 135		else
 136			p = &(*p)->rb_right;
 137	}
 138	if (info->node_cache) {
 139		leaf = info->node_cache;
 140		info->node_cache = NULL;
 141	} else {
 142		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
 143		if (!leaf)
 144			return -ENOMEM;
 145		rb_init_node(&leaf->rb_node);
 146		INIT_LIST_HEAD(&leaf->msg_list);
 147		info->qsize += sizeof(*leaf);
 148	}
 149	leaf->priority = msg->m_type;
 150	rb_link_node(&leaf->rb_node, parent, p);
 151	rb_insert_color(&leaf->rb_node, &info->msg_tree);
 152insert_msg:
 153	info->attr.mq_curmsgs++;
 154	info->qsize += msg->m_ts;
 155	list_add_tail(&msg->m_list, &leaf->msg_list);
 156	return 0;
 157}
 158
 159static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
 160{
 161	struct rb_node **p, *parent = NULL;
 162	struct posix_msg_tree_node *leaf;
 163	struct msg_msg *msg;
 164
 165try_again:
 166	p = &info->msg_tree.rb_node;
 167	while (*p) {
 168		parent = *p;
 169		/*
 170		 * During insert, low priorities go to the left and high to the
 171		 * right.  On receive, we want the highest priorities first, so
 172		 * walk all the way to the right.
 173		 */
 174		p = &(*p)->rb_right;
 175	}
 176	if (!parent) {
 177		if (info->attr.mq_curmsgs) {
 178			pr_warn_once("Inconsistency in POSIX message queue, "
 179				     "no tree element, but supposedly messages "
 180				     "should exist!\n");
 181			info->attr.mq_curmsgs = 0;
 182		}
 183		return NULL;
 184	}
 185	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
 186	if (unlikely(list_empty(&leaf->msg_list))) {
 187		pr_warn_once("Inconsistency in POSIX message queue, "
 188			     "empty leaf node but we haven't implemented "
 189			     "lazy leaf delete!\n");
 190		rb_erase(&leaf->rb_node, &info->msg_tree);
 191		if (info->node_cache) {
 192			info->qsize -= sizeof(*leaf);
 193			kfree(leaf);
 194		} else {
 195			info->node_cache = leaf;
 196		}
 197		goto try_again;
 198	} else {
 199		msg = list_first_entry(&leaf->msg_list,
 200				       struct msg_msg, m_list);
 201		list_del(&msg->m_list);
 202		if (list_empty(&leaf->msg_list)) {
 203			rb_erase(&leaf->rb_node, &info->msg_tree);
 204			if (info->node_cache) {
 205				info->qsize -= sizeof(*leaf);
 206				kfree(leaf);
 207			} else {
 208				info->node_cache = leaf;
 209			}
 210		}
 211	}
 212	info->attr.mq_curmsgs--;
 213	info->qsize -= msg->m_ts;
 214	return msg;
 215}
 216
 217static struct inode *mqueue_get_inode(struct super_block *sb,
 218		struct ipc_namespace *ipc_ns, umode_t mode,
 219		struct mq_attr *attr)
 220{
 221	struct user_struct *u = current_user();
 222	struct inode *inode;
 223	int ret = -ENOMEM;
 224
 225	inode = new_inode(sb);
 226	if (!inode)
 227		goto err;
 228
 229	inode->i_ino = get_next_ino();
 230	inode->i_mode = mode;
 231	inode->i_uid = current_fsuid();
 232	inode->i_gid = current_fsgid();
 233	inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME;
 234
 235	if (S_ISREG(mode)) {
 236		struct mqueue_inode_info *info;
 237		unsigned long mq_bytes, mq_treesize;
 238
 239		inode->i_fop = &mqueue_file_operations;
 240		inode->i_size = FILENT_SIZE;
 241		/* mqueue specific info */
 242		info = MQUEUE_I(inode);
 243		spin_lock_init(&info->lock);
 244		init_waitqueue_head(&info->wait_q);
 245		INIT_LIST_HEAD(&info->e_wait_q[0].list);
 246		INIT_LIST_HEAD(&info->e_wait_q[1].list);
 247		info->notify_owner = NULL;
 248		info->notify_user_ns = NULL;
 249		info->qsize = 0;
 250		info->user = NULL;	/* set when all is ok */
 251		info->msg_tree = RB_ROOT;
 252		info->node_cache = NULL;
 253		memset(&info->attr, 0, sizeof(info->attr));
 254		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
 255					   ipc_ns->mq_msg_default);
 256		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
 257					    ipc_ns->mq_msgsize_default);
 258		if (attr) {
 259			info->attr.mq_maxmsg = attr->mq_maxmsg;
 260			info->attr.mq_msgsize = attr->mq_msgsize;
 261		}
 262		/*
 263		 * We used to allocate a static array of pointers and account
 264		 * the size of that array as well as one msg_msg struct per
 265		 * possible message into the queue size. That's no longer
 266		 * accurate as the queue is now an rbtree and will grow and
 267		 * shrink depending on usage patterns.  We can, however, still
 268		 * account one msg_msg struct per message, but the nodes are
 269		 * allocated depending on priority usage, and most programs
 270		 * only use one, or a handful, of priorities.  However, since
 271		 * this is pinned memory, we need to assume worst case, so
 272		 * that means the min(mq_maxmsg, max_priorities) * struct
 273		 * posix_msg_tree_node.
 274		 */
 275		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
 276			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
 277			sizeof(struct posix_msg_tree_node);
 278
 279		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
 280					  info->attr.mq_msgsize);
 281
 282		spin_lock(&mq_lock);
 283		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
 284		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
 285			spin_unlock(&mq_lock);
 286			/* mqueue_evict_inode() releases info->messages */
 287			ret = -EMFILE;
 288			goto out_inode;
 289		}
 290		u->mq_bytes += mq_bytes;
 291		spin_unlock(&mq_lock);
 292
 293		/* all is ok */
 294		info->user = get_uid(u);
 295	} else if (S_ISDIR(mode)) {
 296		inc_nlink(inode);
 297		/* Some things misbehave if size == 0 on a directory */
 298		inode->i_size = 2 * DIRENT_SIZE;
 299		inode->i_op = &mqueue_dir_inode_operations;
 300		inode->i_fop = &simple_dir_operations;
 301	}
 302
 303	return inode;
 304out_inode:
 305	iput(inode);
 306err:
 307	return ERR_PTR(ret);
 308}
 309
 310static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
 311{
 312	struct inode *inode;
 313	struct ipc_namespace *ns = data;
 314
 315	sb->s_blocksize = PAGE_CACHE_SIZE;
 316	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
 317	sb->s_magic = MQUEUE_MAGIC;
 318	sb->s_op = &mqueue_super_ops;
 319
 320	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
 321	if (IS_ERR(inode))
 322		return PTR_ERR(inode);
 323
 324	sb->s_root = d_make_root(inode);
 325	if (!sb->s_root)
 326		return -ENOMEM;
 327	return 0;
 328}
 329
 330static struct dentry *mqueue_mount(struct file_system_type *fs_type,
 331			 int flags, const char *dev_name,
 332			 void *data)
 333{
 334	if (!(flags & MS_KERNMOUNT))
 335		data = current->nsproxy->ipc_ns;
 336	return mount_ns(fs_type, flags, data, mqueue_fill_super);
 337}
 338
 339static void init_once(void *foo)
 340{
 341	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
 342
 343	inode_init_once(&p->vfs_inode);
 344}
 345
 346static struct inode *mqueue_alloc_inode(struct super_block *sb)
 347{
 348	struct mqueue_inode_info *ei;
 349
 350	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
 351	if (!ei)
 352		return NULL;
 353	return &ei->vfs_inode;
 354}
 355
 356static void mqueue_i_callback(struct rcu_head *head)
 357{
 358	struct inode *inode = container_of(head, struct inode, i_rcu);
 359	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
 360}
 361
 362static void mqueue_destroy_inode(struct inode *inode)
 363{
 364	call_rcu(&inode->i_rcu, mqueue_i_callback);
 365}
 366
 367static void mqueue_evict_inode(struct inode *inode)
 368{
 369	struct mqueue_inode_info *info;
 370	struct user_struct *user;
 371	unsigned long mq_bytes, mq_treesize;
 372	struct ipc_namespace *ipc_ns;
 373	struct msg_msg *msg;
 374
 375	clear_inode(inode);
 376
 377	if (S_ISDIR(inode->i_mode))
 378		return;
 379
 380	ipc_ns = get_ns_from_inode(inode);
 381	info = MQUEUE_I(inode);
 382	spin_lock(&info->lock);
 383	while ((msg = msg_get(info)) != NULL)
 384		free_msg(msg);
 385	kfree(info->node_cache);
 386	spin_unlock(&info->lock);
 387
 388	/* Total amount of bytes accounted for the mqueue */
 389	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
 390		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
 391		sizeof(struct posix_msg_tree_node);
 392
 393	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
 394				  info->attr.mq_msgsize);
 395
 396	user = info->user;
 397	if (user) {
 398		spin_lock(&mq_lock);
 399		user->mq_bytes -= mq_bytes;
 400		/*
 401		 * get_ns_from_inode() ensures that the
 402		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
 403		 * to which we now hold a reference, or it is NULL.
 404		 * We can't put it here under mq_lock, though.
 405		 */
 406		if (ipc_ns)
 407			ipc_ns->mq_queues_count--;
 408		spin_unlock(&mq_lock);
 409		free_uid(user);
 410	}
 411	if (ipc_ns)
 412		put_ipc_ns(ipc_ns);
 413}
 414
 415static int mqueue_create(struct inode *dir, struct dentry *dentry,
 416				umode_t mode, struct nameidata *nd)
 417{
 418	struct inode *inode;
 419	struct mq_attr *attr = dentry->d_fsdata;
 420	int error;
 421	struct ipc_namespace *ipc_ns;
 422
 423	spin_lock(&mq_lock);
 424	ipc_ns = __get_ns_from_inode(dir);
 425	if (!ipc_ns) {
 426		error = -EACCES;
 427		goto out_unlock;
 428	}
 429	if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
 430	    (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
 431	     !capable(CAP_SYS_RESOURCE))) {
 432		error = -ENOSPC;
 433		goto out_unlock;
 434	}
 435	ipc_ns->mq_queues_count++;
 436	spin_unlock(&mq_lock);
 437
 438	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
 439	if (IS_ERR(inode)) {
 440		error = PTR_ERR(inode);
 441		spin_lock(&mq_lock);
 442		ipc_ns->mq_queues_count--;
 443		goto out_unlock;
 444	}
 445
 446	put_ipc_ns(ipc_ns);
 447	dir->i_size += DIRENT_SIZE;
 448	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
 449
 450	d_instantiate(dentry, inode);
 451	dget(dentry);
 452	return 0;
 453out_unlock:
 454	spin_unlock(&mq_lock);
 455	if (ipc_ns)
 456		put_ipc_ns(ipc_ns);
 457	return error;
 458}
 459
 460static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
 461{
  462	struct inode *inode = dentry->d_inode;
  463
  464	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
  465	dir->i_size -= DIRENT_SIZE;
  466	drop_nlink(inode);
  467	dput(dentry);
  468	return 0;
 469}
 470
  471/*
  472*	This routine implements read() on a queue file.
  473*	To avoid reimplementing some form of mq_receive here, we allow
  474*	reading only the queue size & notification info (the only values
  475*	that are interesting from the user's point of view and aren't
  476*	accessible through the standard routines).
  477*/
 478static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
 479				size_t count, loff_t *off)
 480{
 481	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
 482	char buffer[FILENT_SIZE];
 483	ssize_t ret;
 484
 485	spin_lock(&info->lock);
 486	snprintf(buffer, sizeof(buffer),
 487			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
 488			info->qsize,
 489			info->notify_owner ? info->notify.sigev_notify : 0,
 490			(info->notify_owner &&
 491			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
 492				info->notify.sigev_signo : 0,
 493			pid_vnr(info->notify_owner));
 494	spin_unlock(&info->lock);
 495	buffer[sizeof(buffer)-1] = '\0';
 496
 497	ret = simple_read_from_buffer(u_data, count, off, buffer,
 498				strlen(buffer));
 499	if (ret <= 0)
 500		return ret;
 501
 502	filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
 503	return ret;
 504}
 505
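/*
 * Editor's note: with the mqueue filesystem mounted (conventionally on
 * /dev/mqueue), the routine above is what a plain read(2) of
 * /dev/mqueue/<name> hits. A minimal sketch, assuming a queue named
 * "example" already exists; not part of this file.
 */
#if 0	/* illustrative userspace code, compile separately */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void dump_queue_status(void)
{
	char buf[80];	/* FILENT_SIZE above caps the record at 80 bytes */
	int fd = open("/dev/mqueue/example", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		/* prints e.g. "QSIZE:5 NOTIFY:0 SIGNO:0 NOTIFY_PID:0" */
		printf("%s", buf);
	}
	close(fd);
}
#endif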
 506static int mqueue_flush_file(struct file *filp, fl_owner_t id)
 507{
 508	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
 509
 510	spin_lock(&info->lock);
 511	if (task_tgid(current) == info->notify_owner)
 512		remove_notification(info);
 513
 514	spin_unlock(&info->lock);
 515	return 0;
 516}
 517
 518static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
 519{
 520	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
 521	int retval = 0;
 522
 523	poll_wait(filp, &info->wait_q, poll_tab);
 524
 525	spin_lock(&info->lock);
 526	if (info->attr.mq_curmsgs)
 527		retval = POLLIN | POLLRDNORM;
 528
 529	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
 530		retval |= POLLOUT | POLLWRNORM;
 531	spin_unlock(&info->lock);
 532
 533	return retval;
 534}
 535
 536/* Adds current to info->e_wait_q[sr] before element with smaller prio */
 537static void wq_add(struct mqueue_inode_info *info, int sr,
 538			struct ext_wait_queue *ewp)
 539{
 540	struct ext_wait_queue *walk;
 541
 542	ewp->task = current;
 543
 544	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
 545		if (walk->task->static_prio <= current->static_prio) {
 546			list_add_tail(&ewp->list, &walk->list);
 547			return;
 548		}
 549	}
 550	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
 551}
 552
 553/*
 554 * Puts current task to sleep. Caller must hold queue lock. After return
 555 * lock isn't held.
 556 * sr: SEND or RECV
 557 */
 558static int wq_sleep(struct mqueue_inode_info *info, int sr,
 559		    ktime_t *timeout, struct ext_wait_queue *ewp)
 560{
 561	int retval;
 562	signed long time;
 563
 564	wq_add(info, sr, ewp);
 565
 566	for (;;) {
 567		set_current_state(TASK_INTERRUPTIBLE);
 568
 569		spin_unlock(&info->lock);
 570		time = schedule_hrtimeout_range_clock(timeout, 0,
 571			HRTIMER_MODE_ABS, CLOCK_REALTIME);
 572
 573		while (ewp->state == STATE_PENDING)
 574			cpu_relax();
 575
 576		if (ewp->state == STATE_READY) {
 577			retval = 0;
 578			goto out;
 579		}
 580		spin_lock(&info->lock);
 581		if (ewp->state == STATE_READY) {
 582			retval = 0;
 583			goto out_unlock;
 584		}
 585		if (signal_pending(current)) {
 586			retval = -ERESTARTSYS;
 587			break;
 588		}
 589		if (time == 0) {
 590			retval = -ETIMEDOUT;
 591			break;
 592		}
 593	}
 594	list_del(&ewp->list);
 595out_unlock:
 596	spin_unlock(&info->lock);
 597out:
 598	return retval;
 599}
 600
 601/*
 602 * Returns waiting task that should be serviced first or NULL if none exists
 603 */
 604static struct ext_wait_queue *wq_get_first_waiter(
 605		struct mqueue_inode_info *info, int sr)
 606{
 607	struct list_head *ptr;
 608
 609	ptr = info->e_wait_q[sr].list.prev;
 610	if (ptr == &info->e_wait_q[sr].list)
 611		return NULL;
 612	return list_entry(ptr, struct ext_wait_queue, list);
 613}
 614
 615
 616static inline void set_cookie(struct sk_buff *skb, char code)
 617{
 618	((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
 619}
 620
  621/*
  622 * The next function exists only to split up the overly long sys_mq_timedsend.
  623 */
 624static void __do_notify(struct mqueue_inode_info *info)
 625{
  626	/* notification
  627	 * invoked when a process is registered, no process is waiting
  628	 * synchronously for a message, AND the state of the queue changed
  629	 * from empty to not empty. Here we are sure that no one is waiting
  630	 * synchronously. */
 631	if (info->notify_owner &&
 632	    info->attr.mq_curmsgs == 1) {
 633		struct siginfo sig_i;
 634		switch (info->notify.sigev_notify) {
 635		case SIGEV_NONE:
 636			break;
 637		case SIGEV_SIGNAL:
 638			/* sends signal */
 639
 640			sig_i.si_signo = info->notify.sigev_signo;
 641			sig_i.si_errno = 0;
 642			sig_i.si_code = SI_MESGQ;
 643			sig_i.si_value = info->notify.sigev_value;
 644			/* map current pid/uid into info->owner's namespaces */
 645			rcu_read_lock();
 646			sig_i.si_pid = task_tgid_nr_ns(current,
 647						ns_of_pid(info->notify_owner));
 648			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
 649			rcu_read_unlock();
 650
 651			kill_pid_info(info->notify.sigev_signo,
 652				      &sig_i, info->notify_owner);
 653			break;
 654		case SIGEV_THREAD:
 655			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
 656			netlink_sendskb(info->notify_sock, info->notify_cookie);
 657			break;
 658		}
 659		/* after notification unregisters process */
 660		put_pid(info->notify_owner);
 661		put_user_ns(info->notify_user_ns);
 662		info->notify_owner = NULL;
 663		info->notify_user_ns = NULL;
 664	}
 665	wake_up(&info->wait_q);
 666}
 667
 668static int prepare_timeout(const struct timespec __user *u_abs_timeout,
 669			   ktime_t *expires, struct timespec *ts)
 670{
 671	if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
 672		return -EFAULT;
 673	if (!timespec_valid(ts))
 674		return -EINVAL;
 675
 676	*expires = timespec_to_ktime(*ts);
 677	return 0;
 678}
 679
 680static void remove_notification(struct mqueue_inode_info *info)
 681{
 682	if (info->notify_owner != NULL &&
 683	    info->notify.sigev_notify == SIGEV_THREAD) {
 684		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
 685		netlink_sendskb(info->notify_sock, info->notify_cookie);
 686	}
 687	put_pid(info->notify_owner);
 688	put_user_ns(info->notify_user_ns);
 689	info->notify_owner = NULL;
 690	info->notify_user_ns = NULL;
 691}
 692
 693static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
 694{
 695	int mq_treesize;
 696	unsigned long total_size;
 697
 698	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
 699		return -EINVAL;
 700	if (capable(CAP_SYS_RESOURCE)) {
 701		if (attr->mq_maxmsg > HARD_MSGMAX ||
 702		    attr->mq_msgsize > HARD_MSGSIZEMAX)
 703			return -EINVAL;
 704	} else {
 705		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
 706				attr->mq_msgsize > ipc_ns->mq_msgsize_max)
 707			return -EINVAL;
 708	}
 709	/* check for overflow */
 710	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
 711		return -EOVERFLOW;
 712	mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
 713		min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
 714		sizeof(struct posix_msg_tree_node);
 715	total_size = attr->mq_maxmsg * attr->mq_msgsize;
 716	if (total_size + mq_treesize < total_size)
 717		return -EOVERFLOW;
 718	return 0;
 719}
 720
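/*
 * Editor's note: a minimal sketch of attributes that satisfy
 * mq_attr_ok() above for an unprivileged caller, via the glibc
 * mq_open() wrapper; O_CREAT hands &attr to do_create() below. The
 * queue name and values are illustrative; not part of this file.
 */
#if 0	/* illustrative userspace code, compile with -lrt */
#include <fcntl.h>
#include <mqueue.h>

static mqd_t open_example_queue(void)
{
	struct mq_attr attr = {
		.mq_maxmsg  = 10,	/* <= /proc/sys/fs/mqueue/msg_max */
		.mq_msgsize = 128,	/* <= /proc/sys/fs/mqueue/msgsize_max */
	};

	return mq_open("/example", O_CREAT | O_RDWR, 0600, &attr);
}
#endif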
 721/*
 722 * Invoked when creating a new queue via sys_mq_open
 723 */
 724static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
 725			struct dentry *dentry, int oflag, umode_t mode,
 726			struct mq_attr *attr)
 727{
 728	const struct cred *cred = current_cred();
 729	struct file *result;
 730	int ret;
 731
 732	if (attr) {
 733		ret = mq_attr_ok(ipc_ns, attr);
 734		if (ret)
 735			goto out;
 736		/* store for use during create */
 737		dentry->d_fsdata = attr;
 738	} else {
 739		struct mq_attr def_attr;
 740
 741		def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
 742					 ipc_ns->mq_msg_default);
 743		def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
 744					  ipc_ns->mq_msgsize_default);
 745		ret = mq_attr_ok(ipc_ns, &def_attr);
 746		if (ret)
 747			goto out;
 748	}
 749
 750	mode &= ~current_umask();
 751	ret = mnt_want_write(ipc_ns->mq_mnt);
 752	if (ret)
 753		goto out;
 754	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
 755	dentry->d_fsdata = NULL;
 756	if (ret)
 757		goto out_drop_write;
 758
 759	result = dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
 760	/*
 761	 * dentry_open() took a persistent mnt_want_write(),
 762	 * so we can now drop this one.
 763	 */
 764	mnt_drop_write(ipc_ns->mq_mnt);
 765	return result;
 766
 767out_drop_write:
 768	mnt_drop_write(ipc_ns->mq_mnt);
 769out:
 770	dput(dentry);
 771	mntput(ipc_ns->mq_mnt);
 772	return ERR_PTR(ret);
 773}
 774
 775/* Opens existing queue */
 776static struct file *do_open(struct ipc_namespace *ipc_ns,
 777				struct dentry *dentry, int oflag)
 778{
 779	int ret;
 780	const struct cred *cred = current_cred();
 781
 782	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
 783						  MAY_READ | MAY_WRITE };
 784
 785	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
 786		ret = -EINVAL;
 787		goto err;
 788	}
 789
 790	if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
 791		ret = -EACCES;
 792		goto err;
 793	}
 794
 795	return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
 796
 797err:
 798	dput(dentry);
 799	mntput(ipc_ns->mq_mnt);
 800	return ERR_PTR(ret);
 801}
 802
 803SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
 804		struct mq_attr __user *, u_attr)
 805{
 806	struct dentry *dentry;
 807	struct file *filp;
 808	char *name;
 809	struct mq_attr attr;
 810	int fd, error;
 811	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
 812
 813	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
 814		return -EFAULT;
 815
 816	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);
 817
 818	if (IS_ERR(name = getname(u_name)))
 819		return PTR_ERR(name);
 820
 821	fd = get_unused_fd_flags(O_CLOEXEC);
 822	if (fd < 0)
 823		goto out_putname;
 824
 825	mutex_lock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
 826	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
 827	if (IS_ERR(dentry)) {
 828		error = PTR_ERR(dentry);
 829		goto out_putfd;
 830	}
 831	mntget(ipc_ns->mq_mnt);
 832
 833	if (oflag & O_CREAT) {
 834		if (dentry->d_inode) {	/* entry already exists */
 835			audit_inode(name, dentry);
 836			if (oflag & O_EXCL) {
 837				error = -EEXIST;
 838				goto out;
 839			}
 840			filp = do_open(ipc_ns, dentry, oflag);
 841		} else {
 842			filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
 843						dentry, oflag, mode,
 844						u_attr ? &attr : NULL);
 845		}
 846	} else {
 847		if (!dentry->d_inode) {
 848			error = -ENOENT;
 849			goto out;
 850		}
 851		audit_inode(name, dentry);
 852		filp = do_open(ipc_ns, dentry, oflag);
 853	}
 854
 855	if (IS_ERR(filp)) {
 856		error = PTR_ERR(filp);
 857		goto out_putfd;
 858	}
 859
 860	fd_install(fd, filp);
 861	goto out_upsem;
 862
 863out:
 864	dput(dentry);
 865	mntput(ipc_ns->mq_mnt);
 866out_putfd:
 867	put_unused_fd(fd);
 868	fd = error;
 869out_upsem:
 870	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
 871out_putname:
 872	putname(name);
 873	return fd;
 874}
 875
 876SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
 877{
 878	int err;
 879	char *name;
 880	struct dentry *dentry;
 881	struct inode *inode = NULL;
 882	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
 883
 884	name = getname(u_name);
 885	if (IS_ERR(name))
 886		return PTR_ERR(name);
 887
 888	mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex,
 889			I_MUTEX_PARENT);
 890	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
 891	if (IS_ERR(dentry)) {
 892		err = PTR_ERR(dentry);
 893		goto out_unlock;
 894	}
 895
 896	if (!dentry->d_inode) {
 897		err = -ENOENT;
 898		goto out_err;
 899	}
 900
 901	inode = dentry->d_inode;
 902	if (inode)
 903		ihold(inode);
 904	err = mnt_want_write(ipc_ns->mq_mnt);
 905	if (err)
 906		goto out_err;
 907	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
 908	mnt_drop_write(ipc_ns->mq_mnt);
 909out_err:
 910	dput(dentry);
 911
 912out_unlock:
 913	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
 914	putname(name);
 915	if (inode)
 916		iput(inode);
 917
 918	return err;
 919}
 920
 921/* Pipelined send and receive functions.
 922 *
 923 * If a receiver finds no waiting message, then it registers itself in the
 924 * list of waiting receivers. A sender checks that list before adding the new
  925 * message into the message tree. If there is a waiting receiver, then it
  926 * bypasses the message tree and directly hands the message over to the
 927 * receiver.
 928 * The receiver accepts the message and returns without grabbing the queue
 929 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 930 * are necessary. The same algorithm is used for sysv semaphores, see
 931 * ipc/sem.c for more details.
 932 *
 933 * The same algorithm is used for senders.
 934 */
 935
 936/* pipelined_send() - send a message directly to the task waiting in
 937 * sys_mq_timedreceive() (without inserting message into a queue).
 938 */
 939static inline void pipelined_send(struct mqueue_inode_info *info,
 940				  struct msg_msg *message,
 941				  struct ext_wait_queue *receiver)
 942{
 943	receiver->msg = message;
 944	list_del(&receiver->list);
 945	receiver->state = STATE_PENDING;
 946	wake_up_process(receiver->task);
 947	smp_wmb();
 948	receiver->state = STATE_READY;
 949}
 950
  951/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
  952 * take its message and insert it into the queue (we have one free place for sure). */
 953static inline void pipelined_receive(struct mqueue_inode_info *info)
 954{
 955	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);
 956
 957	if (!sender) {
 958		/* for poll */
 959		wake_up_interruptible(&info->wait_q);
 960		return;
 961	}
 962	if (msg_insert(sender->msg, info))
 963		return;
 964	list_del(&sender->list);
 965	sender->state = STATE_PENDING;
 966	wake_up_process(sender->task);
 967	smp_wmb();
 968	sender->state = STATE_READY;
 969}
 970
 971SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
 972		size_t, msg_len, unsigned int, msg_prio,
 973		const struct timespec __user *, u_abs_timeout)
 974{
 975	struct file *filp;
 976	struct inode *inode;
 977	struct ext_wait_queue wait;
 978	struct ext_wait_queue *receiver;
 979	struct msg_msg *msg_ptr;
 980	struct mqueue_inode_info *info;
 981	ktime_t expires, *timeout = NULL;
 982	struct timespec ts;
 983	struct posix_msg_tree_node *new_leaf = NULL;
 984	int ret = 0;
 985
 986	if (u_abs_timeout) {
 987		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
 988		if (res)
 989			return res;
 990		timeout = &expires;
 991	}
 992
 993	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
 994		return -EINVAL;
 995
 996	audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);
 997
 998	filp = fget(mqdes);
 999	if (unlikely(!filp)) {
1000		ret = -EBADF;
1001		goto out;
1002	}
1003
1004	inode = filp->f_path.dentry->d_inode;
1005	if (unlikely(filp->f_op != &mqueue_file_operations)) {
1006		ret = -EBADF;
1007		goto out_fput;
1008	}
1009	info = MQUEUE_I(inode);
1010	audit_inode(NULL, filp->f_path.dentry);
1011
1012	if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
1013		ret = -EBADF;
1014		goto out_fput;
1015	}
1016
1017	if (unlikely(msg_len > info->attr.mq_msgsize)) {
1018		ret = -EMSGSIZE;
1019		goto out_fput;
1020	}
1021
1022	/* First try to allocate memory, before doing anything with
1023	 * existing queues. */
1024	msg_ptr = load_msg(u_msg_ptr, msg_len);
1025	if (IS_ERR(msg_ptr)) {
1026		ret = PTR_ERR(msg_ptr);
1027		goto out_fput;
1028	}
1029	msg_ptr->m_ts = msg_len;
1030	msg_ptr->m_type = msg_prio;
1031
1032	/*
1033	 * msg_insert really wants us to have a valid, spare node struct so
1034	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
1035	 * fall back to that if necessary.
1036	 */
1037	if (!info->node_cache)
1038		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
1039
1040	spin_lock(&info->lock);
1041
1042	if (!info->node_cache && new_leaf) {
1043		/* Save our speculative allocation into the cache */
1044		rb_init_node(&new_leaf->rb_node);
1045		INIT_LIST_HEAD(&new_leaf->msg_list);
1046		info->node_cache = new_leaf;
1047		info->qsize += sizeof(*new_leaf);
1048		new_leaf = NULL;
1049	} else {
1050		kfree(new_leaf);
1051	}
1052
1053	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
1054		if (filp->f_flags & O_NONBLOCK) {
1055			ret = -EAGAIN;
1056		} else {
1057			wait.task = current;
1058			wait.msg = (void *) msg_ptr;
1059			wait.state = STATE_NONE;
1060			ret = wq_sleep(info, SEND, timeout, &wait);
1061			/*
1062			 * wq_sleep must be called with info->lock held, and
1063			 * returns with the lock released
1064			 */
1065			goto out_free;
1066		}
1067	} else {
1068		receiver = wq_get_first_waiter(info, RECV);
1069		if (receiver) {
1070			pipelined_send(info, msg_ptr, receiver);
1071		} else {
1072			/* adds message to the queue */
1073			ret = msg_insert(msg_ptr, info);
1074			if (ret)
1075				goto out_unlock;
1076			__do_notify(info);
1077		}
1078		inode->i_atime = inode->i_mtime = inode->i_ctime =
1079				CURRENT_TIME;
1080	}
1081out_unlock:
1082	spin_unlock(&info->lock);
1083out_free:
1084	if (ret)
1085		free_msg(msg_ptr);
1086out_fput:
1087	fput(filp);
1088out:
1089	return ret;
1090}
1091
1092SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
1093		size_t, msg_len, unsigned int __user *, u_msg_prio,
1094		const struct timespec __user *, u_abs_timeout)
1095{
1096	ssize_t ret;
1097	struct msg_msg *msg_ptr;
1098	struct file *filp;
1099	struct inode *inode;
1100	struct mqueue_inode_info *info;
1101	struct ext_wait_queue wait;
1102	ktime_t expires, *timeout = NULL;
1103	struct timespec ts;
1104	struct posix_msg_tree_node *new_leaf = NULL;
1105
1106	if (u_abs_timeout) {
1107		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
1108		if (res)
1109			return res;
1110		timeout = &expires;
1111	}
1112
1113	audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);
1114
1115	filp = fget(mqdes);
1116	if (unlikely(!filp)) {
1117		ret = -EBADF;
1118		goto out;
1119	}
1120
1121	inode = filp->f_path.dentry->d_inode;
1122	if (unlikely(filp->f_op != &mqueue_file_operations)) {
1123		ret = -EBADF;
1124		goto out_fput;
1125	}
1126	info = MQUEUE_I(inode);
1127	audit_inode(NULL, filp->f_path.dentry);
1128
1129	if (unlikely(!(filp->f_mode & FMODE_READ))) {
1130		ret = -EBADF;
1131		goto out_fput;
1132	}
1133
1134	/* checks if buffer is big enough */
1135	if (unlikely(msg_len < info->attr.mq_msgsize)) {
1136		ret = -EMSGSIZE;
1137		goto out_fput;
1138	}
1139
1140	/*
1141	 * msg_insert really wants us to have a valid, spare node struct so
1142	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
1143	 * fall back to that if necessary.
1144	 */
1145	if (!info->node_cache)
1146		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
1147
1148	spin_lock(&info->lock);
1149
1150	if (!info->node_cache && new_leaf) {
1151		/* Save our speculative allocation into the cache */
1152		rb_init_node(&new_leaf->rb_node);
1153		INIT_LIST_HEAD(&new_leaf->msg_list);
1154		info->node_cache = new_leaf;
1155		info->qsize += sizeof(*new_leaf);
1156	} else {
1157		kfree(new_leaf);
1158	}
1159
1160	if (info->attr.mq_curmsgs == 0) {
1161		if (filp->f_flags & O_NONBLOCK) {
1162			spin_unlock(&info->lock);
1163			ret = -EAGAIN;
1164		} else {
1165			wait.task = current;
1166			wait.state = STATE_NONE;
1167			ret = wq_sleep(info, RECV, timeout, &wait);
1168			msg_ptr = wait.msg;
1169		}
1170	} else {
1171		msg_ptr = msg_get(info);
1172
1173		inode->i_atime = inode->i_mtime = inode->i_ctime =
1174				CURRENT_TIME;
1175
1176		/* There is now free space in queue. */
1177		pipelined_receive(info);
1178		spin_unlock(&info->lock);
1179		ret = 0;
1180	}
1181	if (ret == 0) {
1182		ret = msg_ptr->m_ts;
1183
1184		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
1185			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
1186			ret = -EFAULT;
1187		}
1188		free_msg(msg_ptr);
1189	}
1190out_fput:
1191	fput(filp);
1192out:
1193	return ret;
1194}
1195
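/*
 * Editor's note: a hedged userspace sketch exercising the two syscalls
 * above through their glibc wrappers. Error handling is abbreviated,
 * and the queue name and timeout are illustrative; not part of this
 * file.
 */
#if 0	/* illustrative userspace code, compile with -lrt */
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <time.h>

static void send_and_receive(void)
{
	mqd_t mq = mq_open("/example", O_CREAT | O_RDWR, 0600, NULL);
	struct timespec abs_timeout;
	char buf[8192];		/* must be >= mq_msgsize, see -EMSGSIZE above */
	unsigned int prio;

	if (mq == (mqd_t)-1)
		return;
	clock_gettime(CLOCK_REALTIME, &abs_timeout);
	abs_timeout.tv_sec += 5;	/* absolute CLOCK_REALTIME deadline */

	/* msg_prio becomes msg->m_type and selects the rbtree leaf */
	mq_timedsend(mq, "hello", 5, 3, &abs_timeout);

	if (mq_timedreceive(mq, buf, sizeof(buf), &prio, &abs_timeout) >= 0)
		printf("received at priority %u\n", prio);
	mq_close(mq);
}
#endif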
1196/*
 1197 * Notes: the case when a user wants us to deregister (with NULL as pointer)
 1198 * and isn't currently the owner of the notification is silently discarded.
 1199 * This behaviour isn't explicitly defined in POSIX.
1200 */
1201SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1202		const struct sigevent __user *, u_notification)
1203{
1204	int ret;
1205	struct file *filp;
1206	struct sock *sock;
1207	struct inode *inode;
1208	struct sigevent notification;
1209	struct mqueue_inode_info *info;
1210	struct sk_buff *nc;
1211
1212	if (u_notification) {
1213		if (copy_from_user(&notification, u_notification,
1214					sizeof(struct sigevent)))
1215			return -EFAULT;
1216	}
1217
1218	audit_mq_notify(mqdes, u_notification ? &notification : NULL);
1219
1220	nc = NULL;
1221	sock = NULL;
1222	if (u_notification != NULL) {
1223		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
1224			     notification.sigev_notify != SIGEV_SIGNAL &&
1225			     notification.sigev_notify != SIGEV_THREAD))
1226			return -EINVAL;
1227		if (notification.sigev_notify == SIGEV_SIGNAL &&
1228			!valid_signal(notification.sigev_signo)) {
1229			return -EINVAL;
1230		}
1231		if (notification.sigev_notify == SIGEV_THREAD) {
1232			long timeo;
1233
1234			/* create the notify skb */
1235			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
1236			if (!nc) {
1237				ret = -ENOMEM;
1238				goto out;
1239			}
1240			if (copy_from_user(nc->data,
1241					notification.sigev_value.sival_ptr,
1242					NOTIFY_COOKIE_LEN)) {
1243				ret = -EFAULT;
1244				goto out;
1245			}
1246
1247			/* TODO: add a header? */
1248			skb_put(nc, NOTIFY_COOKIE_LEN);
1249			/* and attach it to the socket */
1250retry:
1251			filp = fget(notification.sigev_signo);
1252			if (!filp) {
1253				ret = -EBADF;
1254				goto out;
1255			}
1256			sock = netlink_getsockbyfilp(filp);
1257			fput(filp);
1258			if (IS_ERR(sock)) {
1259				ret = PTR_ERR(sock);
1260				sock = NULL;
1261				goto out;
1262			}
1263
1264			timeo = MAX_SCHEDULE_TIMEOUT;
1265			ret = netlink_attachskb(sock, nc, &timeo, NULL);
1266			if (ret == 1)
1267				goto retry;
1268			if (ret) {
1269				sock = NULL;
1270				nc = NULL;
1271				goto out;
1272			}
1273		}
1274	}
1275
1276	filp = fget(mqdes);
1277	if (!filp) {
1278		ret = -EBADF;
1279		goto out;
1280	}
1281
1282	inode = filp->f_path.dentry->d_inode;
1283	if (unlikely(filp->f_op != &mqueue_file_operations)) {
1284		ret = -EBADF;
1285		goto out_fput;
1286	}
1287	info = MQUEUE_I(inode);
1288
1289	ret = 0;
1290	spin_lock(&info->lock);
1291	if (u_notification == NULL) {
1292		if (info->notify_owner == task_tgid(current)) {
1293			remove_notification(info);
1294			inode->i_atime = inode->i_ctime = CURRENT_TIME;
1295		}
1296	} else if (info->notify_owner != NULL) {
1297		ret = -EBUSY;
1298	} else {
1299		switch (notification.sigev_notify) {
1300		case SIGEV_NONE:
1301			info->notify.sigev_notify = SIGEV_NONE;
1302			break;
1303		case SIGEV_THREAD:
1304			info->notify_sock = sock;
1305			info->notify_cookie = nc;
1306			sock = NULL;
1307			nc = NULL;
1308			info->notify.sigev_notify = SIGEV_THREAD;
1309			break;
1310		case SIGEV_SIGNAL:
1311			info->notify.sigev_signo = notification.sigev_signo;
1312			info->notify.sigev_value = notification.sigev_value;
1313			info->notify.sigev_notify = SIGEV_SIGNAL;
1314			break;
1315		}
1316
1317		info->notify_owner = get_pid(task_tgid(current));
1318		info->notify_user_ns = get_user_ns(current_user_ns());
1319		inode->i_atime = inode->i_ctime = CURRENT_TIME;
1320	}
1321	spin_unlock(&info->lock);
1322out_fput:
1323	fput(filp);
1324out:
1325	if (sock) {
1326		netlink_detachskb(sock, nc);
1327	} else if (nc) {
1328		dev_kfree_skb(nc);
1329	}
1330	return ret;
1331}
1332
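/*
 * Editor's note: a sketch of SIGEV_SIGNAL registration against the
 * syscall above. SIGEV_THREAD is normally set up by glibc, which
 * performs the netlink cookie handshake seen in the kernel code above,
 * so plain signal delivery is the simplest path to show; not part of
 * this file.
 */
#if 0	/* illustrative userspace code, compile with -lrt */
#include <mqueue.h>
#include <signal.h>

static int watch_queue(mqd_t mq)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGUSR1,
	};

	/* only one process may own the registration; else -EBUSY above */
	return mq_notify(mq, &sev);
}
#endif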
1333SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1334		const struct mq_attr __user *, u_mqstat,
1335		struct mq_attr __user *, u_omqstat)
1336{
1337	int ret;
1338	struct mq_attr mqstat, omqstat;
1339	struct file *filp;
1340	struct inode *inode;
1341	struct mqueue_inode_info *info;
1342
1343	if (u_mqstat != NULL) {
1344		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
1345			return -EFAULT;
1346		if (mqstat.mq_flags & (~O_NONBLOCK))
1347			return -EINVAL;
1348	}
1349
1350	filp = fget(mqdes);
1351	if (!filp) {
1352		ret = -EBADF;
1353		goto out;
1354	}
1355
1356	inode = filp->f_path.dentry->d_inode;
1357	if (unlikely(filp->f_op != &mqueue_file_operations)) {
1358		ret = -EBADF;
1359		goto out_fput;
1360	}
1361	info = MQUEUE_I(inode);
1362
1363	spin_lock(&info->lock);
1364
1365	omqstat = info->attr;
1366	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
1367	if (u_mqstat) {
1368		audit_mq_getsetattr(mqdes, &mqstat);
1369		spin_lock(&filp->f_lock);
1370		if (mqstat.mq_flags & O_NONBLOCK)
1371			filp->f_flags |= O_NONBLOCK;
1372		else
1373			filp->f_flags &= ~O_NONBLOCK;
1374		spin_unlock(&filp->f_lock);
1375
1376		inode->i_atime = inode->i_ctime = CURRENT_TIME;
1377	}
1378
1379	spin_unlock(&info->lock);
1380
1381	ret = 0;
1382	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
1383						sizeof(struct mq_attr)))
1384		ret = -EFAULT;
1385
1386out_fput:
1387	fput(filp);
1388out:
1389	return ret;
1390}
1391
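/*
 * Editor's note: this syscall backs both mq_getattr() and mq_setattr()
 * in userspace, and the only settable flag is O_NONBLOCK, as enforced
 * above. A minimal sketch of toggling it; not part of this file.
 */
#if 0	/* illustrative userspace code, compile with -lrt */
#include <fcntl.h>
#include <mqueue.h>

static int set_nonblocking(mqd_t mq)
{
	struct mq_attr newattr = { .mq_flags = O_NONBLOCK };
	struct mq_attr oldattr;

	/* any bit other than O_NONBLOCK in mq_flags yields -EINVAL */
	return mq_setattr(mq, &newattr, &oldattr);
}
#endif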
1392static const struct inode_operations mqueue_dir_inode_operations = {
1393	.lookup = simple_lookup,
1394	.create = mqueue_create,
1395	.unlink = mqueue_unlink,
1396};
1397
1398static const struct file_operations mqueue_file_operations = {
1399	.flush = mqueue_flush_file,
1400	.poll = mqueue_poll_file,
1401	.read = mqueue_read_file,
1402	.llseek = default_llseek,
1403};
1404
1405static const struct super_operations mqueue_super_ops = {
1406	.alloc_inode = mqueue_alloc_inode,
1407	.destroy_inode = mqueue_destroy_inode,
1408	.evict_inode = mqueue_evict_inode,
1409	.statfs = simple_statfs,
1410};
1411
1412static struct file_system_type mqueue_fs_type = {
1413	.name = "mqueue",
1414	.mount = mqueue_mount,
1415	.kill_sb = kill_litter_super,
1416};
1417
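/*
 * Editor's note: besides the per-namespace kernel mount created in
 * mq_init_ns() below, this filesystem can be mounted from userspace so
 * queues appear as files. A sketch using mount(2); the mount point is
 * conventional, not fixed, and this is not part of this file.
 */
#if 0	/* illustrative userspace code */
#include <sys/mount.h>

static int mount_mqueue_fs(void)
{
	/* equivalent to: mount -t mqueue none /dev/mqueue */
	return mount("none", "/dev/mqueue", "mqueue", 0, NULL);
}
#endif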
1418int mq_init_ns(struct ipc_namespace *ns)
1419{
1420	ns->mq_queues_count  = 0;
1421	ns->mq_queues_max    = DFLT_QUEUESMAX;
1422	ns->mq_msg_max       = DFLT_MSGMAX;
1423	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
1424	ns->mq_msg_default   = DFLT_MSG;
1425	ns->mq_msgsize_default  = DFLT_MSGSIZE;
1426
1427	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
1428	if (IS_ERR(ns->mq_mnt)) {
1429		int err = PTR_ERR(ns->mq_mnt);
1430		ns->mq_mnt = NULL;
1431		return err;
1432	}
1433	return 0;
1434}
1435
1436void mq_clear_sbinfo(struct ipc_namespace *ns)
1437{
1438	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
1439}
1440
1441void mq_put_mnt(struct ipc_namespace *ns)
1442{
1443	kern_unmount(ns->mq_mnt);
1444}
1445
1446static int __init init_mqueue_fs(void)
1447{
1448	int error;
1449
1450	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
1451				sizeof(struct mqueue_inode_info), 0,
1452				SLAB_HWCACHE_ALIGN, init_once);
1453	if (mqueue_inode_cachep == NULL)
1454		return -ENOMEM;
1455
1456	/* ignore failures - they are not fatal */
1457	mq_sysctl_table = mq_register_sysctl_table();
1458
1459	error = register_filesystem(&mqueue_fs_type);
1460	if (error)
1461		goto out_sysctl;
1462
1463	spin_lock_init(&mq_lock);
1464
1465	error = mq_init_ns(&init_ipc_ns);
1466	if (error)
1467		goto out_filesystem;
1468
1469	return 0;
1470
1471out_filesystem:
1472	unregister_filesystem(&mqueue_fs_type);
1473out_sysctl:
1474	if (mq_sysctl_table)
1475		unregister_sysctl_table(mq_sysctl_table);
1476	kmem_cache_destroy(mqueue_inode_cachep);
1477	return error;
1478}
1479
1480__initcall(init_mqueue_fs);
ipc/mqueue.c (v5.9)
   1/*
   2 * POSIX message queues filesystem for Linux.
   3 *
   4 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
   5 *                          Michal Wronski          (michal.wronski@gmail.com)
   6 *
   7 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
   8 * Lockless receive & send, fd based notify:
   9 *			    Manfred Spraul	    (manfred@colorfullife.com)
  10 *
  11 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
  12 *
  13 * This file is released under the GPL.
  14 */
  15
  16#include <linux/capability.h>
  17#include <linux/init.h>
  18#include <linux/pagemap.h>
  19#include <linux/file.h>
  20#include <linux/mount.h>
  21#include <linux/fs_context.h>
  22#include <linux/namei.h>
  23#include <linux/sysctl.h>
  24#include <linux/poll.h>
  25#include <linux/mqueue.h>
  26#include <linux/msg.h>
  27#include <linux/skbuff.h>
  28#include <linux/vmalloc.h>
  29#include <linux/netlink.h>
  30#include <linux/syscalls.h>
  31#include <linux/audit.h>
  32#include <linux/signal.h>
  33#include <linux/mutex.h>
  34#include <linux/nsproxy.h>
  35#include <linux/pid.h>
  36#include <linux/ipc_namespace.h>
  37#include <linux/user_namespace.h>
  38#include <linux/slab.h>
  39#include <linux/sched/wake_q.h>
  40#include <linux/sched/signal.h>
  41#include <linux/sched/user.h>
  42
  43#include <net/sock.h>
  44#include "util.h"
  45
  46struct mqueue_fs_context {
  47	struct ipc_namespace	*ipc_ns;
  48};
  49
  50#define MQUEUE_MAGIC	0x19800202
  51#define DIRENT_SIZE	20
  52#define FILENT_SIZE	80
  53
  54#define SEND		0
  55#define RECV		1
  56
  57#define STATE_NONE	0
  58#define STATE_READY	1
  59
  60struct posix_msg_tree_node {
  61	struct rb_node		rb_node;
  62	struct list_head	msg_list;
  63	int			priority;
  64};
  65
  66/*
  67 * Locking:
  68 *
  69 * Accesses to a message queue are synchronized by acquiring info->lock.
  70 *
  71 * There are two notable exceptions:
  72 * - The actual wakeup of a sleeping task is performed using the wake_q
  73 *   framework. info->lock is already released when wake_up_q is called.
  74 * - The exit codepaths after sleeping check ext_wait_queue->state without
  75 *   any locks. If it is STATE_READY, then the syscall is completed without
  76 *   acquiring info->lock.
  77 *
  78 * MQ_BARRIER:
  79 * To achieve proper release/acquire memory barrier pairing, the state is set to
  80 * STATE_READY with smp_store_release(), and it is read with READ_ONCE followed
  81 * by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe() is used.
  82 *
  83 * This prevents the following races:
  84 *
  85 * 1) With the simple wake_q_add(), the task could be gone already before
  86 *    the increase of the reference happens
  87 * Thread A
  88 *				Thread B
  89 * WRITE_ONCE(wait.state, STATE_NONE);
  90 * schedule_hrtimeout()
  91 *				wake_q_add(A)
  92 *				if (cmpxchg()) // success
  93 *				   ->state = STATE_READY (reordered)
  94 * <timeout returns>
  95 * if (wait.state == STATE_READY) return;
  96 * sysret to user space
  97 * sys_exit()
  98 *				get_task_struct() // UaF
  99 *
 100 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 101 * the smp_store_release() that does ->state = STATE_READY.
 102 *
 103 * 2) Without proper _release/_acquire barriers, the woken up task
 104 *    could read stale data
 105 *
 106 * Thread A
 107 *				Thread B
 108 * do_mq_timedreceive
 109 * WRITE_ONCE(wait.state, STATE_NONE);
 110 * schedule_hrtimeout()
 111 *				state = STATE_READY;
 112 * <timeout returns>
 113 * if (wait.state == STATE_READY) return;
 114 * msg_ptr = wait.msg;		// Access to stale data!
 115 *				receiver->msg = message; (reordered)
 116 *
 117 * Solution: use _release and _acquire barriers.
 118 *
 119 * 3) There is intentionally no barrier when setting current->state
 120 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 121 *    release memory barrier, and the wakeup is triggered when holding
 122 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 123 *    acquire memory barrier.
 124 */
 125
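/*
 * Editor's note: a minimal sketch of the release/acquire pairing the
 * MQ_BARRIER comment above describes, reduced to plain C11 atomics as
 * an analogy for smp_store_release() and the READ_ONCE() +
 * smp_acquire__after_ctrl_dep() pair. Names are illustrative; this is
 * not kernel code and not part of this file.
 */
#if 0	/* illustrative standalone C11 code */
#include <stdatomic.h>

struct wait_slot {
	_Atomic int state;	/* 0 == STATE_NONE, 1 == STATE_READY */
	void *msg;
};

static void waker_side(struct wait_slot *w, void *message)
{
	w->msg = message;	/* plain store, ordered before state ... */
	/* ... by the release store, like smp_store_release() */
	atomic_store_explicit(&w->state, 1, memory_order_release);
}

static void *sleeper_side(struct wait_slot *w)
{
	/* the acquire load pairs with the release store above, so once
	 * STATE_READY is observed, the msg store is visible too */
	while (atomic_load_explicit(&w->state, memory_order_acquire) != 1)
		;
	return w->msg;
}
#endif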
 126struct ext_wait_queue {		/* queue of sleeping tasks */
 127	struct task_struct *task;
 128	struct list_head list;
 129	struct msg_msg *msg;	/* ptr of loaded message */
 130	int state;		/* one of STATE_* values */
 131};
 132
 133struct mqueue_inode_info {
 134	spinlock_t lock;
 135	struct inode vfs_inode;
 136	wait_queue_head_t wait_q;
 137
 138	struct rb_root msg_tree;
 139	struct rb_node *msg_tree_rightmost;
 140	struct posix_msg_tree_node *node_cache;
 141	struct mq_attr attr;
 142
 143	struct sigevent notify;
 144	struct pid *notify_owner;
 145	u32 notify_self_exec_id;
 146	struct user_namespace *notify_user_ns;
 147	struct user_struct *user;	/* user who created, for accounting */
 148	struct sock *notify_sock;
 149	struct sk_buff *notify_cookie;
 150
 151	/* for tasks waiting for free space and messages, respectively */
 152	struct ext_wait_queue e_wait_q[2];
 153
 154	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
 155};
 156
 157static struct file_system_type mqueue_fs_type;
 158static const struct inode_operations mqueue_dir_inode_operations;
 159static const struct file_operations mqueue_file_operations;
 160static const struct super_operations mqueue_super_ops;
 161static const struct fs_context_operations mqueue_fs_context_ops;
 162static void remove_notification(struct mqueue_inode_info *info);
 163
 164static struct kmem_cache *mqueue_inode_cachep;
 165
 166static struct ctl_table_header *mq_sysctl_table;
 167
 168static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
 169{
 170	return container_of(inode, struct mqueue_inode_info, vfs_inode);
 171}
 172
 173/*
 174 * This routine should be called with the mq_lock held.
 175 */
 176static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
 177{
 178	return get_ipc_ns(inode->i_sb->s_fs_info);
 179}
 180
 181static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
 182{
 183	struct ipc_namespace *ns;
 184
 185	spin_lock(&mq_lock);
 186	ns = __get_ns_from_inode(inode);
 187	spin_unlock(&mq_lock);
 188	return ns;
 189}
 190
 191/* Auxiliary functions to manipulate messages' list */
 192static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
 193{
 194	struct rb_node **p, *parent = NULL;
 195	struct posix_msg_tree_node *leaf;
 196	bool rightmost = true;
 197
 198	p = &info->msg_tree.rb_node;
 199	while (*p) {
 200		parent = *p;
 201		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
 202
 203		if (likely(leaf->priority == msg->m_type))
 204			goto insert_msg;
 205		else if (msg->m_type < leaf->priority) {
 206			p = &(*p)->rb_left;
 207			rightmost = false;
 208		} else
 209			p = &(*p)->rb_right;
 210	}
 211	if (info->node_cache) {
 212		leaf = info->node_cache;
 213		info->node_cache = NULL;
 214	} else {
 215		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
 216		if (!leaf)
 217			return -ENOMEM;
 218		INIT_LIST_HEAD(&leaf->msg_list);
 219	}
 220	leaf->priority = msg->m_type;
 221
 222	if (rightmost)
 223		info->msg_tree_rightmost = &leaf->rb_node;
 224
 225	rb_link_node(&leaf->rb_node, parent, p);
 226	rb_insert_color(&leaf->rb_node, &info->msg_tree);
 227insert_msg:
 228	info->attr.mq_curmsgs++;
 229	info->qsize += msg->m_ts;
 230	list_add_tail(&msg->m_list, &leaf->msg_list);
 231	return 0;
 232}
 233
 234static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
 235				  struct mqueue_inode_info *info)
 236{
 237	struct rb_node *node = &leaf->rb_node;
 238
 239	if (info->msg_tree_rightmost == node)
 240		info->msg_tree_rightmost = rb_prev(node);
 241
 242	rb_erase(node, &info->msg_tree);
 243	if (info->node_cache)
 244		kfree(leaf);
 245	else
 246		info->node_cache = leaf;
 247}
 248
 249static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
 250{
 251	struct rb_node *parent = NULL;
 252	struct posix_msg_tree_node *leaf;
 253	struct msg_msg *msg;
 254
 255try_again:
 256	/*
 257	 * During insert, low priorities go to the left and high to the
 258	 * right.  On receive, we want the highest priorities first, so
 259	 * walk all the way to the right.
 260	 */
 261	parent = info->msg_tree_rightmost;
 262	if (!parent) {
 263		if (info->attr.mq_curmsgs) {
 264			pr_warn_once("Inconsistency in POSIX message queue, "
 265				     "no tree element, but supposedly messages "
 266				     "should exist!\n");
 267			info->attr.mq_curmsgs = 0;
 268		}
 269		return NULL;
 270	}
 271	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
 272	if (unlikely(list_empty(&leaf->msg_list))) {
 273		pr_warn_once("Inconsistency in POSIX message queue, "
 274			     "empty leaf node but we haven't implemented "
 275			     "lazy leaf delete!\n");
 276		msg_tree_erase(leaf, info);
 277		goto try_again;
 278	} else {
 279		msg = list_first_entry(&leaf->msg_list,
 280				       struct msg_msg, m_list);
 281		list_del(&msg->m_list);
 282		if (list_empty(&leaf->msg_list)) {
 283			msg_tree_erase(leaf, info);
 284		}
 285	}
 286	info->attr.mq_curmsgs--;
 287	info->qsize -= msg->m_ts;
 288	return msg;
 289}
 290
 291static struct inode *mqueue_get_inode(struct super_block *sb,
 292		struct ipc_namespace *ipc_ns, umode_t mode,
 293		struct mq_attr *attr)
 294{
 295	struct user_struct *u = current_user();
 296	struct inode *inode;
 297	int ret = -ENOMEM;
 298
 299	inode = new_inode(sb);
 300	if (!inode)
 301		goto err;
 302
 303	inode->i_ino = get_next_ino();
 304	inode->i_mode = mode;
 305	inode->i_uid = current_fsuid();
 306	inode->i_gid = current_fsgid();
 307	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);
 308
 309	if (S_ISREG(mode)) {
 310		struct mqueue_inode_info *info;
 311		unsigned long mq_bytes, mq_treesize;
 312
 313		inode->i_fop = &mqueue_file_operations;
 314		inode->i_size = FILENT_SIZE;
 315		/* mqueue specific info */
 316		info = MQUEUE_I(inode);
 317		spin_lock_init(&info->lock);
 318		init_waitqueue_head(&info->wait_q);
 319		INIT_LIST_HEAD(&info->e_wait_q[0].list);
 320		INIT_LIST_HEAD(&info->e_wait_q[1].list);
 321		info->notify_owner = NULL;
 322		info->notify_user_ns = NULL;
 323		info->qsize = 0;
 324		info->user = NULL;	/* set when all is ok */
 325		info->msg_tree = RB_ROOT;
 326		info->msg_tree_rightmost = NULL;
 327		info->node_cache = NULL;
 328		memset(&info->attr, 0, sizeof(info->attr));
 329		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
 330					   ipc_ns->mq_msg_default);
 331		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
 332					    ipc_ns->mq_msgsize_default);
 333		if (attr) {
 334			info->attr.mq_maxmsg = attr->mq_maxmsg;
 335			info->attr.mq_msgsize = attr->mq_msgsize;
 336		}
 337		/*
 338		 * We used to allocate a static array of pointers and account
 339		 * the size of that array as well as one msg_msg struct per
 340		 * possible message into the queue size. That's no longer
 341		 * accurate as the queue is now an rbtree and will grow and
 342		 * shrink depending on usage patterns.  We can, however, still
 343		 * account one msg_msg struct per message, but the nodes are
 344		 * allocated depending on priority usage, and most programs
 345		 * only use one, or a handful, of priorities.  However, since
 346		 * this is pinned memory, we need to assume worst case, so
 347		 * that means the min(mq_maxmsg, max_priorities) * struct
 348		 * posix_msg_tree_node.
 349		 */
 350
 351		ret = -EINVAL;
 352		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
 353			goto out_inode;
 354		if (capable(CAP_SYS_RESOURCE)) {
 355			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
 356			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
 357				goto out_inode;
 358		} else {
 359			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
 360					info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
 361				goto out_inode;
 362		}
 363		ret = -EOVERFLOW;
 364		/* check for overflow */
 365		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
 366			goto out_inode;
 367		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
 368			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
 369			sizeof(struct posix_msg_tree_node);
 370		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
 371		if (mq_bytes + mq_treesize < mq_bytes)
 372			goto out_inode;
 373		mq_bytes += mq_treesize;
 374		spin_lock(&mq_lock);
 375		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
 376		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
 377			spin_unlock(&mq_lock);
 378			/* mqueue_evict_inode() releases info->messages */
 379			ret = -EMFILE;
 380			goto out_inode;
 381		}
 382		u->mq_bytes += mq_bytes;
 383		spin_unlock(&mq_lock);
 384
 385		/* all is ok */
 386		info->user = get_uid(u);
 387	} else if (S_ISDIR(mode)) {
 388		inc_nlink(inode);
 389		/* Some things misbehave if size == 0 on a directory */
 390		inode->i_size = 2 * DIRENT_SIZE;
 391		inode->i_op = &mqueue_dir_inode_operations;
 392		inode->i_fop = &simple_dir_operations;
 393	}
 394
 395	return inode;
 396out_inode:
 397	iput(inode);
 398err:
 399	return ERR_PTR(ret);
 400}
 401
 402static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
 403{
 404	struct inode *inode;
 405	struct ipc_namespace *ns = sb->s_fs_info;
 406
 407	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
 408	sb->s_blocksize = PAGE_SIZE;
 409	sb->s_blocksize_bits = PAGE_SHIFT;
 410	sb->s_magic = MQUEUE_MAGIC;
 411	sb->s_op = &mqueue_super_ops;
 412
 413	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
 414	if (IS_ERR(inode))
 415		return PTR_ERR(inode);
 416
 417	sb->s_root = d_make_root(inode);
 418	if (!sb->s_root)
 419		return -ENOMEM;
 420	return 0;
 421}
 422
 423static int mqueue_get_tree(struct fs_context *fc)
 424{
 425	struct mqueue_fs_context *ctx = fc->fs_private;
 426
 427	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
 428}
 429
 430static void mqueue_fs_context_free(struct fs_context *fc)
 431{
 432	struct mqueue_fs_context *ctx = fc->fs_private;
 433
 434	put_ipc_ns(ctx->ipc_ns);
 435	kfree(ctx);
 436}
 437
 438static int mqueue_init_fs_context(struct fs_context *fc)
 439{
 440	struct mqueue_fs_context *ctx;
 441
 442	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
 443	if (!ctx)
 444		return -ENOMEM;
 445
 446	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
 447	put_user_ns(fc->user_ns);
 448	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
 449	fc->fs_private = ctx;
 450	fc->ops = &mqueue_fs_context_ops;
 451	return 0;
 452}
 453
 454static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
 455{
 456	struct mqueue_fs_context *ctx;
 457	struct fs_context *fc;
 458	struct vfsmount *mnt;
 459
 460	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
 461	if (IS_ERR(fc))
 462		return ERR_CAST(fc);
 463
 464	ctx = fc->fs_private;
 465	put_ipc_ns(ctx->ipc_ns);
 466	ctx->ipc_ns = get_ipc_ns(ns);
 467	put_user_ns(fc->user_ns);
 468	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
 469
 470	mnt = fc_mount(fc);
 471	put_fs_context(fc);
 472	return mnt;
 473}
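/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * besides the kernel-internal mount above, the same filesystem can be
 * mounted manually to inspect queues.  The target path is an assumption
 * for the example.
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int mount_mqueue(void)
{
	/* equivalent of: mount -t mqueue none /dev/mqueue */
	if (mount("none", "/dev/mqueue", "mqueue", 0, NULL) != 0) {
		perror("mount");
		return -1;
	}
	return 0;
}
#endif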
 474
 475static void init_once(void *foo)
 476{
 477	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
 478
 479	inode_init_once(&p->vfs_inode);
 480}
 481
 482static struct inode *mqueue_alloc_inode(struct super_block *sb)
 483{
 484	struct mqueue_inode_info *ei;
 485
 486	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
 487	if (!ei)
 488		return NULL;
 489	return &ei->vfs_inode;
 490}
 491
 492static void mqueue_free_inode(struct inode *inode)
 493{
 494	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
 495}
 496
 497static void mqueue_evict_inode(struct inode *inode)
 498{
 499	struct mqueue_inode_info *info;
 500	struct user_struct *user;
 501	struct ipc_namespace *ipc_ns;
 502	struct msg_msg *msg, *nmsg;
 503	LIST_HEAD(tmp_msg);
 504
 505	clear_inode(inode);
 506
 507	if (S_ISDIR(inode->i_mode))
 508		return;
 509
 510	ipc_ns = get_ns_from_inode(inode);
 511	info = MQUEUE_I(inode);
 512	spin_lock(&info->lock);
 513	while ((msg = msg_get(info)) != NULL)
 514		list_add_tail(&msg->m_list, &tmp_msg);
 515	kfree(info->node_cache);
 516	spin_unlock(&info->lock);
 517
 518	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
 519		list_del(&msg->m_list);
 520		free_msg(msg);
 521	}
 522
 523	user = info->user;
 524	if (user) {
 525		unsigned long mq_bytes, mq_treesize;
 526
 527		/* Total number of bytes accounted for the mqueue */
 528		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
 529			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
 530			sizeof(struct posix_msg_tree_node);
 531
 532		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
 533					  info->attr.mq_msgsize);
 534
 535		spin_lock(&mq_lock);
 536		user->mq_bytes -= mq_bytes;
 537		/*
 538		 * get_ns_from_inode() ensures that the
 539		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
 540		 * to which we now hold a reference, or it is NULL.
 541		 * We can't put it here under mq_lock, though.
 542		 */
 543		if (ipc_ns)
 544			ipc_ns->mq_queues_count--;
 545		spin_unlock(&mq_lock);
 546		free_uid(user);
 547	}
 548	if (ipc_ns)
 549		put_ipc_ns(ipc_ns);
 550}
 551
 552static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
 553{
 554	struct inode *dir = dentry->d_parent->d_inode;
 555	struct inode *inode;
 556	struct mq_attr *attr = arg;
 557	int error;
 558	struct ipc_namespace *ipc_ns;
 559
 560	spin_lock(&mq_lock);
 561	ipc_ns = __get_ns_from_inode(dir);
 562	if (!ipc_ns) {
 563		error = -EACCES;
 564		goto out_unlock;
 565	}
 566
 567	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
 568	    !capable(CAP_SYS_RESOURCE)) {
 569		error = -ENOSPC;
 570		goto out_unlock;
 571	}
 572	ipc_ns->mq_queues_count++;
 573	spin_unlock(&mq_lock);
 574
 575	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
 576	if (IS_ERR(inode)) {
 577		error = PTR_ERR(inode);
 578		spin_lock(&mq_lock);
 579		ipc_ns->mq_queues_count--;
 580		goto out_unlock;
 581	}
 582
 583	put_ipc_ns(ipc_ns);
 584	dir->i_size += DIRENT_SIZE;
 585	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
 586
 587	d_instantiate(dentry, inode);
 588	dget(dentry);
 589	return 0;
 590out_unlock:
 591	spin_unlock(&mq_lock);
 592	if (ipc_ns)
 593		put_ipc_ns(ipc_ns);
 594	return error;
 595}
 596
 597static int mqueue_create(struct inode *dir, struct dentry *dentry,
 598				umode_t mode, bool excl)
 599{
 600	return mqueue_create_attr(dentry, mode, NULL);
 601}
 602
 603static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
 604{
 605	struct inode *inode = d_inode(dentry);
 606
 607	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
 608	dir->i_size -= DIRENT_SIZE;
 609	drop_nlink(inode);
 610	dput(dentry);
 611	return 0;
 612}
 613
 614/*
 615 * This is the read routine for a queue file.  To avoid turning it into
 616 * some sort of mq_receive() here, we allow reading only the queue size
 617 * and the notification info - the only values that are interesting
 618 * from the user's point of view and are not accessible through the
 619 * standard routines.
 620 */
 621static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
 622				size_t count, loff_t *off)
 623{
 624	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
 625	char buffer[FILENT_SIZE];
 626	ssize_t ret;
 627
 628	spin_lock(&info->lock);
 629	snprintf(buffer, sizeof(buffer),
 630			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
 631			info->qsize,
 632			info->notify_owner ? info->notify.sigev_notify : 0,
 633			(info->notify_owner &&
 634			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
 635				info->notify.sigev_signo : 0,
 636			pid_vnr(info->notify_owner));
 637	spin_unlock(&info->lock);
 638	buffer[sizeof(buffer)-1] = '\0';
 639
 640	ret = simple_read_from_buffer(u_data, count, off, buffer,
 641				strlen(buffer));
 642	if (ret <= 0)
 643		return ret;
 644
 645	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
 646	return ret;
 647}
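/*
 * Illustrative userspace sketch (not part of this file): because an
 * mqd_t is a regular file descriptor on Linux, the FILENT_SIZE-bounded
 * status line produced above can simply be read(2) from the queue file.
 * The path is an assumption for the example.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int dump_queue_status(const char *path)	/* e.g. "/dev/mqueue/myq" */
{
	char buf[128];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		/* prints e.g. "QSIZE:30 NOTIFY:0 SIGNO:0 NOTIFY_PID:0" */
		fputs(buf, stdout);
	}
	close(fd);
	return n < 0 ? -1 : 0;
}
#endif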
 648
 649static int mqueue_flush_file(struct file *filp, fl_owner_t id)
 650{
 651	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
 652
 653	spin_lock(&info->lock);
 654	if (task_tgid(current) == info->notify_owner)
 655		remove_notification(info);
 656
 657	spin_unlock(&info->lock);
 658	return 0;
 659}
 660
 661static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
 662{
 663	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
 664	__poll_t retval = 0;
 665
 666	poll_wait(filp, &info->wait_q, poll_tab);
 667
 668	spin_lock(&info->lock);
 669	if (info->attr.mq_curmsgs)
 670		retval = EPOLLIN | EPOLLRDNORM;
 671
 672	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
 673		retval |= EPOLLOUT | EPOLLWRNORM;
 674	spin_unlock(&info->lock);
 675
 676	return retval;
 677}
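/*
 * Illustrative userspace sketch (not part of this file): since Linux
 * implements mqd_t as a file descriptor, the poll hook above lets a
 * queue be multiplexed with poll(2)/epoll(7).
 */
#if 0
#include <mqueue.h>
#include <poll.h>

int wait_until_readable(mqd_t mq)
{
	struct pollfd pfd = { .fd = (int)mq, .events = POLLIN };

	/* POLLIN (EPOLLIN | EPOLLRDNORM above) once mq_curmsgs > 0 */
	return poll(&pfd, 1, -1 /* no timeout */);
}
#endif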
 678
 679/* Add current task to info->e_wait_q[sr] before the first element with smaller prio */
 680static void wq_add(struct mqueue_inode_info *info, int sr,
 681			struct ext_wait_queue *ewp)
 682{
 683	struct ext_wait_queue *walk;
 684
 685	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
 686		if (walk->task->prio <= current->prio) {
 687			list_add_tail(&ewp->list, &walk->list);
 688			return;
 689		}
 690	}
 691	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
 692}
 693
 694/*
 695 * Put the current task to sleep.  The caller must hold the queue lock;
 696 * after return the lock is no longer held.
 697 * sr: SEND or RECV
 698 */
 699static int wq_sleep(struct mqueue_inode_info *info, int sr,
 700		    ktime_t *timeout, struct ext_wait_queue *ewp)
 701	__releases(&info->lock)
 702{
 703	int retval;
 704	signed long time;
 705
 706	wq_add(info, sr, ewp);
 707
 708	for (;;) {
 709		/* memory barrier not required, we hold info->lock */
 710		__set_current_state(TASK_INTERRUPTIBLE);
 711
 712		spin_unlock(&info->lock);
 713		time = schedule_hrtimeout_range_clock(timeout, 0,
 714			HRTIMER_MODE_ABS, CLOCK_REALTIME);
 715
 716		if (READ_ONCE(ewp->state) == STATE_READY) {
 717			/* see MQ_BARRIER for purpose/pairing */
 718			smp_acquire__after_ctrl_dep();
 719			retval = 0;
 720			goto out;
 721		}
 722		spin_lock(&info->lock);
 723
 724		/* we hold info->lock, so no memory barrier required */
 725		if (READ_ONCE(ewp->state) == STATE_READY) {
 726			retval = 0;
 727			goto out_unlock;
 728		}
 729		if (signal_pending(current)) {
 730			retval = -ERESTARTSYS;
 731			break;
 732		}
 733		if (time == 0) {
 734			retval = -ETIMEDOUT;
 735			break;
 736		}
 737	}
 738	list_del(&ewp->list);
 739out_unlock:
 740	spin_unlock(&info->lock);
 741out:
 742	return retval;
 743}
 744
 745/*
 746 * Return the waiting task that should be serviced first, or NULL if none exists.
 747 */
 748static struct ext_wait_queue *wq_get_first_waiter(
 749		struct mqueue_inode_info *info, int sr)
 750{
 751	struct list_head *ptr;
 752
 753	ptr = info->e_wait_q[sr].list.prev;
 754	if (ptr == &info->e_wait_q[sr].list)
 755		return NULL;
 756	return list_entry(ptr, struct ext_wait_queue, list);
 757}
 758
 759
 760static inline void set_cookie(struct sk_buff *skb, char code)
 761{
 762	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
 763}
 764
 765/*
 766 * This function exists only to keep sys_mq_timedsend() from growing too long.
 767 */
 768static void __do_notify(struct mqueue_inode_info *info)
 769{
 770	/*
 771	 * Notification is invoked when a process has registered for it,
 772	 * no process is waiting synchronously for a message, AND the
 773	 * queue state changed from empty to not empty.  Here we are sure
 774	 * that no one is waiting synchronously. */
 775	if (info->notify_owner &&
 776	    info->attr.mq_curmsgs == 1) {
 777		switch (info->notify.sigev_notify) {
 778		case SIGEV_NONE:
 779			break;
 780		case SIGEV_SIGNAL: {
 781			struct kernel_siginfo sig_i;
 782			struct task_struct *task;
 783
 784			/* do_mq_notify() accepts sigev_signo == 0, why?? */
 785			if (!info->notify.sigev_signo)
 786				break;
 787
 788			clear_siginfo(&sig_i);
 789			sig_i.si_signo = info->notify.sigev_signo;
 790			sig_i.si_errno = 0;
 791			sig_i.si_code = SI_MESGQ;
 792			sig_i.si_value = info->notify.sigev_value;
 793			rcu_read_lock();
 794			/* map current pid/uid into info->owner's namespaces */
 795			sig_i.si_pid = task_tgid_nr_ns(current,
 796						ns_of_pid(info->notify_owner));
 797			sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
 798						current_uid());
 799			/*
 800			 * We can't use kill_pid_info(), this signal should
 801			 * bypass check_kill_permission(). It is from kernel
 802			 * but si_fromuser() can't know this.
 803			 * We do check the self_exec_id, to avoid sending
 804			 * signals to programs that don't expect them.
 805			 */
 806			task = pid_task(info->notify_owner, PIDTYPE_TGID);
 807			if (task && task->self_exec_id ==
 808						info->notify_self_exec_id) {
 809				do_send_sig_info(info->notify.sigev_signo,
 810						&sig_i, task, PIDTYPE_TGID);
 811			}
 812			rcu_read_unlock();
 813			break;
 814		}
 815		case SIGEV_THREAD:
 816			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
 817			netlink_sendskb(info->notify_sock, info->notify_cookie);
 818			break;
 819		}
 820		/* after notification unregisters process */
 821		put_pid(info->notify_owner);
 822		put_user_ns(info->notify_user_ns);
 823		info->notify_owner = NULL;
 824		info->notify_user_ns = NULL;
 825	}
 826	wake_up(&info->wait_q);
 827}
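/*
 * Illustrative userspace sketch (not part of this file): registering
 * for the SIGEV_SIGNAL case handled above.  The signal choice is an
 * assumption for the example.
 */
#if 0
#include <mqueue.h>
#include <signal.h>
#include <stdio.h>

int register_for_notify(mqd_t mq)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGUSR1,
		.sigev_value.sival_ptr = NULL,
	};

	/* Delivered once, when the queue goes from empty to non-empty
	 * and nobody is blocked in mq_receive(); must re-register after
	 * each notification (the kernel clears notify_owner above). */
	if (mq_notify(mq, &sev) != 0) {
		perror("mq_notify");
		return -1;
	}
	return 0;
}
#endif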
 828
 829static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
 830			   struct timespec64 *ts)
 831{
 832	if (get_timespec64(ts, u_abs_timeout))
 833		return -EFAULT;
 834	if (!timespec64_valid(ts))
 835		return -EINVAL;
 836	return 0;
 837}
 838
 839static void remove_notification(struct mqueue_inode_info *info)
 840{
 841	if (info->notify_owner != NULL &&
 842	    info->notify.sigev_notify == SIGEV_THREAD) {
 843		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
 844		netlink_sendskb(info->notify_sock, info->notify_cookie);
 845	}
 846	put_pid(info->notify_owner);
 847	put_user_ns(info->notify_user_ns);
 848	info->notify_owner = NULL;
 849	info->notify_user_ns = NULL;
 850}
 851
 852static int prepare_open(struct dentry *dentry, int oflag, int ro,
 853			umode_t mode, struct filename *name,
 854			struct mq_attr *attr)
 855{
 856	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
 857						  MAY_READ | MAY_WRITE };
 858	int acc;
 859
 860	if (d_really_is_negative(dentry)) {
 861		if (!(oflag & O_CREAT))
 862			return -ENOENT;
 863		if (ro)
 864			return ro;
 865		audit_inode_parent_hidden(name, dentry->d_parent);
 866		return vfs_mkobj(dentry, mode & ~current_umask(),
 867				  mqueue_create_attr, attr);
 868	}
 869	/* it already existed */
 870	audit_inode(name, dentry, 0);
 871	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
 872		return -EEXIST;
 873	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
 874		return -EINVAL;
 875	acc = oflag2acc[oflag & O_ACCMODE];
 876	return inode_permission(d_inode(dentry), acc);
 877}
 878
 879static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
 880		      struct mq_attr *attr)
 881{
 882	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
 883	struct dentry *root = mnt->mnt_root;
 884	struct filename *name;
 885	struct path path;
 886	int fd, error;
 887	int ro;
 888
 889	audit_mq_open(oflag, mode, attr);
 890
 891	if (IS_ERR(name = getname(u_name)))
 892		return PTR_ERR(name);
 893
 894	fd = get_unused_fd_flags(O_CLOEXEC);
 895	if (fd < 0)
 896		goto out_putname;
 897
 898	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
 899	inode_lock(d_inode(root));
 900	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
 901	if (IS_ERR(path.dentry)) {
 902		error = PTR_ERR(path.dentry);
 903		goto out_putfd;
 904	}
 905	path.mnt = mntget(mnt);
 906	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
 907	if (!error) {
 908		struct file *file = dentry_open(&path, oflag, current_cred());
 909		if (!IS_ERR(file))
 910			fd_install(fd, file);
 911		else
 912			error = PTR_ERR(file);
 913	}
 914	path_put(&path);
 915out_putfd:
 916	if (error) {
 917		put_unused_fd(fd);
 918		fd = error;
 919	}
 920	inode_unlock(d_inode(root));
 921	if (!ro)
 922		mnt_drop_write(mnt);
 923out_putname:
 924	putname(name);
 925	return fd;
 926}
 927
 928SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
 929		struct mq_attr __user *, u_attr)
 930{
 931	struct mq_attr attr;
 932	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
 933		return -EFAULT;
 934
 935	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
 936}
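/*
 * Illustrative userspace sketch (not part of this file): the libc
 * wrapper for the syscall above.  Name, permissions and attributes are
 * assumptions for the example; passing NULL for attr selects the
 * namespace defaults computed in mqueue_get_inode().
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

mqd_t create_queue(void)
{
	struct mq_attr attr = {
		.mq_maxmsg  = 10,	/* <= /proc/sys/fs/mqueue/msg_max unless privileged */
		.mq_msgsize = 1024,	/* <= /proc/sys/fs/mqueue/msgsize_max unless privileged */
	};
	mqd_t mq = mq_open("/example", O_CREAT | O_RDWR, 0600, &attr);

	if (mq == (mqd_t)-1)
		perror("mq_open");
	return mq;
}
#endif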
 937
 938SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
 939{
 940	int err;
 941	struct filename *name;
 942	struct dentry *dentry;
 943	struct inode *inode = NULL;
 944	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
 945	struct vfsmount *mnt = ipc_ns->mq_mnt;
 946
 947	name = getname(u_name);
 948	if (IS_ERR(name))
 949		return PTR_ERR(name);
 950
 951	audit_inode_parent_hidden(name, mnt->mnt_root);
 952	err = mnt_want_write(mnt);
 953	if (err)
 954		goto out_name;
 955	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
 956	dentry = lookup_one_len(name->name, mnt->mnt_root,
 957				strlen(name->name));
 958	if (IS_ERR(dentry)) {
 959		err = PTR_ERR(dentry);
 960		goto out_unlock;
 961	}
 962
 963	inode = d_inode(dentry);
 964	if (!inode) {
 965		err = -ENOENT;
 966	} else {
 967		ihold(inode);
 968		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
 969	}
 970	dput(dentry);
 971
 972out_unlock:
 973	inode_unlock(d_inode(mnt->mnt_root));
 974	if (inode)
 975		iput(inode);
 976	mnt_drop_write(mnt);
 977out_name:
 978	putname(name);
 979
 980	return err;
 981}
 982
 983/* Pipelined send and receive functions.
 984 *
 985 * If a receiver finds no waiting message, then it registers itself in the
 986 * list of waiting receivers. A sender checks that list before adding the new
 987 * message into the message array. If there is a waiting receiver, then it
 988 * bypasses the message array and directly hands the message over to the
 989 * receiver. The receiver accepts the message and returns without grabbing the
 990 * queue spinlock:
 991 *
 992 * - Set pointer to message.
 993 * - Queue the receiver task for later wakeup (without the info->lock).
 994 * - Update its state to STATE_READY. Now the receiver can continue.
 995 * - Wake up the process after the lock is dropped. Should the process wake up
 996 *   before this wakeup (due to a timeout or a signal) it will either see
 997 *   STATE_READY and continue or acquire the lock to check the state again.
 998 *
 999 * The same algorithm is used for senders.
1000 */
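/*
 * A minimal sketch of the ordering contract described above (this
 * relies on the MQ_BARRIER convention established earlier in the file;
 * 'state' and 'msg' stand in for the ext_wait_queue fields):
 *
 *	waker				sleeper
 *	-----				-------
 *	this->msg = message;		if (READ_ONCE(ewp->state)
 *	smp_store_release(		       == STATE_READY) {
 *		&this->state,			smp_acquire__after_ctrl_dep();
 *		STATE_READY);			r = ewp->msg; // visible
 *	wake_q_add_safe(...);		}
 *
 * The release/acquire pair guarantees that once the sleeper observes
 * STATE_READY, the earlier store to ->msg is visible as well, so the
 * sleeper may return without taking info->lock.
 */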
1001
1002static inline void __pipelined_op(struct wake_q_head *wake_q,
1003				  struct mqueue_inode_info *info,
1004				  struct ext_wait_queue *this)
1005{
1006	list_del(&this->list);
1007	get_task_struct(this->task);
1008
1009	/* see MQ_BARRIER for purpose/pairing */
1010	smp_store_release(&this->state, STATE_READY);
1011	wake_q_add_safe(wake_q, this->task);
1012}
1013
1014/* pipelined_send() - send a message directly to the task waiting in
1015 * sys_mq_timedreceive() (without inserting message into a queue).
1016 */
1017static inline void pipelined_send(struct wake_q_head *wake_q,
1018				  struct mqueue_inode_info *info,
1019				  struct msg_msg *message,
1020				  struct ext_wait_queue *receiver)
1021{
1022	receiver->msg = message;
1023	__pipelined_op(wake_q, info, receiver);
1024}
1025
1026/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
1027 * its message and insert it into the queue (one slot is free for sure). */
1028static inline void pipelined_receive(struct wake_q_head *wake_q,
1029				     struct mqueue_inode_info *info)
1030{
1031	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);
1032
1033	if (!sender) {
1034		/* for poll */
1035		wake_up_interruptible(&info->wait_q);
1036		return;
1037	}
1038	if (msg_insert(sender->msg, info))
1039		return;
1040
1041	__pipelined_op(wake_q, info, sender);
1042}
1043
1044static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
1045		size_t msg_len, unsigned int msg_prio,
1046		struct timespec64 *ts)
1047{
1048	struct fd f;
1049	struct inode *inode;
1050	struct ext_wait_queue wait;
1051	struct ext_wait_queue *receiver;
1052	struct msg_msg *msg_ptr;
1053	struct mqueue_inode_info *info;
1054	ktime_t expires, *timeout = NULL;
1055	struct posix_msg_tree_node *new_leaf = NULL;
1056	int ret = 0;
1057	DEFINE_WAKE_Q(wake_q);
1058
1059	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
1060		return -EINVAL;
1061
1062	if (ts) {
1063		expires = timespec64_to_ktime(*ts);
1064		timeout = &expires;
1065	}
1066
1067	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);
1068
1069	f = fdget(mqdes);
1070	if (unlikely(!f.file)) {
1071		ret = -EBADF;
1072		goto out;
1073	}
1074
1075	inode = file_inode(f.file);
1076	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1077		ret = -EBADF;
1078		goto out_fput;
1079	}
1080	info = MQUEUE_I(inode);
1081	audit_file(f.file);
1082
1083	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
1084		ret = -EBADF;
1085		goto out_fput;
1086	}
1087
1088	if (unlikely(msg_len > info->attr.mq_msgsize)) {
1089		ret = -EMSGSIZE;
1090		goto out_fput;
1091	}
1092
1093	/* First try to allocate memory, before doing anything with
1094	 * existing queues. */
1095	msg_ptr = load_msg(u_msg_ptr, msg_len);
1096	if (IS_ERR(msg_ptr)) {
1097		ret = PTR_ERR(msg_ptr);
1098		goto out_fput;
1099	}
1100	msg_ptr->m_ts = msg_len;
1101	msg_ptr->m_type = msg_prio;
1102
1103	/*
1104	 * msg_insert really wants us to have a valid, spare node struct so
1105	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
1106	 * fall back to that if necessary.
1107	 */
1108	if (!info->node_cache)
1109		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
1110
1111	spin_lock(&info->lock);
1112
1113	if (!info->node_cache && new_leaf) {
1114		/* Save our speculative allocation into the cache */
1115		INIT_LIST_HEAD(&new_leaf->msg_list);
1116		info->node_cache = new_leaf;
1117		new_leaf = NULL;
1118	} else {
1119		kfree(new_leaf);
1120	}
1121
1122	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
1123		if (f.file->f_flags & O_NONBLOCK) {
1124			ret = -EAGAIN;
1125		} else {
1126			wait.task = current;
1127			wait.msg = (void *) msg_ptr;
1128
1129			/* memory barrier not required, we hold info->lock */
1130			WRITE_ONCE(wait.state, STATE_NONE);
1131			ret = wq_sleep(info, SEND, timeout, &wait);
1132			/*
1133			 * wq_sleep must be called with info->lock held, and
1134			 * returns with the lock released
1135			 */
1136			goto out_free;
1137		}
1138	} else {
1139		receiver = wq_get_first_waiter(info, RECV);
1140		if (receiver) {
1141			pipelined_send(&wake_q, info, msg_ptr, receiver);
1142		} else {
1143			/* adds message to the queue */
1144			ret = msg_insert(msg_ptr, info);
1145			if (ret)
1146				goto out_unlock;
1147			__do_notify(info);
1148		}
1149		inode->i_atime = inode->i_mtime = inode->i_ctime =
1150				current_time(inode);
1151	}
1152out_unlock:
1153	spin_unlock(&info->lock);
1154	wake_up_q(&wake_q);
1155out_free:
1156	if (ret)
1157		free_msg(msg_ptr);
1158out_fput:
1159	fdput(f);
1160out:
1161	return ret;
1162}
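/*
 * Illustrative userspace sketch (not part of this file): the timeout
 * handled above is absolute and measured against CLOCK_REALTIME (see
 * schedule_hrtimeout_range_clock() in wq_sleep()).  The 5-second
 * deadline is an assumption for the example.
 */
#if 0
#include <mqueue.h>
#include <time.h>

int send_with_deadline(mqd_t mq, const char *msg, size_t len, unsigned int prio)
{
	struct timespec abs_timeout;

	clock_gettime(CLOCK_REALTIME, &abs_timeout);
	abs_timeout.tv_sec += 5;	/* give up 5 seconds from now */

	/* fails with ETIMEDOUT if the queue stays full past the deadline */
	return mq_timedsend(mq, msg, len, prio, &abs_timeout);
}
#endif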
1163
1164static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
1165		size_t msg_len, unsigned int __user *u_msg_prio,
1166		struct timespec64 *ts)
1167{
1168	ssize_t ret;
1169	struct msg_msg *msg_ptr;
1170	struct fd f;
1171	struct inode *inode;
1172	struct mqueue_inode_info *info;
1173	struct ext_wait_queue wait;
1174	ktime_t expires, *timeout = NULL;
1175	struct posix_msg_tree_node *new_leaf = NULL;
1176
1177	if (ts) {
1178		expires = timespec64_to_ktime(*ts);
1179		timeout = &expires;
1180	}
1181
1182	audit_mq_sendrecv(mqdes, msg_len, 0, ts);
1183
1184	f = fdget(mqdes);
1185	if (unlikely(!f.file)) {
1186		ret = -EBADF;
1187		goto out;
1188	}
1189
1190	inode = file_inode(f.file);
1191	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1192		ret = -EBADF;
1193		goto out_fput;
1194	}
1195	info = MQUEUE_I(inode);
1196	audit_file(f.file);
1197
1198	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
1199		ret = -EBADF;
1200		goto out_fput;
1201	}
1202
 1203	/* check that the buffer is big enough */
1204	if (unlikely(msg_len < info->attr.mq_msgsize)) {
1205		ret = -EMSGSIZE;
1206		goto out_fput;
1207	}
1208
1209	/*
1210	 * msg_insert really wants us to have a valid, spare node struct so
1211	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
1212	 * fall back to that if necessary.
1213	 */
1214	if (!info->node_cache)
1215		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
1216
1217	spin_lock(&info->lock);
1218
1219	if (!info->node_cache && new_leaf) {
1220		/* Save our speculative allocation into the cache */
1221		INIT_LIST_HEAD(&new_leaf->msg_list);
1222		info->node_cache = new_leaf;
1223	} else {
1224		kfree(new_leaf);
1225	}
1226
1227	if (info->attr.mq_curmsgs == 0) {
1228		if (f.file->f_flags & O_NONBLOCK) {
1229			spin_unlock(&info->lock);
1230			ret = -EAGAIN;
1231		} else {
1232			wait.task = current;
1233
1234			/* memory barrier not required, we hold info->lock */
1235			WRITE_ONCE(wait.state, STATE_NONE);
1236			ret = wq_sleep(info, RECV, timeout, &wait);
1237			msg_ptr = wait.msg;
1238		}
1239	} else {
1240		DEFINE_WAKE_Q(wake_q);
1241
1242		msg_ptr = msg_get(info);
1243
1244		inode->i_atime = inode->i_mtime = inode->i_ctime =
1245				current_time(inode);
1246
1247		/* There is now free space in queue. */
1248		pipelined_receive(&wake_q, info);
1249		spin_unlock(&info->lock);
1250		wake_up_q(&wake_q);
1251		ret = 0;
1252	}
1253	if (ret == 0) {
1254		ret = msg_ptr->m_ts;
1255
1256		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
1257			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
1258			ret = -EFAULT;
1259		}
1260		free_msg(msg_ptr);
1261	}
1262out_fput:
1263	fdput(f);
1264out:
1265	return ret;
1266}
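/*
 * Illustrative userspace sketch (not part of this file): the check
 * above rejects buffers smaller than mq_msgsize with EMSGSIZE, so a
 * portable receiver sizes its buffer from mq_getattr() first.
 */
#if 0
#include <mqueue.h>
#include <stdlib.h>

ssize_t receive_one(mqd_t mq, unsigned int *prio)
{
	struct mq_attr attr;
	char *buf;
	ssize_t n;

	if (mq_getattr(mq, &attr) != 0)
		return -1;
	buf = malloc(attr.mq_msgsize);	/* must be >= mq_msgsize */
	if (!buf)
		return -1;
	n = mq_receive(mq, buf, attr.mq_msgsize, prio);
	/* ... use buf[0..n) ... */
	free(buf);
	return n;
}
#endif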
1267
1268SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
1269		size_t, msg_len, unsigned int, msg_prio,
1270		const struct __kernel_timespec __user *, u_abs_timeout)
1271{
1272	struct timespec64 ts, *p = NULL;
1273	if (u_abs_timeout) {
1274		int res = prepare_timeout(u_abs_timeout, &ts);
1275		if (res)
1276			return res;
1277		p = &ts;
1278	}
1279	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
1280}
1281
1282SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
1283		size_t, msg_len, unsigned int __user *, u_msg_prio,
1284		const struct __kernel_timespec __user *, u_abs_timeout)
1285{
1286	struct timespec64 ts, *p = NULL;
1287	if (u_abs_timeout) {
1288		int res = prepare_timeout(u_abs_timeout, &ts);
1289		if (res)
1290			return res;
1291		p = &ts;
1292	}
1293	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
1294}
1295
1296/*
1297 * Note: the case where a user asks us to deregister (with a NULL pointer)
1298 * while not being the current owner of the notification is silently
1299 * discarded.  This behaviour is not explicitly defined by POSIX.
1300 */
1301static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
1302{
1303	int ret;
1304	struct fd f;
1305	struct sock *sock;
1306	struct inode *inode;
1307	struct mqueue_inode_info *info;
1308	struct sk_buff *nc;
1309
1310	audit_mq_notify(mqdes, notification);
1311
1312	nc = NULL;
1313	sock = NULL;
1314	if (notification != NULL) {
1315		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
1316			     notification->sigev_notify != SIGEV_SIGNAL &&
1317			     notification->sigev_notify != SIGEV_THREAD))
1318			return -EINVAL;
1319		if (notification->sigev_notify == SIGEV_SIGNAL &&
1320			!valid_signal(notification->sigev_signo)) {
1321			return -EINVAL;
1322		}
1323		if (notification->sigev_notify == SIGEV_THREAD) {
1324			long timeo;
1325
1326			/* create the notify skb */
1327			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
1328			if (!nc)
1329				return -ENOMEM;
1330
1331			if (copy_from_user(nc->data,
1332					notification->sigev_value.sival_ptr,
1333					NOTIFY_COOKIE_LEN)) {
1334				ret = -EFAULT;
1335				goto free_skb;
1336			}
1337
1338			/* TODO: add a header? */
1339			skb_put(nc, NOTIFY_COOKIE_LEN);
1340			/* and attach it to the socket */
1341retry:
1342			f = fdget(notification->sigev_signo);
1343			if (!f.file) {
1344				ret = -EBADF;
1345				goto out;
1346			}
1347			sock = netlink_getsockbyfilp(f.file);
1348			fdput(f);
1349			if (IS_ERR(sock)) {
1350				ret = PTR_ERR(sock);
1351				goto free_skb;
1352			}
1353
1354			timeo = MAX_SCHEDULE_TIMEOUT;
1355			ret = netlink_attachskb(sock, nc, &timeo, NULL);
1356			if (ret == 1) {
1357				sock = NULL;
1358				goto retry;
1359			}
1360			if (ret)
1361				return ret;
1362		}
1363	}
1364
1365	f = fdget(mqdes);
1366	if (!f.file) {
1367		ret = -EBADF;
1368		goto out;
1369	}
1370
1371	inode = file_inode(f.file);
1372	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1373		ret = -EBADF;
1374		goto out_fput;
1375	}
1376	info = MQUEUE_I(inode);
1377
1378	ret = 0;
1379	spin_lock(&info->lock);
1380	if (notification == NULL) {
1381		if (info->notify_owner == task_tgid(current)) {
1382			remove_notification(info);
1383			inode->i_atime = inode->i_ctime = current_time(inode);
1384		}
1385	} else if (info->notify_owner != NULL) {
1386		ret = -EBUSY;
1387	} else {
1388		switch (notification->sigev_notify) {
1389		case SIGEV_NONE:
1390			info->notify.sigev_notify = SIGEV_NONE;
1391			break;
1392		case SIGEV_THREAD:
1393			info->notify_sock = sock;
1394			info->notify_cookie = nc;
1395			sock = NULL;
1396			nc = NULL;
1397			info->notify.sigev_notify = SIGEV_THREAD;
1398			break;
1399		case SIGEV_SIGNAL:
1400			info->notify.sigev_signo = notification->sigev_signo;
1401			info->notify.sigev_value = notification->sigev_value;
1402			info->notify.sigev_notify = SIGEV_SIGNAL;
1403			info->notify_self_exec_id = current->self_exec_id;
1404			break;
1405		}
1406
1407		info->notify_owner = get_pid(task_tgid(current));
1408		info->notify_user_ns = get_user_ns(current_user_ns());
1409		inode->i_atime = inode->i_ctime = current_time(inode);
1410	}
1411	spin_unlock(&info->lock);
1412out_fput:
1413	fdput(f);
1414out:
1415	if (sock)
1416		netlink_detachskb(sock, nc);
1417	else
1418free_skb:
1419		dev_kfree_skb(nc);
1420
1421	return ret;
1422}
1423
1424SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1425		const struct sigevent __user *, u_notification)
1426{
1427	struct sigevent n, *p = NULL;
1428	if (u_notification) {
1429		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
1430			return -EFAULT;
1431		p = &n;
1432	}
1433	return do_mq_notify(mqdes, p);
1434}
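/*
 * Illustrative userspace sketch (not part of this file): for the
 * SIGEV_THREAD case the cookie/netlink machinery above is driven by
 * the C library, which starts a helper thread and invokes the supplied
 * function; the application-visible API is still just mq_notify().
 * The callback and its argument are assumptions for the example.
 */
#if 0
#include <mqueue.h>
#include <signal.h>
#include <stdio.h>

static void on_message(union sigval sv)
{
	mqd_t mq = *(mqd_t *)sv.sival_ptr;

	printf("queue %d became non-empty\n", (int)mq);
}

int register_thread_notify(mqd_t *mqp)
{
	struct sigevent sev = {
		.sigev_notify          = SIGEV_THREAD,
		.sigev_notify_function = on_message,
		.sigev_value.sival_ptr = mqp,
	};

	return mq_notify(*mqp, &sev);
}
#endif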
1435
1436static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
1437{
1438	struct fd f;
1439	struct inode *inode;
1440	struct mqueue_inode_info *info;
1441
1442	if (new && (new->mq_flags & (~O_NONBLOCK)))
1443		return -EINVAL;
1444
1445	f = fdget(mqdes);
1446	if (!f.file)
1447		return -EBADF;
1448
1449	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1450		fdput(f);
1451		return -EBADF;
1452	}
1453
1454	inode = file_inode(f.file);
1455	info = MQUEUE_I(inode);
1456
1457	spin_lock(&info->lock);
1458
1459	if (old) {
1460		*old = info->attr;
1461		old->mq_flags = f.file->f_flags & O_NONBLOCK;
1462	}
1463	if (new) {
1464		audit_mq_getsetattr(mqdes, new);
1465		spin_lock(&f.file->f_lock);
1466		if (new->mq_flags & O_NONBLOCK)
1467			f.file->f_flags |= O_NONBLOCK;
1468		else
1469			f.file->f_flags &= ~O_NONBLOCK;
1470		spin_unlock(&f.file->f_lock);
1471
1472		inode->i_atime = inode->i_ctime = current_time(inode);
1473	}
1474
1475	spin_unlock(&info->lock);
1476	fdput(f);
1477	return 0;
1478}
1479
1480SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1481		const struct mq_attr __user *, u_mqstat,
1482		struct mq_attr __user *, u_omqstat)
1483{
1484	int ret;
1485	struct mq_attr mqstat, omqstat;
1486	struct mq_attr *new = NULL, *old = NULL;
1487
1488	if (u_mqstat) {
1489		new = &mqstat;
1490		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
1491			return -EFAULT;
1492	}
1493	if (u_omqstat)
1494		old = &omqstat;
1495
1496	ret = do_mq_getsetattr(mqdes, new, old);
1497	if (ret || !old)
1498		return ret;
1499
1500	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
1501		return -EFAULT;
1502	return 0;
1503}
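/*
 * Illustrative userspace sketch (not part of this file): as the code
 * above shows, the only mutable attribute is the O_NONBLOCK flag;
 * mq_maxmsg and mq_msgsize are fixed at creation time.
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>

int set_nonblocking(mqd_t mq, int on)
{
	struct mq_attr attr = { .mq_flags = on ? O_NONBLOCK : 0 };

	/* mq_setattr() is the libc wrapper around mq_getsetattr above */
	return mq_setattr(mq, &attr, NULL);
}
#endif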
1504
1505#ifdef CONFIG_COMPAT
1506
1507struct compat_mq_attr {
1508	compat_long_t mq_flags;      /* message queue flags		     */
1509	compat_long_t mq_maxmsg;     /* maximum number of messages	     */
1510	compat_long_t mq_msgsize;    /* maximum message size		     */
1511	compat_long_t mq_curmsgs;    /* number of messages currently queued  */
1512	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
1513};
1514
1515static inline int get_compat_mq_attr(struct mq_attr *attr,
1516			const struct compat_mq_attr __user *uattr)
1517{
1518	struct compat_mq_attr v;
1519
1520	if (copy_from_user(&v, uattr, sizeof(*uattr)))
1521		return -EFAULT;
1522
1523	memset(attr, 0, sizeof(*attr));
1524	attr->mq_flags = v.mq_flags;
1525	attr->mq_maxmsg = v.mq_maxmsg;
1526	attr->mq_msgsize = v.mq_msgsize;
1527	attr->mq_curmsgs = v.mq_curmsgs;
1528	return 0;
1529}
1530
1531static inline int put_compat_mq_attr(const struct mq_attr *attr,
1532			struct compat_mq_attr __user *uattr)
1533{
1534	struct compat_mq_attr v;
1535
1536	memset(&v, 0, sizeof(v));
1537	v.mq_flags = attr->mq_flags;
1538	v.mq_maxmsg = attr->mq_maxmsg;
1539	v.mq_msgsize = attr->mq_msgsize;
1540	v.mq_curmsgs = attr->mq_curmsgs;
1541	if (copy_to_user(uattr, &v, sizeof(*uattr)))
1542		return -EFAULT;
1543	return 0;
1544}
1545
1546COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
1547		       int, oflag, compat_mode_t, mode,
1548		       struct compat_mq_attr __user *, u_attr)
1549{
1550	struct mq_attr attr, *p = NULL;
1551	if (u_attr && oflag & O_CREAT) {
1552		p = &attr;
1553		if (get_compat_mq_attr(&attr, u_attr))
1554			return -EFAULT;
1555	}
1556	return do_mq_open(u_name, oflag, mode, p);
1557}
1558
1559COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1560		       const struct compat_sigevent __user *, u_notification)
1561{
1562	struct sigevent n, *p = NULL;
1563	if (u_notification) {
1564		if (get_compat_sigevent(&n, u_notification))
1565			return -EFAULT;
1566		if (n.sigev_notify == SIGEV_THREAD)
1567			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
1568		p = &n;
1569	}
1570	return do_mq_notify(mqdes, p);
1571}
1572
1573COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1574		       const struct compat_mq_attr __user *, u_mqstat,
1575		       struct compat_mq_attr __user *, u_omqstat)
1576{
1577	int ret;
1578	struct mq_attr mqstat, omqstat;
1579	struct mq_attr *new = NULL, *old = NULL;
1580
1581	if (u_mqstat) {
1582		new = &mqstat;
1583		if (get_compat_mq_attr(new, u_mqstat))
1584			return -EFAULT;
1585	}
1586	if (u_omqstat)
1587		old = &omqstat;
1588
1589	ret = do_mq_getsetattr(mqdes, new, old);
1590	if (ret || !old)
1591		return ret;
1592
1593	if (put_compat_mq_attr(old, u_omqstat))
1594		return -EFAULT;
1595	return 0;
1596}
1597#endif
1598
1599#ifdef CONFIG_COMPAT_32BIT_TIME
1600static int compat_prepare_timeout(const struct old_timespec32 __user *p,
1601				   struct timespec64 *ts)
1602{
1603	if (get_old_timespec32(ts, p))
1604		return -EFAULT;
1605	if (!timespec64_valid(ts))
1606		return -EINVAL;
1607	return 0;
1608}
1609
1610SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
1611		const char __user *, u_msg_ptr,
1612		unsigned int, msg_len, unsigned int, msg_prio,
1613		const struct old_timespec32 __user *, u_abs_timeout)
1614{
1615	struct timespec64 ts, *p = NULL;
1616	if (u_abs_timeout) {
1617		int res = compat_prepare_timeout(u_abs_timeout, &ts);
1618		if (res)
1619			return res;
1620		p = &ts;
1621	}
1622	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
1623}
1624
1625SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
1626		char __user *, u_msg_ptr,
1627		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
1628		const struct old_timespec32 __user *, u_abs_timeout)
1629{
1630	struct timespec64 ts, *p = NULL;
1631	if (u_abs_timeout) {
1632		int res = compat_prepare_timeout(u_abs_timeout, &ts);
1633		if (res)
1634			return res;
1635		p = &ts;
1636	}
1637	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
1638}
1639#endif
1640
1641static const struct inode_operations mqueue_dir_inode_operations = {
1642	.lookup = simple_lookup,
1643	.create = mqueue_create,
1644	.unlink = mqueue_unlink,
1645};
1646
1647static const struct file_operations mqueue_file_operations = {
1648	.flush = mqueue_flush_file,
1649	.poll = mqueue_poll_file,
1650	.read = mqueue_read_file,
1651	.llseek = default_llseek,
1652};
1653
1654static const struct super_operations mqueue_super_ops = {
1655	.alloc_inode = mqueue_alloc_inode,
1656	.free_inode = mqueue_free_inode,
1657	.evict_inode = mqueue_evict_inode,
1658	.statfs = simple_statfs,
1659};
1660
1661static const struct fs_context_operations mqueue_fs_context_ops = {
1662	.free		= mqueue_fs_context_free,
1663	.get_tree	= mqueue_get_tree,
1664};
1665
1666static struct file_system_type mqueue_fs_type = {
1667	.name			= "mqueue",
1668	.init_fs_context	= mqueue_init_fs_context,
1669	.kill_sb		= kill_litter_super,
1670	.fs_flags		= FS_USERNS_MOUNT,
1671};
1672
1673int mq_init_ns(struct ipc_namespace *ns)
1674{
1675	struct vfsmount *m;
1676
1677	ns->mq_queues_count  = 0;
1678	ns->mq_queues_max    = DFLT_QUEUESMAX;
1679	ns->mq_msg_max       = DFLT_MSGMAX;
1680	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
1681	ns->mq_msg_default   = DFLT_MSG;
1682	ns->mq_msgsize_default  = DFLT_MSGSIZE;
1683
1684	m = mq_create_mount(ns);
1685	if (IS_ERR(m))
1686		return PTR_ERR(m);
1687	ns->mq_mnt = m;
1688	return 0;
1689}
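/*
 * Illustrative userspace sketch (not part of this file): the per-
 * namespace defaults initialised above are exported as sysctls under
 * /proc/sys/fs/mqueue/ (queues_max, msg_max, msgsize_max, msg_default,
 * msgsize_default).  Reading one of them:
 */
#if 0
#include <stdio.h>

long read_msg_max(void)
{
	long val = -1;
	FILE *f = fopen("/proc/sys/fs/mqueue/msg_max", "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}
#endif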
1690
1691void mq_clear_sbinfo(struct ipc_namespace *ns)
1692{
1693	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
1694}
1695
1696void mq_put_mnt(struct ipc_namespace *ns)
1697{
1698	kern_unmount(ns->mq_mnt);
1699}
1700
1701static int __init init_mqueue_fs(void)
1702{
1703	int error;
1704
1705	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
1706				sizeof(struct mqueue_inode_info), 0,
1707				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
1708	if (mqueue_inode_cachep == NULL)
1709		return -ENOMEM;
1710
1711	/* ignore failures - they are not fatal */
1712	mq_sysctl_table = mq_register_sysctl_table();
1713
1714	error = register_filesystem(&mqueue_fs_type);
1715	if (error)
1716		goto out_sysctl;
1717
1718	spin_lock_init(&mq_lock);
1719
1720	error = mq_init_ns(&init_ipc_ns);
1721	if (error)
1722		goto out_filesystem;
1723
1724	return 0;
1725
1726out_filesystem:
1727	unregister_filesystem(&mqueue_fs_type);
1728out_sysctl:
1729	if (mq_sysctl_table)
1730		unregister_sysctl_table(mq_sysctl_table);
1731	kmem_cache_destroy(mqueue_inode_cachep);
1732	return error;
1733}
1734
1735device_initcall(init_mqueue_fs);