v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  linux/fs/pipe.c
   4 *
   5 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
   6 */
   7
   8#include <linux/mm.h>
   9#include <linux/file.h>
  10#include <linux/poll.h>
  11#include <linux/slab.h>
  12#include <linux/module.h>
  13#include <linux/init.h>
  14#include <linux/fs.h>
  15#include <linux/log2.h>
  16#include <linux/mount.h>
  17#include <linux/pseudo_fs.h>
  18#include <linux/magic.h>
  19#include <linux/pipe_fs_i.h>
  20#include <linux/uio.h>
  21#include <linux/highmem.h>
  22#include <linux/pagemap.h>
  23#include <linux/audit.h>
  24#include <linux/syscalls.h>
  25#include <linux/fcntl.h>
  26#include <linux/memcontrol.h>
  27#include <linux/watch_queue.h>
  28#include <linux/sysctl.h>
  29
  30#include <linux/uaccess.h>
  31#include <asm/ioctls.h>
  32
  33#include "internal.h"
  34
  35/*
  36 * New pipe buffers will be restricted to this size while the user is exceeding
  37 * their pipe buffer quota. The general pipe use case needs at least two
  38 * buffers: one for data yet to be read, and one for new data. If this is less
  39 * than two, then a write to a non-empty pipe may block even if the pipe is not
   40 * full. This can occur with the GNU make jobserver or similar uses of pipes as
  41 * semaphores: multiple processes may be waiting to write tokens back to the
  42 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
  43 *
  44 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
  45 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
  46 * emptied.
  47 */
  48#define PIPE_MIN_DEF_BUFFERS 2
  49
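/*
 * Illustrative userspace sketch of the risk described above (not part
 * of this file; assumes the Linux fcntl(2) F_SETPIPE_SZ interface and
 * PAGE_SIZE == 4096, with #includes and error handling omitted):
 *
 *	int fds[2];
 *
 *	pipe2(fds, O_DIRECT);			// packet mode: no merging
 *	fcntl(fds[1], F_SETPIPE_SZ, 4096);	// one page -> one buffer
 *	write(fds[1], "x", 1);			// occupies the only slot
 *	write(fds[1], "y", 1);			// blocks until "x" is read,
 *						// though only 1 of 4096
 *						// bytes is in flight
 */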
  50/*
  51 * The max size that a non-root user is allowed to grow the pipe. Can
  52 * be set by root in /proc/sys/fs/pipe-max-size
  53 */
  54static unsigned int pipe_max_size = 1048576;
  55
   56/* Maximum allocatable pages per user. The hard limit is unset by default;
   57 * the soft limit matches the defaults (PIPE_DEF_BUFFERS * INR_OPEN_CUR).
   58 */
  59static unsigned long pipe_user_pages_hard;
  60static unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
  61
  62/*
  63 * We use head and tail indices that aren't masked off, except at the point of
  64 * dereference, but rather they're allowed to wrap naturally.  This means there
  65 * isn't a dead spot in the buffer, but the ring has to be a power of two and
  66 * <= 2^31.
  67 * -- David Howells 2019-09-23.
  68 *
  69 * Reads with count = 0 should always return 0.
  70 * -- Julian Bradfield 1999-06-07.
  71 *
  72 * FIFOs and Pipes now generate SIGIO for both readers and writers.
  73 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
  74 *
  75 * pipe_read & write cleanup
  76 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
  77 */
  78
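/*
 * Worked example of the unmasked-index scheme above (illustrative):
 * with ring_size == 8 and mask == ring_size - 1 == 7, the 32-bit
 * indices wrap modulo 2^32, so e.g.
 *
 *	head = 0xfffffffe, tail = 0xfffffffc
 *	occupancy = head - tail = 2		(correct across the wrap)
 *	next slot to read = tail & mask = 4
 *	next slot to fill = head & mask = 6
 *
 * "& mask" only equals "% ring_size" when ring_size is a power of two,
 * which is why the ring size must be a power of two.
 */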
  79static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
  80{
  81	if (pipe->files)
  82		mutex_lock_nested(&pipe->mutex, subclass);
  83}
  84
  85void pipe_lock(struct pipe_inode_info *pipe)
  86{
  87	/*
  88	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
  89	 */
  90	pipe_lock_nested(pipe, I_MUTEX_PARENT);
  91}
  92EXPORT_SYMBOL(pipe_lock);
  93
  94void pipe_unlock(struct pipe_inode_info *pipe)
  95{
  96	if (pipe->files)
  97		mutex_unlock(&pipe->mutex);
  98}
  99EXPORT_SYMBOL(pipe_unlock);
 100
 101static inline void __pipe_lock(struct pipe_inode_info *pipe)
 102{
 103	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
 104}
 105
 106static inline void __pipe_unlock(struct pipe_inode_info *pipe)
 107{
 108	mutex_unlock(&pipe->mutex);
 109}
 110
 111void pipe_double_lock(struct pipe_inode_info *pipe1,
 112		      struct pipe_inode_info *pipe2)
 113{
 114	BUG_ON(pipe1 == pipe2);
 115
 116	if (pipe1 < pipe2) {
 117		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
 118		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
 119	} else {
 120		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
 121		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
 122	}
 123}
 124
 125static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
 126				  struct pipe_buffer *buf)
 127{
 128	struct page *page = buf->page;
 129
 130	/*
 131	 * If nobody else uses this page, and we don't already have a
 132	 * temporary page, let's keep track of it as a one-deep
 133	 * allocation cache. (Otherwise just release our reference to it)
 134	 */
 135	if (page_count(page) == 1 && !pipe->tmp_page)
 136		pipe->tmp_page = page;
 137	else
 138		put_page(page);
 139}
 140
 141static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
 142		struct pipe_buffer *buf)
 143{
 144	struct page *page = buf->page;
 145
 146	if (page_count(page) != 1)
 147		return false;
 148	memcg_kmem_uncharge_page(page, 0);
 149	__SetPageLocked(page);
 150	return true;
 151}
 152
 153/**
 154 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 155 * @pipe:	the pipe that the buffer belongs to
 156 * @buf:	the buffer to attempt to steal
 157 *
 158 * Description:
 159 *	This function attempts to steal the &struct page attached to
  160 *	@buf. If successful, this function returns true with the page
  161 *	locked. The caller may then reuse the page for whatever they
  162 *	wish; the typical use is insertion into a different file
 163 *	page cache.
 164 */
 165bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
 166		struct pipe_buffer *buf)
 167{
 168	struct page *page = buf->page;
 169
 170	/*
  171	 * A reference count of one is golden: it means that the owner of
  172	 * this page is the only one holding a reference to it. Lock the
  173	 * page and return true.
 174	 */
 175	if (page_count(page) == 1) {
 176		lock_page(page);
 177		return true;
 178	}
 179	return false;
 180}
 181EXPORT_SYMBOL(generic_pipe_buf_try_steal);
 182
 183/**
 184 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 185 * @pipe:	the pipe that the buffer belongs to
 186 * @buf:	the buffer to get a reference to
 187 *
 188 * Description:
 189 *	This function grabs an extra reference to @buf. It's used in
 190 *	the tee() system call, when we duplicate the buffers in one
 191 *	pipe into another.
 192 */
 193bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
 194{
 195	return try_get_page(buf->page);
 196}
 197EXPORT_SYMBOL(generic_pipe_buf_get);
 198
 199/**
 200 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 201 * @pipe:	the pipe that the buffer belongs to
 202 * @buf:	the buffer to put a reference to
 203 *
 204 * Description:
 205 *	This function releases a reference to @buf.
 206 */
 207void generic_pipe_buf_release(struct pipe_inode_info *pipe,
 208			      struct pipe_buffer *buf)
 209{
 210	put_page(buf->page);
 211}
 212EXPORT_SYMBOL(generic_pipe_buf_release);
 213
 214static const struct pipe_buf_operations anon_pipe_buf_ops = {
 215	.release	= anon_pipe_buf_release,
 216	.try_steal	= anon_pipe_buf_try_steal,
 217	.get		= generic_pipe_buf_get,
 218};
 219
 220/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
 221static inline bool pipe_readable(const struct pipe_inode_info *pipe)
 222{
 223	unsigned int head = READ_ONCE(pipe->head);
 224	unsigned int tail = READ_ONCE(pipe->tail);
 225	unsigned int writers = READ_ONCE(pipe->writers);
 226
 227	return !pipe_empty(head, tail) || !writers;
 228}
 229
 230static inline unsigned int pipe_update_tail(struct pipe_inode_info *pipe,
 231					    struct pipe_buffer *buf,
 232					    unsigned int tail)
 233{
 234	pipe_buf_release(pipe, buf);
 235
 236	/*
 237	 * If the pipe has a watch_queue, we need additional protection
 238	 * by the spinlock because notifications get posted with only
  239	 * this spinlock, no mutex.
 240	 */
 241	if (pipe_has_watch_queue(pipe)) {
 242		spin_lock_irq(&pipe->rd_wait.lock);
 243#ifdef CONFIG_WATCH_QUEUE
 244		if (buf->flags & PIPE_BUF_FLAG_LOSS)
 245			pipe->note_loss = true;
 246#endif
 247		pipe->tail = ++tail;
 248		spin_unlock_irq(&pipe->rd_wait.lock);
 249		return tail;
 250	}
 251
 252	/*
 253	 * Without a watch_queue, we can simply increment the tail
 254	 * without the spinlock - the mutex is enough.
 255	 */
 256	pipe->tail = ++tail;
 257	return tail;
 258}
 259
 260static ssize_t
 261pipe_read(struct kiocb *iocb, struct iov_iter *to)
 262{
 263	size_t total_len = iov_iter_count(to);
 264	struct file *filp = iocb->ki_filp;
 265	struct pipe_inode_info *pipe = filp->private_data;
 266	bool was_full, wake_next_reader = false;
 267	ssize_t ret;
 268
 269	/* Null read succeeds. */
 270	if (unlikely(total_len == 0))
 271		return 0;
 272
 273	ret = 0;
 274	__pipe_lock(pipe);
 275
 276	/*
 277	 * We only wake up writers if the pipe was full when we started
 278	 * reading in order to avoid unnecessary wakeups.
 279	 *
 280	 * But when we do wake up writers, we do so using a sync wakeup
 281	 * (WF_SYNC), because we want them to get going and generate more
 282	 * data for us.
 283	 */
 284	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
 285	for (;;) {
 286		/* Read ->head with a barrier vs post_one_notification() */
 287		unsigned int head = smp_load_acquire(&pipe->head);
 288		unsigned int tail = pipe->tail;
 289		unsigned int mask = pipe->ring_size - 1;
 290
 291#ifdef CONFIG_WATCH_QUEUE
 292		if (pipe->note_loss) {
 293			struct watch_notification n;
 294
 295			if (total_len < 8) {
 296				if (ret == 0)
 297					ret = -ENOBUFS;
 298				break;
 299			}
 300
 301			n.type = WATCH_TYPE_META;
 302			n.subtype = WATCH_META_LOSS_NOTIFICATION;
 303			n.info = watch_sizeof(n);
 304			if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
 305				if (ret == 0)
 306					ret = -EFAULT;
 307				break;
 308			}
 309			ret += sizeof(n);
 310			total_len -= sizeof(n);
 311			pipe->note_loss = false;
 312		}
 313#endif
 314
 315		if (!pipe_empty(head, tail)) {
 316			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
 317			size_t chars = buf->len;
 318			size_t written;
 319			int error;
 320
 321			if (chars > total_len) {
 322				if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
 323					if (ret == 0)
 324						ret = -ENOBUFS;
 325					break;
 326				}
 327				chars = total_len;
 328			}
 329
 330			error = pipe_buf_confirm(pipe, buf);
 331			if (error) {
 332				if (!ret)
 333					ret = error;
 334				break;
 335			}
 336
 337			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
 338			if (unlikely(written < chars)) {
 339				if (!ret)
 340					ret = -EFAULT;
 341				break;
 342			}
 343			ret += chars;
 344			buf->offset += chars;
 345			buf->len -= chars;
 346
 347			/* Was it a packet buffer? Clean up and exit */
 348			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
 349				total_len = chars;
 350				buf->len = 0;
 351			}
 352
 353			if (!buf->len)
 354				tail = pipe_update_tail(pipe, buf, tail);
 355			total_len -= chars;
 356			if (!total_len)
 357				break;	/* common path: read succeeded */
 358			if (!pipe_empty(head, tail))	/* More to do? */
 359				continue;
 360		}
 361
 362		if (!pipe->writers)
 363			break;
 364		if (ret)
 365			break;
 366		if ((filp->f_flags & O_NONBLOCK) ||
 367		    (iocb->ki_flags & IOCB_NOWAIT)) {
 368			ret = -EAGAIN;
 369			break;
 370		}
 371		__pipe_unlock(pipe);
 372
 373		/*
 374		 * We only get here if we didn't actually read anything.
 375		 *
 376		 * However, we could have seen (and removed) a zero-sized
 377		 * pipe buffer, and might have made space in the buffers
 378		 * that way.
 379		 *
 380		 * You can't make zero-sized pipe buffers by doing an empty
 381		 * write (not even in packet mode), but they can happen if
 382		 * the writer gets an EFAULT when trying to fill a buffer
 383		 * that already got allocated and inserted in the buffer
 384		 * array.
 385		 *
 386		 * So we still need to wake up any pending writers in the
 387		 * _very_ unlikely case that the pipe was full, but we got
 388		 * no data.
 389		 */
 390		if (unlikely(was_full))
 391			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
 392		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 393
 394		/*
 395		 * But because we didn't read anything, at this point we can
 396		 * just return directly with -ERESTARTSYS if we're interrupted,
 397		 * since we've done any required wakeups and there's no need
 398		 * to mark anything accessed. And we've dropped the lock.
 399		 */
 400		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
 401			return -ERESTARTSYS;
 402
 403		__pipe_lock(pipe);
 404		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
 405		wake_next_reader = true;
 406	}
 407	if (pipe_empty(pipe->head, pipe->tail))
 408		wake_next_reader = false;
 409	__pipe_unlock(pipe);
 410
 411	if (was_full)
 412		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
 413	if (wake_next_reader)
 414		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
 415	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 416	if (ret > 0)
 417		file_accessed(filp);
 418	return ret;
 419}
 420
 421static inline int is_packetized(struct file *file)
 422{
 423	return (file->f_flags & O_DIRECT) != 0;
 424}
 425
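/*
 * Illustrative userspace sketch of packet mode (not part of this file;
 * pipe2(2) with O_DIRECT, supported since Linux 3.4).  Each write()
 * produces one packet and each read() returns at most one packet,
 * which is what PIPE_BUF_FLAG_PACKET implements:
 *
 *	int fds[2];
 *	char buf[64];
 *
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "ab", 2);			// packet 1
 *	write(fds[1], "cd", 2);			// packet 2
 *	read(fds[0], buf, sizeof(buf));		// returns 2 ("ab"), not 4
 */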
 426/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
 427static inline bool pipe_writable(const struct pipe_inode_info *pipe)
 428{
 429	unsigned int head = READ_ONCE(pipe->head);
 430	unsigned int tail = READ_ONCE(pipe->tail);
 431	unsigned int max_usage = READ_ONCE(pipe->max_usage);
 432
 433	return !pipe_full(head, tail, max_usage) ||
 434		!READ_ONCE(pipe->readers);
 435}
 436
 437static ssize_t
 438pipe_write(struct kiocb *iocb, struct iov_iter *from)
 439{
 440	struct file *filp = iocb->ki_filp;
 441	struct pipe_inode_info *pipe = filp->private_data;
 442	unsigned int head;
 443	ssize_t ret = 0;
 444	size_t total_len = iov_iter_count(from);
 445	ssize_t chars;
 446	bool was_empty = false;
 447	bool wake_next_writer = false;
 448
 449	/*
 450	 * Reject writing to watch queue pipes before the point where we lock
 451	 * the pipe.
 452	 * Otherwise, lockdep would be unhappy if the caller already has another
 453	 * pipe locked.
 454	 * If we had to support locking a normal pipe and a notification pipe at
 455	 * the same time, we could set up lockdep annotations for that, but
 456	 * since we don't actually need that, it's simpler to just bail here.
 457	 */
 458	if (pipe_has_watch_queue(pipe))
 459		return -EXDEV;
 460
 461	/* Null write succeeds. */
 462	if (unlikely(total_len == 0))
 463		return 0;
 464
 465	__pipe_lock(pipe);
 466
 467	if (!pipe->readers) {
 468		send_sig(SIGPIPE, current, 0);
 469		ret = -EPIPE;
 470		goto out;
 471	}
 472
 473	/*
 474	 * If it wasn't empty we try to merge new data into
 475	 * the last buffer.
 476	 *
 477	 * That naturally merges small writes, but it also
 478	 * page-aligns the rest of the writes for large writes
 479	 * spanning multiple pages.
 480	 */
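	/*
	 * Worked example (illustrative, PAGE_SIZE == 4096 assumed): for a
	 * 5000-byte write, chars == 5000 & 4095 == 904.  If the last
	 * buffer can absorb those 904 bytes, the remaining 4096 bytes of
	 * the write start on a fresh, fully page-aligned buffer.
	 */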
 481	head = pipe->head;
 482	was_empty = pipe_empty(head, pipe->tail);
 483	chars = total_len & (PAGE_SIZE-1);
 484	if (chars && !was_empty) {
 485		unsigned int mask = pipe->ring_size - 1;
 486		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
 487		int offset = buf->offset + buf->len;
 488
 489		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
 490		    offset + chars <= PAGE_SIZE) {
 491			ret = pipe_buf_confirm(pipe, buf);
 492			if (ret)
 493				goto out;
 494
 495			ret = copy_page_from_iter(buf->page, offset, chars, from);
 496			if (unlikely(ret < chars)) {
 497				ret = -EFAULT;
 498				goto out;
 499			}
 500
 501			buf->len += ret;
 502			if (!iov_iter_count(from))
 503				goto out;
 504		}
 505	}
 506
 507	for (;;) {
 508		if (!pipe->readers) {
 509			send_sig(SIGPIPE, current, 0);
 510			if (!ret)
 511				ret = -EPIPE;
 512			break;
 513		}
 514
 515		head = pipe->head;
 516		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
 517			unsigned int mask = pipe->ring_size - 1;
 518			struct pipe_buffer *buf;
 519			struct page *page = pipe->tmp_page;
 520			int copied;
 521
 522			if (!page) {
 523				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
 524				if (unlikely(!page)) {
 525					ret = ret ? : -ENOMEM;
 526					break;
 527				}
 528				pipe->tmp_page = page;
 529			}
 530
 531			/* Allocate a slot in the ring in advance and attach an
 532			 * empty buffer.  If we fault or otherwise fail to use
 533			 * it, either the reader will consume it or it'll still
 534			 * be there for the next write.
 535			 */
 536			pipe->head = head + 1;
 537
 538			/* Insert it into the buffer array */
 539			buf = &pipe->bufs[head & mask];
 540			buf->page = page;
 541			buf->ops = &anon_pipe_buf_ops;
 542			buf->offset = 0;
 543			buf->len = 0;
 544			if (is_packetized(filp))
 545				buf->flags = PIPE_BUF_FLAG_PACKET;
 546			else
 547				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
 548			pipe->tmp_page = NULL;
 549
 550			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
 551			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
 552				if (!ret)
 553					ret = -EFAULT;
 554				break;
 555			}
 556			ret += copied;
 557			buf->len = copied;
 558
 559			if (!iov_iter_count(from))
 560				break;
 561		}
 562
 563		if (!pipe_full(head, pipe->tail, pipe->max_usage))
 564			continue;
 565
 566		/* Wait for buffer space to become available. */
 567		if ((filp->f_flags & O_NONBLOCK) ||
 568		    (iocb->ki_flags & IOCB_NOWAIT)) {
 569			if (!ret)
 570				ret = -EAGAIN;
 571			break;
 572		}
 573		if (signal_pending(current)) {
 574			if (!ret)
 575				ret = -ERESTARTSYS;
 576			break;
 577		}
 578
 579		/*
 580		 * We're going to release the pipe lock and wait for more
 581		 * space. We wake up any readers if necessary, and then
 582		 * after waiting we need to re-check whether the pipe
  583		 * became empty while we dropped the lock.
 584		 */
 585		__pipe_unlock(pipe);
 586		if (was_empty)
 587			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
 588		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 589		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
 590		__pipe_lock(pipe);
 591		was_empty = pipe_empty(pipe->head, pipe->tail);
 592		wake_next_writer = true;
 593	}
 594out:
 595	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
 596		wake_next_writer = false;
 597	__pipe_unlock(pipe);
 598
 599	/*
  600	 * If we do a wakeup event, we do a 'sync' wakeup, because we
 601	 * want the reader to start processing things asap, rather than
 602	 * leave the data pending.
 603	 *
 604	 * This is particularly important for small writes, because of
 605	 * how (for example) the GNU make jobserver uses small writes to
  606	 * wake up pending jobs.
 607	 *
 608	 * Epoll nonsensically wants a wakeup whether the pipe
 609	 * was already empty or not.
 610	 */
 611	if (was_empty || pipe->poll_usage)
 612		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
 613	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 614	if (wake_next_writer)
 615		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
 616	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
 617		int err = file_update_time(filp);
 618		if (err)
 619			ret = err;
 620		sb_end_write(file_inode(filp)->i_sb);
 621	}
 622	return ret;
 623}
 624
 625static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 626{
 627	struct pipe_inode_info *pipe = filp->private_data;
 628	unsigned int count, head, tail, mask;
 629
 630	switch (cmd) {
 631	case FIONREAD:
 632		__pipe_lock(pipe);
 633		count = 0;
 634		head = pipe->head;
 635		tail = pipe->tail;
 636		mask = pipe->ring_size - 1;
 637
 638		while (tail != head) {
 639			count += pipe->bufs[tail & mask].len;
 640			tail++;
 641		}
 642		__pipe_unlock(pipe);
 643
 644		return put_user(count, (int __user *)arg);
 645
 646#ifdef CONFIG_WATCH_QUEUE
 647	case IOC_WATCH_QUEUE_SET_SIZE: {
 648		int ret;
 649		__pipe_lock(pipe);
 650		ret = watch_queue_set_size(pipe, arg);
 651		__pipe_unlock(pipe);
 652		return ret;
 653	}
 654
 655	case IOC_WATCH_QUEUE_SET_FILTER:
 656		return watch_queue_set_filter(
 657			pipe, (struct watch_notification_filter __user *)arg);
 658#endif
 659
 660	default:
 661		return -ENOIOCTLCMD;
 662	}
 663}
 664
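/*
 * Illustrative userspace usage (not part of this file): FIONREAD
 * reports the number of unread bytes, i.e. the sum of the buffer
 * lengths computed above:
 *
 *	int n;
 *
 *	ioctl(fds[0], FIONREAD, &n);
 */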
 665/* No kernel lock held - fine */
 666static __poll_t
 667pipe_poll(struct file *filp, poll_table *wait)
 668{
 669	__poll_t mask;
 670	struct pipe_inode_info *pipe = filp->private_data;
 671	unsigned int head, tail;
 672
  673	/* Epoll has some historical nasty semantics; this enables them */
 674	WRITE_ONCE(pipe->poll_usage, true);
 675
 676	/*
  677	 * Reading pipe state only -- no need to acquire the mutex.
 678	 *
 679	 * But because this is racy, the code has to add the
 680	 * entry to the poll table _first_ ..
 681	 */
 682	if (filp->f_mode & FMODE_READ)
 683		poll_wait(filp, &pipe->rd_wait, wait);
 684	if (filp->f_mode & FMODE_WRITE)
 685		poll_wait(filp, &pipe->wr_wait, wait);
 686
 687	/*
 688	 * .. and only then can you do the racy tests. That way,
 689	 * if something changes and you got it wrong, the poll
 690	 * table entry will wake you up and fix it.
 691	 */
 692	head = READ_ONCE(pipe->head);
 693	tail = READ_ONCE(pipe->tail);
 694
 695	mask = 0;
 696	if (filp->f_mode & FMODE_READ) {
 697		if (!pipe_empty(head, tail))
 698			mask |= EPOLLIN | EPOLLRDNORM;
 699		if (!pipe->writers && filp->f_version != pipe->w_counter)
 700			mask |= EPOLLHUP;
 701	}
 702
 703	if (filp->f_mode & FMODE_WRITE) {
 704		if (!pipe_full(head, tail, pipe->max_usage))
 705			mask |= EPOLLOUT | EPOLLWRNORM;
 706		/*
 707		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
 708		 * behave exactly like pipes for poll().
 709		 */
 710		if (!pipe->readers)
 711			mask |= EPOLLERR;
 712	}
 713
 714	return mask;
 715}
 716
 717static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
 718{
 719	int kill = 0;
 720
 721	spin_lock(&inode->i_lock);
 722	if (!--pipe->files) {
 723		inode->i_pipe = NULL;
 724		kill = 1;
 725	}
 726	spin_unlock(&inode->i_lock);
 727
 728	if (kill)
 729		free_pipe_info(pipe);
 730}
 731
 732static int
 733pipe_release(struct inode *inode, struct file *file)
 734{
 735	struct pipe_inode_info *pipe = file->private_data;
 736
 737	__pipe_lock(pipe);
 738	if (file->f_mode & FMODE_READ)
 739		pipe->readers--;
 740	if (file->f_mode & FMODE_WRITE)
 741		pipe->writers--;
 742
 743	/* Was that the last reader or writer, but not the other side? */
 744	if (!pipe->readers != !pipe->writers) {
 745		wake_up_interruptible_all(&pipe->rd_wait);
 746		wake_up_interruptible_all(&pipe->wr_wait);
 747		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 748		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 749	}
 750	__pipe_unlock(pipe);
 751
 752	put_pipe_info(inode, pipe);
 753	return 0;
 754}
 755
 756static int
 757pipe_fasync(int fd, struct file *filp, int on)
 758{
 759	struct pipe_inode_info *pipe = filp->private_data;
 760	int retval = 0;
 761
 762	__pipe_lock(pipe);
 763	if (filp->f_mode & FMODE_READ)
 764		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
 765	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
 766		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
 767		if (retval < 0 && (filp->f_mode & FMODE_READ))
 768			/* this can happen only if on == T */
 769			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
 770	}
 771	__pipe_unlock(pipe);
 772	return retval;
 773}
 774
 775unsigned long account_pipe_buffers(struct user_struct *user,
 776				   unsigned long old, unsigned long new)
 777{
 778	return atomic_long_add_return(new - old, &user->pipe_bufs);
 779}
 780
 781bool too_many_pipe_buffers_soft(unsigned long user_bufs)
 782{
 783	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);
 784
 785	return soft_limit && user_bufs > soft_limit;
 786}
 787
 788bool too_many_pipe_buffers_hard(unsigned long user_bufs)
 789{
 790	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);
 791
 792	return hard_limit && user_bufs > hard_limit;
 793}
 794
 795bool pipe_is_unprivileged_user(void)
 796{
 797	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
 798}
 799
 800struct pipe_inode_info *alloc_pipe_info(void)
 801{
 802	struct pipe_inode_info *pipe;
 803	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
 804	struct user_struct *user = get_current_user();
 805	unsigned long user_bufs;
 806	unsigned int max_size = READ_ONCE(pipe_max_size);
 807
 808	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
 809	if (pipe == NULL)
 810		goto out_free_uid;
 811
 812	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
 813		pipe_bufs = max_size >> PAGE_SHIFT;
 814
 815	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
 816
 817	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
 818		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
 819		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
 820	}
 821
 822	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
 823		goto out_revert_acct;
 824
 825	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
 826			     GFP_KERNEL_ACCOUNT);
 827
 828	if (pipe->bufs) {
 829		init_waitqueue_head(&pipe->rd_wait);
 830		init_waitqueue_head(&pipe->wr_wait);
 831		pipe->r_counter = pipe->w_counter = 1;
 832		pipe->max_usage = pipe_bufs;
 833		pipe->ring_size = pipe_bufs;
 834		pipe->nr_accounted = pipe_bufs;
 835		pipe->user = user;
 836		mutex_init(&pipe->mutex);
 837		return pipe;
 838	}
 839
 840out_revert_acct:
 841	(void) account_pipe_buffers(user, pipe_bufs, 0);
 842	kfree(pipe);
 843out_free_uid:
 844	free_uid(user);
 845	return NULL;
 846}
 847
 848void free_pipe_info(struct pipe_inode_info *pipe)
 849{
 850	unsigned int i;
 851
 852#ifdef CONFIG_WATCH_QUEUE
 853	if (pipe->watch_queue)
 854		watch_queue_clear(pipe->watch_queue);
 855#endif
 856
 857	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
 858	free_uid(pipe->user);
 859	for (i = 0; i < pipe->ring_size; i++) {
 860		struct pipe_buffer *buf = pipe->bufs + i;
 861		if (buf->ops)
 862			pipe_buf_release(pipe, buf);
 863	}
 864#ifdef CONFIG_WATCH_QUEUE
 865	if (pipe->watch_queue)
 866		put_watch_queue(pipe->watch_queue);
 867#endif
 868	if (pipe->tmp_page)
 869		__free_page(pipe->tmp_page);
 870	kfree(pipe->bufs);
 871	kfree(pipe);
 872}
 873
 874static struct vfsmount *pipe_mnt __ro_after_init;
 875
 876/*
 877 * pipefs_dname() is called from d_path().
 878 */
 879static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
 880{
 881	return dynamic_dname(buffer, buflen, "pipe:[%lu]",
 882				d_inode(dentry)->i_ino);
 883}
 884
 885static const struct dentry_operations pipefs_dentry_operations = {
 886	.d_dname	= pipefs_dname,
 887};
 888
 889static struct inode * get_pipe_inode(void)
 890{
 891	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
 892	struct pipe_inode_info *pipe;
 893
 894	if (!inode)
 895		goto fail_inode;
 896
 897	inode->i_ino = get_next_ino();
 898
 899	pipe = alloc_pipe_info();
 900	if (!pipe)
 901		goto fail_iput;
 902
 903	inode->i_pipe = pipe;
 904	pipe->files = 2;
 905	pipe->readers = pipe->writers = 1;
 906	inode->i_fop = &pipefifo_fops;
 907
 908	/*
 909	 * Mark the inode dirty from the very beginning,
 910	 * that way it will never be moved to the dirty
 911	 * list because "mark_inode_dirty()" will think
 912	 * that it already _is_ on the dirty list.
 913	 */
 914	inode->i_state = I_DIRTY;
 915	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
 916	inode->i_uid = current_fsuid();
 917	inode->i_gid = current_fsgid();
 918	simple_inode_init_ts(inode);
 919
 920	return inode;
 921
 922fail_iput:
 923	iput(inode);
 924
 925fail_inode:
 926	return NULL;
 927}
 928
 929int create_pipe_files(struct file **res, int flags)
 930{
 931	struct inode *inode = get_pipe_inode();
 932	struct file *f;
 933	int error;
 934
 935	if (!inode)
 936		return -ENFILE;
 937
 938	if (flags & O_NOTIFICATION_PIPE) {
 939		error = watch_queue_init(inode->i_pipe);
 940		if (error) {
 941			free_pipe_info(inode->i_pipe);
 942			iput(inode);
 943			return error;
 944		}
 945	}
 946
 947	f = alloc_file_pseudo(inode, pipe_mnt, "",
 948				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
 949				&pipefifo_fops);
 950	if (IS_ERR(f)) {
 951		free_pipe_info(inode->i_pipe);
 952		iput(inode);
 953		return PTR_ERR(f);
 954	}
 955
 956	f->private_data = inode->i_pipe;
 957
 958	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
 959				  &pipefifo_fops);
 960	if (IS_ERR(res[0])) {
 961		put_pipe_info(inode, inode->i_pipe);
 962		fput(f);
 963		return PTR_ERR(res[0]);
 964	}
 965	res[0]->private_data = inode->i_pipe;
 966	res[1] = f;
 967	stream_open(inode, res[0]);
 968	stream_open(inode, res[1]);
 969	return 0;
 970}
 971
 972static int __do_pipe_flags(int *fd, struct file **files, int flags)
 973{
 974	int error;
 975	int fdw, fdr;
 976
 977	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
 978		return -EINVAL;
 979
 980	error = create_pipe_files(files, flags);
 981	if (error)
 982		return error;
 983
 984	error = get_unused_fd_flags(flags);
 985	if (error < 0)
 986		goto err_read_pipe;
 987	fdr = error;
 988
 989	error = get_unused_fd_flags(flags);
 990	if (error < 0)
 991		goto err_fdr;
 992	fdw = error;
 993
 994	audit_fd_pair(fdr, fdw);
 995	fd[0] = fdr;
 996	fd[1] = fdw;
 997	/* pipe groks IOCB_NOWAIT */
 998	files[0]->f_mode |= FMODE_NOWAIT;
 999	files[1]->f_mode |= FMODE_NOWAIT;
1000	return 0;
1001
1002 err_fdr:
1003	put_unused_fd(fdr);
1004 err_read_pipe:
1005	fput(files[0]);
1006	fput(files[1]);
1007	return error;
1008}
1009
1010int do_pipe_flags(int *fd, int flags)
1011{
1012	struct file *files[2];
1013	int error = __do_pipe_flags(fd, files, flags);
1014	if (!error) {
1015		fd_install(fd[0], files[0]);
1016		fd_install(fd[1], files[1]);
1017	}
1018	return error;
1019}
1020
1021/*
1022 * sys_pipe() is the normal C calling standard for creating
1023 * a pipe. It's not the way Unix traditionally does this, though.
1024 */
1025static int do_pipe2(int __user *fildes, int flags)
1026{
1027	struct file *files[2];
1028	int fd[2];
1029	int error;
1030
1031	error = __do_pipe_flags(fd, files, flags);
1032	if (!error) {
1033		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
1034			fput(files[0]);
1035			fput(files[1]);
1036			put_unused_fd(fd[0]);
1037			put_unused_fd(fd[1]);
1038			error = -EFAULT;
1039		} else {
1040			fd_install(fd[0], files[0]);
1041			fd_install(fd[1], files[1]);
1042		}
1043	}
1044	return error;
1045}
1046
1047SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
1048{
1049	return do_pipe2(fildes, flags);
1050}
1051
1052SYSCALL_DEFINE1(pipe, int __user *, fildes)
1053{
1054	return do_pipe2(fildes, 0);
1055}
1056
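/*
 * Illustrative userspace usage (not part of this file): both syscalls
 * above funnel into do_pipe2():
 *
 *	int fds[2];
 *
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *		perror("pipe2");
 *	// fds[0] is the read end, fds[1] the write end
 */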
1057/*
1058 * This is the stupid "wait for pipe to be readable or writable"
1059 * model.
1060 *
1061 * See pipe_read/write() for the proper kind of exclusive wait,
1062 * but that requires that we wake up any other readers/writers
1063 * if we then do not end up reading everything (ie the whole
1064 * "wake_next_reader/writer" logic in pipe_read/write()).
1065 */
1066void pipe_wait_readable(struct pipe_inode_info *pipe)
1067{
1068	pipe_unlock(pipe);
1069	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
1070	pipe_lock(pipe);
1071}
1072
1073void pipe_wait_writable(struct pipe_inode_info *pipe)
1074{
1075	pipe_unlock(pipe);
1076	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
1077	pipe_lock(pipe);
1078}
1079
1080/*
1081 * This depends on both the wait (here) and the wakeup (wake_up_partner)
1082 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
1083 * race with the count check and waitqueue prep.
1084 *
1085 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
1086 * then check the condition you're waiting for, and only then sleep. But
1087 * because of the pipe lock, we can check the condition before being on
1088 * the wait queue.
1089 *
1090 * We use the 'rd_wait' waitqueue for pipe partner waiting.
1091 */
1092static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
1093{
1094	DEFINE_WAIT(rdwait);
1095	int cur = *cnt;
1096
1097	while (cur == *cnt) {
1098		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
1099		pipe_unlock(pipe);
1100		schedule();
1101		finish_wait(&pipe->rd_wait, &rdwait);
1102		pipe_lock(pipe);
1103		if (signal_pending(current))
1104			break;
1105	}
1106	return cur == *cnt ? -ERESTARTSYS : 0;
1107}
1108
1109static void wake_up_partner(struct pipe_inode_info *pipe)
1110{
1111	wake_up_interruptible_all(&pipe->rd_wait);
1112}
1113
1114static int fifo_open(struct inode *inode, struct file *filp)
1115{
1116	struct pipe_inode_info *pipe;
1117	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
1118	int ret;
1119
1120	filp->f_version = 0;
1121
1122	spin_lock(&inode->i_lock);
1123	if (inode->i_pipe) {
1124		pipe = inode->i_pipe;
1125		pipe->files++;
1126		spin_unlock(&inode->i_lock);
1127	} else {
1128		spin_unlock(&inode->i_lock);
1129		pipe = alloc_pipe_info();
1130		if (!pipe)
1131			return -ENOMEM;
1132		pipe->files = 1;
1133		spin_lock(&inode->i_lock);
1134		if (unlikely(inode->i_pipe)) {
1135			inode->i_pipe->files++;
1136			spin_unlock(&inode->i_lock);
1137			free_pipe_info(pipe);
1138			pipe = inode->i_pipe;
1139		} else {
1140			inode->i_pipe = pipe;
1141			spin_unlock(&inode->i_lock);
1142		}
1143	}
1144	filp->private_data = pipe;
1145	/* OK, we have a pipe and it's pinned down */
1146
1147	__pipe_lock(pipe);
1148
1149	/* We can only do regular read/write on fifos */
1150	stream_open(inode, filp);
1151
1152	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
1153	case FMODE_READ:
1154	/*
1155	 *  O_RDONLY
1156	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
1157	 *  opened, even when there is no process writing the FIFO.
1158	 */
1159		pipe->r_counter++;
1160		if (pipe->readers++ == 0)
1161			wake_up_partner(pipe);
1162
1163		if (!is_pipe && !pipe->writers) {
1164			if ((filp->f_flags & O_NONBLOCK)) {
1165				/* suppress EPOLLHUP until we have
1166				 * seen a writer */
1167				filp->f_version = pipe->w_counter;
1168			} else {
1169				if (wait_for_partner(pipe, &pipe->w_counter))
1170					goto err_rd;
1171			}
1172		}
1173		break;
1174
1175	case FMODE_WRITE:
1176	/*
1177	 *  O_WRONLY
1178	 *  POSIX.1 says that O_NONBLOCK means return -1 with
1179	 *  errno=ENXIO when there is no process reading the FIFO.
1180	 */
1181		ret = -ENXIO;
1182		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
1183			goto err;
1184
1185		pipe->w_counter++;
1186		if (!pipe->writers++)
1187			wake_up_partner(pipe);
1188
1189		if (!is_pipe && !pipe->readers) {
1190			if (wait_for_partner(pipe, &pipe->r_counter))
1191				goto err_wr;
1192		}
1193		break;
1194
1195	case FMODE_READ | FMODE_WRITE:
1196	/*
1197	 *  O_RDWR
1198	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
 1199	 *  This implementation will NEVER block on an O_RDWR open, since
1200	 *  the process can at least talk to itself.
1201	 */
1202
1203		pipe->readers++;
1204		pipe->writers++;
1205		pipe->r_counter++;
1206		pipe->w_counter++;
1207		if (pipe->readers == 1 || pipe->writers == 1)
1208			wake_up_partner(pipe);
1209		break;
1210
1211	default:
1212		ret = -EINVAL;
1213		goto err;
1214	}
1215
1216	/* Ok! */
1217	__pipe_unlock(pipe);
1218	return 0;
1219
1220err_rd:
1221	if (!--pipe->readers)
1222		wake_up_interruptible(&pipe->wr_wait);
1223	ret = -ERESTARTSYS;
1224	goto err;
1225
1226err_wr:
1227	if (!--pipe->writers)
1228		wake_up_interruptible_all(&pipe->rd_wait);
1229	ret = -ERESTARTSYS;
1230	goto err;
1231
1232err:
1233	__pipe_unlock(pipe);
1234
1235	put_pipe_info(inode, pipe);
1236	return ret;
1237}
1238
1239const struct file_operations pipefifo_fops = {
1240	.open		= fifo_open,
1241	.llseek		= no_llseek,
1242	.read_iter	= pipe_read,
1243	.write_iter	= pipe_write,
1244	.poll		= pipe_poll,
1245	.unlocked_ioctl	= pipe_ioctl,
1246	.release	= pipe_release,
1247	.fasync		= pipe_fasync,
1248	.splice_write	= iter_file_splice_write,
1249};
1250
1251/*
1252 * Currently we rely on the pipe array holding a power-of-2 number
1253 * of pages. Returns 0 on error.
1254 */
1255unsigned int round_pipe_size(unsigned int size)
1256{
1257	if (size > (1U << 31))
1258		return 0;
1259
1260	/* Minimum pipe size, as required by POSIX */
1261	if (size < PAGE_SIZE)
1262		return PAGE_SIZE;
1263
1264	return roundup_pow_of_two(size);
1265}
1266
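/*
 * Worked examples (illustrative, PAGE_SIZE == 4096 assumed):
 * round_pipe_size(1) == 4096 (the POSIX minimum), round_pipe_size(70000)
 * == 131072 (the next power of two), and round_pipe_size((1U << 31) + 1)
 * == 0, since sizes above 2^31 are rejected.
 */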
1267/*
1268 * Resize the pipe ring to a number of slots.
1269 *
1270 * Note the pipe can be reduced in capacity, but only if the current
1271 * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
1272 * returned instead.
1273 */
1274int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
1275{
1276	struct pipe_buffer *bufs;
1277	unsigned int head, tail, mask, n;
1278
1279	bufs = kcalloc(nr_slots, sizeof(*bufs),
1280		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
1281	if (unlikely(!bufs))
1282		return -ENOMEM;
1283
1284	spin_lock_irq(&pipe->rd_wait.lock);
1285	mask = pipe->ring_size - 1;
1286	head = pipe->head;
1287	tail = pipe->tail;
1288
1289	n = pipe_occupancy(head, tail);
1290	if (nr_slots < n) {
1291		spin_unlock_irq(&pipe->rd_wait.lock);
1292		kfree(bufs);
1293		return -EBUSY;
1294	}
1295
1296	/*
1297	 * The pipe array wraps around, so just start the new one at zero
1298	 * and adjust the indices.
1299	 */
1300	if (n > 0) {
1301		unsigned int h = head & mask;
1302		unsigned int t = tail & mask;
1303		if (h > t) {
1304			memcpy(bufs, pipe->bufs + t,
1305			       n * sizeof(struct pipe_buffer));
1306		} else {
1307			unsigned int tsize = pipe->ring_size - t;
1308			if (h > 0)
1309				memcpy(bufs + tsize, pipe->bufs,
1310				       h * sizeof(struct pipe_buffer));
1311			memcpy(bufs, pipe->bufs + t,
1312			       tsize * sizeof(struct pipe_buffer));
1313		}
1314	}
1315
1316	head = n;
1317	tail = 0;
1318
1319	kfree(pipe->bufs);
1320	pipe->bufs = bufs;
1321	pipe->ring_size = nr_slots;
1322	if (pipe->max_usage > nr_slots)
1323		pipe->max_usage = nr_slots;
1324	pipe->tail = tail;
1325	pipe->head = head;
1326
1327	if (!pipe_has_watch_queue(pipe)) {
1328		pipe->max_usage = nr_slots;
1329		pipe->nr_accounted = nr_slots;
1330	}
1331
1332	spin_unlock_irq(&pipe->rd_wait.lock);
1333
1334	/* This might have made more room for writers */
1335	wake_up_interruptible(&pipe->wr_wait);
1336	return 0;
1337}
1338
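/*
 * Worked example of the wrap-around copy above (illustrative): with
 * ring_size == 4, tail == 3 and head == 5, n == 2 and the occupied
 * slots are bufs[3] and bufs[0] (t == 3, h == 1, so h <= t).  Then
 * tsize == 1: old slot 3 is copied to new slot 0 and old slot 0 to
 * new slot 1, leaving tail == 0 and head == 2 in the unwrapped ring.
 */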
1339/*
1340 * Allocate a new array of pipe buffers and copy the info over. Returns the
 1341 * new pipe size if successful, or a negative error code on failure.
1342 */
1343static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
1344{
1345	unsigned long user_bufs;
1346	unsigned int nr_slots, size;
1347	long ret = 0;
1348
1349	if (pipe_has_watch_queue(pipe))
1350		return -EBUSY;
1351
1352	size = round_pipe_size(arg);
1353	nr_slots = size >> PAGE_SHIFT;
1354
1355	if (!nr_slots)
1356		return -EINVAL;
1357
1358	/*
1359	 * If trying to increase the pipe capacity, check that an
1360	 * unprivileged user is not trying to exceed various limits
1361	 * (soft limit check here, hard limit check just below).
1362	 * Decreasing the pipe capacity is always permitted, even
1363	 * if the user is currently over a limit.
1364	 */
1365	if (nr_slots > pipe->max_usage &&
1366			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
1367		return -EPERM;
1368
1369	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);
1370
1371	if (nr_slots > pipe->max_usage &&
1372			(too_many_pipe_buffers_hard(user_bufs) ||
1373			 too_many_pipe_buffers_soft(user_bufs)) &&
1374			pipe_is_unprivileged_user()) {
1375		ret = -EPERM;
1376		goto out_revert_acct;
1377	}
1378
1379	ret = pipe_resize_ring(pipe, nr_slots);
1380	if (ret < 0)
1381		goto out_revert_acct;
1382
1383	return pipe->max_usage * PAGE_SIZE;
1384
1385out_revert_acct:
1386	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
1387	return ret;
1388}
1389
1390/*
1391 * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
1392 * not enough to verify that this is a pipe.
1393 */
1394struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
1395{
1396	struct pipe_inode_info *pipe = file->private_data;
1397
1398	if (file->f_op != &pipefifo_fops || !pipe)
1399		return NULL;
1400	if (for_splice && pipe_has_watch_queue(pipe))
1401		return NULL;
1402	return pipe;
1403}
1404
1405long pipe_fcntl(struct file *file, unsigned int cmd, unsigned int arg)
1406{
1407	struct pipe_inode_info *pipe;
1408	long ret;
1409
1410	pipe = get_pipe_info(file, false);
1411	if (!pipe)
1412		return -EBADF;
1413
1414	__pipe_lock(pipe);
1415
1416	switch (cmd) {
1417	case F_SETPIPE_SZ:
1418		ret = pipe_set_size(pipe, arg);
1419		break;
1420	case F_GETPIPE_SZ:
1421		ret = pipe->max_usage * PAGE_SIZE;
1422		break;
1423	default:
1424		ret = -EINVAL;
1425		break;
1426	}
1427
1428	__pipe_unlock(pipe);
1429	return ret;
1430}
1431
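/*
 * Illustrative userspace usage (not part of this file): growing a pipe
 * and reading back the capacity actually granted, which may have been
 * rounded up by round_pipe_size():
 *
 *	long sz  = fcntl(fds[1], F_SETPIPE_SZ, 1 << 20); // request 1 MiB
 *	long cur = fcntl(fds[1], F_GETPIPE_SZ);          // granted size
 */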
1432static const struct super_operations pipefs_ops = {
1433	.destroy_inode = free_inode_nonrcu,
1434	.statfs = simple_statfs,
1435};
1436
1437/*
 1438 * pipefs should _never_ be mounted by userland - too much of a security hassle,
1439 * no real gain from having the whole whorehouse mounted. So we don't need
1440 * any operations on the root directory. However, we need a non-trivial
 1441 * d_name - "pipe:" will go nicely and kill the special-casing in procfs.
1442 */
1443
1444static int pipefs_init_fs_context(struct fs_context *fc)
1445{
1446	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
1447	if (!ctx)
1448		return -ENOMEM;
1449	ctx->ops = &pipefs_ops;
1450	ctx->dops = &pipefs_dentry_operations;
1451	return 0;
1452}
1453
1454static struct file_system_type pipe_fs_type = {
1455	.name		= "pipefs",
1456	.init_fs_context = pipefs_init_fs_context,
1457	.kill_sb	= kill_anon_super,
1458};
1459
1460#ifdef CONFIG_SYSCTL
1461static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
1462					unsigned int *valp,
1463					int write, void *data)
1464{
1465	if (write) {
1466		unsigned int val;
1467
1468		val = round_pipe_size(*lvalp);
1469		if (val == 0)
1470			return -EINVAL;
1471
1472		*valp = val;
1473	} else {
1474		unsigned int val = *valp;
1475		*lvalp = (unsigned long) val;
1476	}
1477
1478	return 0;
1479}
1480
1481static int proc_dopipe_max_size(struct ctl_table *table, int write,
1482				void *buffer, size_t *lenp, loff_t *ppos)
1483{
1484	return do_proc_douintvec(table, write, buffer, lenp, ppos,
1485				 do_proc_dopipe_max_size_conv, NULL);
1486}
1487
1488static struct ctl_table fs_pipe_sysctls[] = {
1489	{
1490		.procname	= "pipe-max-size",
1491		.data		= &pipe_max_size,
1492		.maxlen		= sizeof(pipe_max_size),
1493		.mode		= 0644,
1494		.proc_handler	= proc_dopipe_max_size,
1495	},
1496	{
1497		.procname	= "pipe-user-pages-hard",
1498		.data		= &pipe_user_pages_hard,
1499		.maxlen		= sizeof(pipe_user_pages_hard),
1500		.mode		= 0644,
1501		.proc_handler	= proc_doulongvec_minmax,
1502	},
1503	{
1504		.procname	= "pipe-user-pages-soft",
1505		.data		= &pipe_user_pages_soft,
1506		.maxlen		= sizeof(pipe_user_pages_soft),
1507		.mode		= 0644,
1508		.proc_handler	= proc_doulongvec_minmax,
1509	},
1510};
1511#endif
1512
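/*
 * Illustrative administration example (not part of this file): the
 * sysctl table above is registered from init_pipe_fs() below and shows
 * up under /proc/sys/fs, so e.g.
 *
 *	# echo 2097152 > /proc/sys/fs/pipe-max-size
 *
 * raises the F_SETPIPE_SZ ceiling for unprivileged users to 2 MiB.
 */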
1513static int __init init_pipe_fs(void)
1514{
1515	int err = register_filesystem(&pipe_fs_type);
1516
1517	if (!err) {
1518		pipe_mnt = kern_mount(&pipe_fs_type);
1519		if (IS_ERR(pipe_mnt)) {
1520			err = PTR_ERR(pipe_mnt);
1521			unregister_filesystem(&pipe_fs_type);
1522		}
1523	}
1524#ifdef CONFIG_SYSCTL
1525	register_sysctl_init("fs", fs_pipe_sysctls);
1526#endif
1527	return err;
1528}
1529
1530fs_initcall(init_pipe_fs);
v3.5.6
 
   1/*
   2 *  linux/fs/pipe.c
   3 *
   4 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
   5 */
   6
   7#include <linux/mm.h>
   8#include <linux/file.h>
   9#include <linux/poll.h>
  10#include <linux/slab.h>
  11#include <linux/module.h>
  12#include <linux/init.h>
  13#include <linux/fs.h>
  14#include <linux/log2.h>
  15#include <linux/mount.h>
 
  16#include <linux/magic.h>
  17#include <linux/pipe_fs_i.h>
  18#include <linux/uio.h>
  19#include <linux/highmem.h>
  20#include <linux/pagemap.h>
  21#include <linux/audit.h>
  22#include <linux/syscalls.h>
  23#include <linux/fcntl.h>
 
 
 
  24
  25#include <asm/uaccess.h>
  26#include <asm/ioctls.h>
  27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  28/*
  29 * The max size that a non-root user is allowed to grow the pipe. Can
  30 * be set by root in /proc/sys/fs/pipe-max-size
  31 */
  32unsigned int pipe_max_size = 1048576;
  33
  34/*
  35 * Minimum pipe size, as required by POSIX
  36 */
  37unsigned int pipe_min_size = PAGE_SIZE;
 
  38
  39/*
  40 * We use a start+len construction, which provides full use of the 
  41 * allocated memory.
  42 * -- Florian Coosmann (FGC)
  43 * 
 
 
  44 * Reads with count = 0 should always return 0.
  45 * -- Julian Bradfield 1999-06-07.
  46 *
  47 * FIFOs and Pipes now generate SIGIO for both readers and writers.
  48 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
  49 *
  50 * pipe_read & write cleanup
  51 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
  52 */
  53
  54static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
  55{
  56	if (pipe->inode)
  57		mutex_lock_nested(&pipe->inode->i_mutex, subclass);
  58}
  59
  60void pipe_lock(struct pipe_inode_info *pipe)
  61{
  62	/*
  63	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
  64	 */
  65	pipe_lock_nested(pipe, I_MUTEX_PARENT);
  66}
  67EXPORT_SYMBOL(pipe_lock);
  68
  69void pipe_unlock(struct pipe_inode_info *pipe)
  70{
  71	if (pipe->inode)
  72		mutex_unlock(&pipe->inode->i_mutex);
  73}
  74EXPORT_SYMBOL(pipe_unlock);
  75
 
 
 
 
 
 
 
 
 
 
  76void pipe_double_lock(struct pipe_inode_info *pipe1,
  77		      struct pipe_inode_info *pipe2)
  78{
  79	BUG_ON(pipe1 == pipe2);
  80
  81	if (pipe1 < pipe2) {
  82		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
  83		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
  84	} else {
  85		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
  86		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
  87	}
  88}
  89
  90/* Drop the inode semaphore and wait for a pipe event, atomically */
  91void pipe_wait(struct pipe_inode_info *pipe)
  92{
  93	DEFINE_WAIT(wait);
  94
  95	/*
  96	 * Pipes are system-local resources, so sleeping on them
  97	 * is considered a noninteractive wait:
  98	 */
  99	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
 100	pipe_unlock(pipe);
 101	schedule();
 102	finish_wait(&pipe->wait, &wait);
 103	pipe_lock(pipe);
 104}
 105
 106static int
 107pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
 108			int atomic)
 109{
 110	unsigned long copy;
 111
 112	while (len > 0) {
 113		while (!iov->iov_len)
 114			iov++;
 115		copy = min_t(unsigned long, len, iov->iov_len);
 116
 117		if (atomic) {
 118			if (__copy_from_user_inatomic(to, iov->iov_base, copy))
 119				return -EFAULT;
 120		} else {
 121			if (copy_from_user(to, iov->iov_base, copy))
 122				return -EFAULT;
 123		}
 124		to += copy;
 125		len -= copy;
 126		iov->iov_base += copy;
 127		iov->iov_len -= copy;
 128	}
 129	return 0;
 130}
 131
 132static int
 133pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
 134		      int atomic)
 135{
 136	unsigned long copy;
 137
 138	while (len > 0) {
 139		while (!iov->iov_len)
 140			iov++;
 141		copy = min_t(unsigned long, len, iov->iov_len);
 142
 143		if (atomic) {
 144			if (__copy_to_user_inatomic(iov->iov_base, from, copy))
 145				return -EFAULT;
 146		} else {
 147			if (copy_to_user(iov->iov_base, from, copy))
 148				return -EFAULT;
 149		}
 150		from += copy;
 151		len -= copy;
 152		iov->iov_base += copy;
 153		iov->iov_len -= copy;
 154	}
 155	return 0;
 156}
 157
 158/*
 159 * Attempt to pre-fault in the user memory, so we can use atomic copies.
 160 * Returns the number of bytes not faulted in.
 161 */
 162static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
 163{
 164	while (!iov->iov_len)
 165		iov++;
 166
 167	while (len > 0) {
 168		unsigned long this_len;
 169
 170		this_len = min_t(unsigned long, len, iov->iov_len);
 171		if (fault_in_pages_writeable(iov->iov_base, this_len))
 172			break;
 173
 174		len -= this_len;
 175		iov++;
 176	}
 177
 178	return len;
 179}
 180
 181/*
 182 * Pre-fault in the user memory, so we can use atomic copies.
 183 */
 184static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
 185{
 186	while (!iov->iov_len)
 187		iov++;
 188
 189	while (len > 0) {
 190		unsigned long this_len;
 191
 192		this_len = min_t(unsigned long, len, iov->iov_len);
 193		fault_in_pages_readable(iov->iov_base, this_len);
 194		len -= this_len;
 195		iov++;
 196	}
 197}
 198
 199static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
 200				  struct pipe_buffer *buf)
 201{
 202	struct page *page = buf->page;
 203
 204	/*
 205	 * If nobody else uses this page, and we don't already have a
 206	 * temporary page, let's keep track of it as a one-deep
 207	 * allocation cache. (Otherwise just release our reference to it)
 208	 */
 209	if (page_count(page) == 1 && !pipe->tmp_page)
 210		pipe->tmp_page = page;
 211	else
 212		page_cache_release(page);
 213}
 214
 215/**
 216 * generic_pipe_buf_map - virtually map a pipe buffer
 217 * @pipe:	the pipe that the buffer belongs to
 218 * @buf:	the buffer that should be mapped
 219 * @atomic:	whether to use an atomic map
 220 *
 221 * Description:
 222 *	This function returns a kernel virtual address mapping for the
 223 *	pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
 224 *	and the caller has to be careful not to fault before calling
 225 *	the unmap function.
 226 *
 227 *	Note that this function occupies KM_USER0 if @atomic != 0.
 228 */
 229void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
 230			   struct pipe_buffer *buf, int atomic)
 231{
 232	if (atomic) {
 233		buf->flags |= PIPE_BUF_FLAG_ATOMIC;
 234		return kmap_atomic(buf->page);
 235	}
 236
 237	return kmap(buf->page);
 
 
 
 
 238}
 239EXPORT_SYMBOL(generic_pipe_buf_map);
 240
 241/**
 242 * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
 243 * @pipe:	the pipe that the buffer belongs to
 244 * @buf:	the buffer that should be unmapped
 245 * @map_data:	the data that the mapping function returned
 246 *
 247 * Description:
 248 *	This function undoes the mapping that ->map() provided.
 249 */
 250void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
 251			    struct pipe_buffer *buf, void *map_data)
 252{
 253	if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
 254		buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
 255		kunmap_atomic(map_data);
 256	} else
 257		kunmap(buf->page);
 258}
 259EXPORT_SYMBOL(generic_pipe_buf_unmap);
 260
 261/**
 262 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 263 * @pipe:	the pipe that the buffer belongs to
 264 * @buf:	the buffer to attempt to steal
 265 *
 266 * Description:
 267 *	This function attempts to steal the &struct page attached to
 268 *	@buf. If successful, this function returns 0 and returns with
 269 *	the page locked. The caller may then reuse the page for whatever
 270 *	he wishes; the typical use is insertion into a different file
 271 *	page cache.
 272 */
 273int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
 274			   struct pipe_buffer *buf)
 275{
 276	struct page *page = buf->page;
 277
 278	/*
 279	 * A reference of one is golden, that means that the owner of this
 280	 * page is the only one holding a reference to it. lock the page
 281	 * and return OK.
 282	 */
 283	if (page_count(page) == 1) {
 284		lock_page(page);
 285		return 0;
 286	}
 287
 288	return 1;
 289}
 290EXPORT_SYMBOL(generic_pipe_buf_steal);
 291
 292/**
 293 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 294 * @pipe:	the pipe that the buffer belongs to
 295 * @buf:	the buffer to get a reference to
 296 *
 297 * Description:
 298 *	This function grabs an extra reference to @buf. It's used in
 299 *	in the tee() system call, when we duplicate the buffers in one
 300 *	pipe into another.
 301 */
 302void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
 303{
 304	page_cache_get(buf->page);
 305}
 306EXPORT_SYMBOL(generic_pipe_buf_get);
 307
 308/**
 309 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 310 * @info:	the pipe that the buffer belongs to
 311 * @buf:	the buffer to confirm
 312 *
 313 * Description:
 314 *	This function does nothing, because the generic pipe code uses
 315 *	pages that are always good when inserted into the pipe.
 316 */
 317int generic_pipe_buf_confirm(struct pipe_inode_info *info,
 318			     struct pipe_buffer *buf)
 319{
 320	return 0;
 321}
 322EXPORT_SYMBOL(generic_pipe_buf_confirm);
 323
 324/**
 325 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 326 * @pipe:	the pipe that the buffer belongs to
 327 * @buf:	the buffer to put a reference to
 328 *
 329 * Description:
 330 *	This function releases a reference to @buf.
 331 */
 332void generic_pipe_buf_release(struct pipe_inode_info *pipe,
 333			      struct pipe_buffer *buf)
 334{
 335	page_cache_release(buf->page);
 336}
 337EXPORT_SYMBOL(generic_pipe_buf_release);
 338
 339static const struct pipe_buf_operations anon_pipe_buf_ops = {
 340	.can_merge = 1,
 341	.map = generic_pipe_buf_map,
 342	.unmap = generic_pipe_buf_unmap,
 343	.confirm = generic_pipe_buf_confirm,
 344	.release = anon_pipe_buf_release,
 345	.steal = generic_pipe_buf_steal,
 346	.get = generic_pipe_buf_get,
 347};
 348
 349static const struct pipe_buf_operations packet_pipe_buf_ops = {
 350	.can_merge = 0,
 351	.map = generic_pipe_buf_map,
 352	.unmap = generic_pipe_buf_unmap,
 353	.confirm = generic_pipe_buf_confirm,
 354	.release = anon_pipe_buf_release,
 355	.steal = generic_pipe_buf_steal,
 356	.get = generic_pipe_buf_get,
 357};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 358
 359static ssize_t
 360pipe_read(struct kiocb *iocb, const struct iovec *_iov,
 361	   unsigned long nr_segs, loff_t pos)
 362{
 
 363	struct file *filp = iocb->ki_filp;
 364	struct inode *inode = filp->f_path.dentry->d_inode;
 365	struct pipe_inode_info *pipe;
 366	int do_wakeup;
 367	ssize_t ret;
 368	struct iovec *iov = (struct iovec *)_iov;
 369	size_t total_len;
 370
 371	total_len = iov_length(iov, nr_segs);
 372	/* Null read succeeds. */
 373	if (unlikely(total_len == 0))
 374		return 0;
 375
 376	do_wakeup = 0;
 377	ret = 0;
 378	mutex_lock(&inode->i_mutex);
 379	pipe = inode->i_pipe;
 
 
 
 
 
 
 
 
 
 380	for (;;) {
 381		int bufs = pipe->nrbufs;
 382		if (bufs) {
 383			int curbuf = pipe->curbuf;
 384			struct pipe_buffer *buf = pipe->bufs + curbuf;
 385			const struct pipe_buf_operations *ops = buf->ops;
 386			void *addr;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 387			size_t chars = buf->len;
 388			int error, atomic;
 
 389
 390			if (chars > total_len)
 
 
 
 
 
 391				chars = total_len;
 
 392
 393			error = ops->confirm(pipe, buf);
 394			if (error) {
 395				if (!ret)
 396					ret = error;
 397				break;
 398			}
 399
 400			atomic = !iov_fault_in_pages_write(iov, chars);
 401redo:
 402			addr = ops->map(pipe, buf, atomic);
 403			error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
 404			ops->unmap(pipe, buf, addr);
 405			if (unlikely(error)) {
 406				/*
 407				 * Just retry with the slow path if we failed.
 408				 */
 409				if (atomic) {
 410					atomic = 0;
 411					goto redo;
 412				}
 413				if (!ret)
 414					ret = error;
 415				break;
 416			}
 417			ret += chars;
 418			buf->offset += chars;
 419			buf->len -= chars;
 420
 421			/* Was it a packet buffer? Clean up and exit */
 422			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
 423				total_len = chars;
 424				buf->len = 0;
 425			}
 426
 427			if (!buf->len) {
 428				buf->ops = NULL;
 429				ops->release(pipe, buf);
 430				curbuf = (curbuf + 1) & (pipe->buffers - 1);
 431				pipe->curbuf = curbuf;
 432				pipe->nrbufs = --bufs;
 433				do_wakeup = 1;
 434			}
 435			total_len -= chars;
 436			if (!total_len)
 437				break;	/* common path: read succeeded */
 438		}
 439		if (bufs)	/* More to do? */
 440			continue;
 441		if (!pipe->writers)
 442			break;
 443		if (!pipe->waiting_writers) {
 444			/* syscall merging: Usually we must not sleep
 445			 * if O_NONBLOCK is set, or if we got some data.
 446			 * But if a writer sleeps in kernel space, then
 447			 * we can wait for that data without violating POSIX.
 448			 */
 449			if (ret)
 450				break;
 451			if (filp->f_flags & O_NONBLOCK) {
 452				ret = -EAGAIN;
 453				break;
 454			}
 455		}
 456		if (signal_pending(current)) {
 457			if (!ret)
 458				ret = -ERESTARTSYS;
 459			break;
 460		}
 461		if (do_wakeup) {
 462			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
 463			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 464		}
 465		pipe_wait(pipe);
 466	}
 467	mutex_unlock(&inode->i_mutex);
 468
 469	/* Signal writers asynchronously that there is more room. */
 470	if (do_wakeup) {
 471		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
 472		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 473	}
 474	if (ret > 0)
 475		file_accessed(filp);
 476	return ret;
 477}
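/*
 * Editorial example (a userspace sketch, not part of this file): the
 * two non-data outcomes of pipe_read() above.  With O_NONBLOCK an empty
 * pipe fails with EAGAIN instead of sleeping; once the last writer has
 * closed (!pipe->writers), read() returns 0 for EOF.  Error handling is
 * abbreviated.
 *
 *	#define _GNU_SOURCE
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2];
 *		char c;
 *
 *		pipe2(fds, O_NONBLOCK);
 *		if (read(fds[0], &c, 1) < 0 && errno == EAGAIN)
 *			puts("empty pipe: EAGAIN");	// -EAGAIN path above
 *		close(fds[1]);				// drop the only writer
 *		if (read(fds[0], &c, 1) == 0)
 *			puts("EOF");			// !pipe->writers path
 *		return 0;
 *	}
 */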
 478
 479static inline int is_packetized(struct file *file)
 480{
 481	return (file->f_flags & O_DIRECT) != 0;
 482}
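/*
 * Editorial example (a userspace sketch, not part of this file): pipes
 * opened with O_DIRECT take the packet_pipe_buf_ops path, so write
 * boundaries survive; pipe_read() truncates each read() to one packet
 * via PIPE_BUF_FLAG_PACKET.  Assumes a kernel with packet-pipe support.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	char buf[16];
 *
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "ab", 2);
 *	write(fds[1], "cd", 2);
 *	read(fds[0], buf, sizeof(buf));	// returns 2 ("ab"), not 4
 */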
 483
 484static ssize_t
 485pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 486	    unsigned long nr_segs, loff_t ppos)
 487{
 488	struct file *filp = iocb->ki_filp;
 489	struct inode *inode = filp->f_path.dentry->d_inode;
 490	struct pipe_inode_info *pipe;
 491	ssize_t ret;
 492	int do_wakeup;
 493	struct iovec *iov = (struct iovec *)_iov;
 494	size_t total_len;
 495	ssize_t chars;
 496
 497	total_len = iov_length(iov, nr_segs);
 498	/* Null write succeeds. */
 499	if (unlikely(total_len == 0))
 500		return 0;
 501
 502	do_wakeup = 0;
 503	ret = 0;
 504	mutex_lock(&inode->i_mutex);
 505	pipe = inode->i_pipe;
 506
 507	if (!pipe->readers) {
 508		send_sig(SIGPIPE, current, 0);
 509		ret = -EPIPE;
 510		goto out;
 511	}
 512
 513	/* We try to merge small writes */
 514	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
 515	if (pipe->nrbufs && chars != 0) {
 516		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
 517							(pipe->buffers - 1);
 518		struct pipe_buffer *buf = pipe->bufs + lastbuf;
 519		const struct pipe_buf_operations *ops = buf->ops;
 520		int offset = buf->offset + buf->len;
 521
 522		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
 523			int error, atomic = 1;
 524			void *addr;
 525
 526			error = ops->confirm(pipe, buf);
 527			if (error)
 528				goto out;
 529
 530			iov_fault_in_pages_read(iov, chars);
 531redo1:
 532			addr = ops->map(pipe, buf, atomic);
 533			error = pipe_iov_copy_from_user(offset + addr, iov,
 534							chars, atomic);
 535			ops->unmap(pipe, buf, addr);
 536			ret = error;
 537			do_wakeup = 1;
 538			if (error) {
 539				if (atomic) {
 540					atomic = 0;
 541					goto redo1;
 542				}
 543				goto out;
 544			}
 545			buf->len += chars;
 546			total_len -= chars;
 547			ret = chars;
 548			if (!total_len)
 549				goto out;
 550		}
 551	}
 552
 553	for (;;) {
 554		int bufs;
 555
 556		if (!pipe->readers) {
 557			send_sig(SIGPIPE, current, 0);
 558			if (!ret)
 559				ret = -EPIPE;
 560			break;
 561		}
 562		bufs = pipe->nrbufs;
 563		if (bufs < pipe->buffers) {
 564			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
 565			struct pipe_buffer *buf = pipe->bufs + newbuf;
 566			struct page *page = pipe->tmp_page;
 567			char *src;
 568			int error, atomic = 1;
 569
 570			if (!page) {
 571				page = alloc_page(GFP_HIGHUSER);
 572				if (unlikely(!page)) {
 573					ret = ret ? : -ENOMEM;
 574					break;
 575				}
 576				pipe->tmp_page = page;
 577			}
 578			/* Always wake up, even if the copy fails. Otherwise
 579			 * we lock up (O_NONBLOCK-)readers that sleep due to
 580			 * syscall merging.
 581			 * FIXME! Is this really true?
 582			 */
 583			do_wakeup = 1;
 584			chars = PAGE_SIZE;
 585			if (chars > total_len)
 586				chars = total_len;
 587
 588			iov_fault_in_pages_read(iov, chars);
 589redo2:
 590			if (atomic)
 591				src = kmap_atomic(page);
 592			else
 593				src = kmap(page);
 594
 595			error = pipe_iov_copy_from_user(src, iov, chars,
 596							atomic);
 597			if (atomic)
 598				kunmap_atomic(src);
 599			else
 600				kunmap(page);
 601
 602			if (unlikely(error)) {
 603				if (atomic) {
 604					atomic = 0;
 605					goto redo2;
 606				}
 607				if (!ret)
 608					ret = error;
 609				break;
 610			}
 611			ret += chars;
 612
 613			/* Insert it into the buffer array */
 614			buf->page = page;
 615			buf->ops = &anon_pipe_buf_ops;
 616			buf->offset = 0;
 617			buf->len = chars;
 618			buf->flags = 0;
 619			if (is_packetized(filp)) {
 620				buf->ops = &packet_pipe_buf_ops;
 621				buf->flags = PIPE_BUF_FLAG_PACKET;
 622			}
 623			pipe->nrbufs = ++bufs;
 624			pipe->tmp_page = NULL;
 625
 626			total_len -= chars;
 627			if (!total_len)
 628				break;
 629		}
 630		if (bufs < pipe->buffers)
 631			continue;
 632		if (filp->f_flags & O_NONBLOCK) {
 633			if (!ret)
 634				ret = -EAGAIN;
 635			break;
 636		}
 637		if (signal_pending(current)) {
 638			if (!ret)
 639				ret = -ERESTARTSYS;
 640			break;
 641		}
 642		if (do_wakeup) {
 643			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
 644			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 645			do_wakeup = 0;
 646		}
 647		pipe->waiting_writers++;
 648		pipe_wait(pipe);
 649		pipe->waiting_writers--;
 650	}
 651out:
 652	mutex_unlock(&inode->i_mutex);
 653	if (do_wakeup) {
 654		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
 655		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 656	}
 657	if (ret > 0) {
 658		int err = file_update_time(filp);
 659		if (err)
 660			ret = err;
 661	}
 662	return ret;
 663}
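/*
 * Editorial example (a userspace sketch, not part of this file): the
 * !pipe->readers checks above are what userspace observes as SIGPIPE
 * plus EPIPE.  With the signal ignored, write() just reports the error.
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *
 *	signal(SIGPIPE, SIG_IGN);
 *	pipe(fds);
 *	close(fds[0]);				// no readers remain
 *	if (write(fds[1], "x", 1) < 0 && errno == EPIPE)
 *		;				// send_sig(SIGPIPE)/-EPIPE path
 */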
 664
 665static ssize_t
 666bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
 667{
 668	return -EBADF;
 669}
 670
 671static ssize_t
 672bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
 673	   loff_t *ppos)
 674{
 675	return -EBADF;
 676}
 677
 678static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 679{
 680	struct inode *inode = filp->f_path.dentry->d_inode;
 681	struct pipe_inode_info *pipe;
 682	int count, buf, nrbufs;
 683
 684	switch (cmd) {
 685		case FIONREAD:
 686			mutex_lock(&inode->i_mutex);
 687			pipe = inode->i_pipe;
 688			count = 0;
 689			buf = pipe->curbuf;
 690			nrbufs = pipe->nrbufs;
 691			while (--nrbufs >= 0) {
 692				count += pipe->bufs[buf].len;
 693				buf = (buf+1) & (pipe->buffers - 1);
 694			}
 695			mutex_unlock(&inode->i_mutex);
 696
 697			return put_user(count, (int __user *)arg);
 698		default:
 699			return -ENOIOCTLCMD;
 700	}
 701}
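/*
 * Editorial example (a userspace sketch, not part of this file):
 * FIONREAD reports the byte count summed by the loop above.
 *
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int fds[2], n;
 *
 *	pipe(fds);
 *	write(fds[1], "hello", 5);
 *	ioctl(fds[0], FIONREAD, &n);		// n == 5
 */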
 702
 703/* No kernel lock held - fine */
 704static unsigned int
 705pipe_poll(struct file *filp, poll_table *wait)
 706{
 707	unsigned int mask;
 708	struct inode *inode = filp->f_path.dentry->d_inode;
 709	struct pipe_inode_info *pipe = inode->i_pipe;
 710	int nrbufs;
 711
 712	poll_wait(filp, &pipe->wait, wait);
 713
 714	/* Reading only -- no need for acquiring the semaphore.  */
 715	nrbufs = pipe->nrbufs;
 716	mask = 0;
 717	if (filp->f_mode & FMODE_READ) {
 718		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
 719		if (!pipe->writers && filp->f_version != pipe->w_counter)
 720			mask |= POLLHUP;
 721	}
 722
 723	if (filp->f_mode & FMODE_WRITE) {
 724		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
 725		/*
 726		 * Most Unices do not set POLLERR for FIFOs but on Linux they
 727		 * behave exactly like pipes for poll().
 728		 */
 729		if (!pipe->readers)
 730			mask |= POLLERR;
 731	}
 732
 733	return mask;
 734}
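/*
 * Editorial example (a userspace sketch, not part of this file): the
 * mask computed above is what poll(2) reports back in revents.
 *
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	struct pollfd pfd;
 *
 *	pipe(fds);
 *	write(fds[1], "x", 1);
 *	pfd.fd = fds[0];
 *	pfd.events = POLLIN;
 *	poll(&pfd, 1, -1);			// revents has POLLIN set
 */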
 735
 736static int
 737pipe_release(struct inode *inode, int decr, int decw)
 738{
 739	struct pipe_inode_info *pipe;
 740
 741	mutex_lock(&inode->i_mutex);
 742	pipe = inode->i_pipe;
 743	pipe->readers -= decr;
 744	pipe->writers -= decw;
 745
 746	if (!pipe->readers && !pipe->writers) {
 747		free_pipe_info(inode);
 748	} else {
 749		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
 750		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 751		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 752	}
 753	mutex_unlock(&inode->i_mutex);
 754
 755	return 0;
 756}
 757
 758static int
 759pipe_read_fasync(int fd, struct file *filp, int on)
 760{
 761	struct inode *inode = filp->f_path.dentry->d_inode;
 762	int retval;
 763
 764	mutex_lock(&inode->i_mutex);
 765	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers);
 766	mutex_unlock(&inode->i_mutex);
 767
 768	return retval;
 769}
 770
 771
 772static int
 773pipe_write_fasync(int fd, struct file *filp, int on)
 774{
 775	struct inode *inode = filp->f_path.dentry->d_inode;
 776	int retval;
 777
 778	mutex_lock(&inode->i_mutex);
 779	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers);
 780	mutex_unlock(&inode->i_mutex);
 781
 782	return retval;
 783}
 784
 785
 786static int
 787pipe_rdwr_fasync(int fd, struct file *filp, int on)
 788{
 789	struct inode *inode = filp->f_path.dentry->d_inode;
 790	struct pipe_inode_info *pipe = inode->i_pipe;
 791	int retval;
 792
 793	mutex_lock(&inode->i_mutex);
 794	retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
 795	if (retval >= 0) {
 796		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
 797		if (retval < 0) /* can only happen when turning fasync on */
 798			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
 799	}
 800	mutex_unlock(&inode->i_mutex);
 801	return retval;
 802}
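/*
 * Editorial example (a userspace sketch, not part of this file): the
 * fasync lists maintained above feed kill_fasync(), which userspace
 * enables by requesting O_ASYNC delivery of SIGIO.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static volatile sig_atomic_t got_sigio;
 *	static void on_sigio(int sig) { got_sigio = 1; }
 *
 *	int fds[2];
 *
 *	pipe(fds);
 *	signal(SIGIO, on_sigio);
 *	fcntl(fds[0], F_SETOWN, getpid());	// route the signal to us
 *	fcntl(fds[0], F_SETFL, fcntl(fds[0], F_GETFL) | O_ASYNC);
 *	write(fds[1], "x", 1);			// kill_fasync(..., POLL_IN)
 */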
 803
 804
 805static int
 806pipe_read_release(struct inode *inode, struct file *filp)
 807{
 808	return pipe_release(inode, 1, 0);
 809}
 810
 811static int
 812pipe_write_release(struct inode *inode, struct file *filp)
 813{
 814	return pipe_release(inode, 0, 1);
 815}
 816
 817static int
 818pipe_rdwr_release(struct inode *inode, struct file *filp)
 819{
 820	int decr, decw;
 821
 822	decr = (filp->f_mode & FMODE_READ) != 0;
 823	decw = (filp->f_mode & FMODE_WRITE) != 0;
 824	return pipe_release(inode, decr, decw);
 825}
 826
 827static int
 828pipe_read_open(struct inode *inode, struct file *filp)
 829{
 830	int ret = -ENOENT;
 831
 832	mutex_lock(&inode->i_mutex);
 833
 834	if (inode->i_pipe) {
 835		ret = 0;
 836		inode->i_pipe->readers++;
 837	}
 838
 839	mutex_unlock(&inode->i_mutex);
 840
 841	return ret;
 842}
 843
 844static int
 845pipe_write_open(struct inode *inode, struct file *filp)
 846{
 847	int ret = -ENOENT;
 848
 849	mutex_lock(&inode->i_mutex);
 850
 851	if (inode->i_pipe) {
 852		ret = 0;
 853		inode->i_pipe->writers++;
 854	}
 855
 856	mutex_unlock(&inode->i_mutex);
 857
 858	return ret;
 859}
 860
 861static int
 862pipe_rdwr_open(struct inode *inode, struct file *filp)
 863{
 864	int ret = -ENOENT;
 865
 866	mutex_lock(&inode->i_mutex);
 867
 868	if (inode->i_pipe) {
 869		ret = 0;
 870		if (filp->f_mode & FMODE_READ)
 871			inode->i_pipe->readers++;
 872		if (filp->f_mode & FMODE_WRITE)
 873			inode->i_pipe->writers++;
 874	}
 875
 876	mutex_unlock(&inode->i_mutex);
 877
 878	return ret;
 879}
 880
 881/*
 882 * The file_operations structs are not static because they
 883 * are also used in linux/fs/fifo.c to do operations on FIFOs.
 884 *
 885 * Pipes reuse fifos' file_operations structs.
 886 */
 887const struct file_operations read_pipefifo_fops = {
 888	.llseek		= no_llseek,
 889	.read		= do_sync_read,
 890	.aio_read	= pipe_read,
 891	.write		= bad_pipe_w,
 892	.poll		= pipe_poll,
 893	.unlocked_ioctl	= pipe_ioctl,
 894	.open		= pipe_read_open,
 895	.release	= pipe_read_release,
 896	.fasync		= pipe_read_fasync,
 897};
 898
 899const struct file_operations write_pipefifo_fops = {
 900	.llseek		= no_llseek,
 901	.read		= bad_pipe_r,
 902	.write		= do_sync_write,
 903	.aio_write	= pipe_write,
 904	.poll		= pipe_poll,
 905	.unlocked_ioctl	= pipe_ioctl,
 906	.open		= pipe_write_open,
 907	.release	= pipe_write_release,
 908	.fasync		= pipe_write_fasync,
 909};
 910
 911const struct file_operations rdwr_pipefifo_fops = {
 912	.llseek		= no_llseek,
 913	.read		= do_sync_read,
 914	.aio_read	= pipe_read,
 915	.write		= do_sync_write,
 916	.aio_write	= pipe_write,
 917	.poll		= pipe_poll,
 918	.unlocked_ioctl	= pipe_ioctl,
 919	.open		= pipe_rdwr_open,
 920	.release	= pipe_rdwr_release,
 921	.fasync		= pipe_rdwr_fasync,
 922};
 923
 924struct pipe_inode_info *alloc_pipe_info(struct inode *inode)
 925{
 926	struct pipe_inode_info *pipe;
 927
 928	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
 929	if (pipe) {
 930		pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
 931		if (pipe->bufs) {
 932			init_waitqueue_head(&pipe->wait);
 933			pipe->r_counter = pipe->w_counter = 1;
 934			pipe->inode = inode;
 935			pipe->buffers = PIPE_DEF_BUFFERS;
 936			return pipe;
 937		}
 938		kfree(pipe);
 939	}
 940
 941	return NULL;
 942}
 943
 944void __free_pipe_info(struct pipe_inode_info *pipe)
 945{
 946	int i;
 947
 948	for (i = 0; i < pipe->buffers; i++) {
 949		struct pipe_buffer *buf = pipe->bufs + i;
 950		if (buf->ops)
 951			buf->ops->release(pipe, buf);
 952	}
 953	if (pipe->tmp_page)
 954		__free_page(pipe->tmp_page);
 955	kfree(pipe->bufs);
 956	kfree(pipe);
 957}
 958
 959void free_pipe_info(struct inode *inode)
 960{
 961	__free_pipe_info(inode->i_pipe);
 962	inode->i_pipe = NULL;
 963}
 964
 965static struct vfsmount *pipe_mnt __read_mostly;
 966
 967/*
 968 * pipefs_dname() is called from d_path().
 969 */
 970static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
 971{
 972	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
 973				dentry->d_inode->i_ino);
 974}
 975
 976static const struct dentry_operations pipefs_dentry_operations = {
 977	.d_dname	= pipefs_dname,
 978};
 979
 980static struct inode *get_pipe_inode(void)
 981{
 982	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
 983	struct pipe_inode_info *pipe;
 984
 985	if (!inode)
 986		goto fail_inode;
 987
 988	inode->i_ino = get_next_ino();
 989
 990	pipe = alloc_pipe_info(inode);
 991	if (!pipe)
 992		goto fail_iput;
 993	inode->i_pipe = pipe;
 994
 995	pipe->readers = pipe->writers = 1;
 996	inode->i_fop = &rdwr_pipefifo_fops;
 997
 998	/*
 999	 * Mark the inode dirty from the very beginning,
1000	 * that way it will never be moved to the dirty
1001	 * list because "mark_inode_dirty()" will think
1002	 * that it already _is_ on the dirty list.
1003	 */
1004	inode->i_state = I_DIRTY;
1005	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
1006	inode->i_uid = current_fsuid();
1007	inode->i_gid = current_fsgid();
1008	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1009
1010	return inode;
1011
1012fail_iput:
1013	iput(inode);
1014
1015fail_inode:
1016	return NULL;
1017}
1018
1019struct file *create_write_pipe(int flags)
1020{
1021	int err;
1022	struct inode *inode;
1023	struct file *f;
1024	struct path path;
1025	struct qstr name = { .name = "" };
1026
1027	err = -ENFILE;
1028	inode = get_pipe_inode();
1029	if (!inode)
1030		goto err;
1031
1032	err = -ENOMEM;
1033	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
1034	if (!path.dentry)
1035		goto err_inode;
1036	path.mnt = mntget(pipe_mnt);
1037
1038	d_instantiate(path.dentry, inode);
1039
1040	err = -ENFILE;
1041	f = alloc_file(&path, FMODE_WRITE, &write_pipefifo_fops);
1042	if (!f)
1043		goto err_dentry;
1044	f->f_mapping = inode->i_mapping;
1045
1046	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
1047	f->f_version = 0;
1048
1049	return f;
1050
1051 err_dentry:
1052	free_pipe_info(inode);
1053	path_put(&path);
1054	return ERR_PTR(err);
1055
1056 err_inode:
1057	free_pipe_info(inode);
1058	iput(inode);
1059 err:
1060	return ERR_PTR(err);
1061}
1062
1063void free_write_pipe(struct file *f)
1064{
1065	free_pipe_info(f->f_dentry->d_inode);
1066	path_put(&f->f_path);
1067	put_filp(f);
1068}
1069
1070struct file *create_read_pipe(struct file *wrf, int flags)
1071{
1072	/* Grab pipe from the writer */
1073	struct file *f = alloc_file(&wrf->f_path, FMODE_READ,
1074				    &read_pipefifo_fops);
1075	if (!f)
1076		return ERR_PTR(-ENFILE);
1077
1078	path_get(&wrf->f_path);
1079	f->f_flags = O_RDONLY | (flags & O_NONBLOCK);
1080
1081	return f;
1082}
1083
1084int do_pipe_flags(int *fd, int flags)
1085{
1086	struct file *fw, *fr;
1087	int error;
1088	int fdw, fdr;
1089
1090	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
1091		return -EINVAL;
1092
1093	fw = create_write_pipe(flags);
1094	if (IS_ERR(fw))
1095		return PTR_ERR(fw);
1096	fr = create_read_pipe(fw, flags);
1097	error = PTR_ERR(fr);
1098	if (IS_ERR(fr))
1099		goto err_write_pipe;
1100
1101	error = get_unused_fd_flags(flags);
1102	if (error < 0)
1103		goto err_read_pipe;
1104	fdr = error;
1105
1106	error = get_unused_fd_flags(flags);
1107	if (error < 0)
1108		goto err_fdr;
1109	fdw = error;
1110
1111	audit_fd_pair(fdr, fdw);
1112	fd_install(fdr, fr);
1113	fd_install(fdw, fw);
1114	fd[0] = fdr;
1115	fd[1] = fdw;
1116
1117	return 0;
1118
1119 err_fdr:
1120	put_unused_fd(fdr);
1121 err_read_pipe:
1122	path_put(&fr->f_path);
1123	put_filp(fr);
1124 err_write_pipe:
1125	free_write_pipe(fw);
1126	return error;
1127}
1128
1129/*
1130 * sys_pipe() is the normal C calling standard for creating
1131 * a pipe. It's not the way Unix traditionally does this, though.
1132 */
1133SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
1134{
1135	int fd[2];
1136	int error;
1137
1138	error = do_pipe_flags(fd, flags);
1139	if (!error) {
1140		if (copy_to_user(fildes, fd, sizeof(fd))) {
1141			sys_close(fd[0]);
1142			sys_close(fd[1]);
1143			error = -EFAULT;
1144		}
1145	}
1146	return error;
1147}
1148
1149SYSCALL_DEFINE1(pipe, int __user *, fildes)
1150{
1151	return sys_pipe2(fildes, 0);
1152}
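/*
 * Editorial example (a userspace sketch, not part of this file): the
 * canonical use of the pipe(2) wrapper defined above; error handling
 * is omitted.
 *
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	char buf[8];
 *
 *	pipe(fds);
 *	if (fork() == 0) {			// child: writer
 *		close(fds[0]);
 *		write(fds[1], "hello", 5);
 *		_exit(0);
 *	}
 *	close(fds[1]);				// parent: reader
 *	read(fds[0], buf, sizeof(buf));		// "hello"
 *	wait(NULL);
 */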
1153
1154/*
1155 * Allocate a new array of pipe buffers and copy the info over. Returns the
 1156 * pipe size if successful, or a negative error code on failure.
1157 */
1158static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
1159{
1160	struct pipe_buffer *bufs;
1161
1162	/*
1163	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
1164	 * expect a lot of shrink+grow operations, just free and allocate
1165	 * again like we would do for growing. If the pipe currently
1166	 * contains more buffers than arg, then return busy.
1167	 */
1168	if (nr_pages < pipe->nrbufs)
1169		return -EBUSY;
1170
1171	bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
1172	if (unlikely(!bufs))
1173		return -ENOMEM;
1174
1175	/*
1176	 * The pipe array wraps around, so just start the new one at zero
1177	 * and adjust the indexes.
1178	 */
1179	if (pipe->nrbufs) {
1180		unsigned int tail;
1181		unsigned int head;
1182
1183		tail = pipe->curbuf + pipe->nrbufs;
1184		if (tail < pipe->buffers)
1185			tail = 0;
1186		else
1187			tail &= (pipe->buffers - 1);
1188
1189		head = pipe->nrbufs - tail;
1190		if (head)
1191			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
1192		if (tail)
1193			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
1194	}
1195
1196	pipe->curbuf = 0;
1197	kfree(pipe->bufs);
1198	pipe->bufs = bufs;
1199	pipe->buffers = nr_pages;
1200	return nr_pages * PAGE_SIZE;
1201}
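/*
 * Editorial note, a worked example of the unwrap above: with
 * pipe->buffers == 8, pipe->curbuf == 6 and pipe->nrbufs == 4 the live
 * entries are bufs[6], bufs[7], bufs[0], bufs[1].  Then
 * tail = (6 + 4) & 7 == 2 and head = 4 - 2 == 2, so the first memcpy()
 * moves bufs[6..7] to the front of the new array and the second appends
 * bufs[0..1], leaving the ring linear with curbuf == 0.
 */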
1202
1203/*
1204 * Currently we rely on the pipe array holding a power-of-2 number
1205 * of pages.
1206 */
1207static inline unsigned int round_pipe_size(unsigned int size)
1208{
1209	unsigned long nr_pages;
1210
1211	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1212	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
1213}
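/*
 * Editorial note, a worked example assuming 4 KiB pages:
 * round_pipe_size(70000) yields nr_pages == 18, which rounds up to the
 * next power of two (32), so the result is 32 << PAGE_SHIFT == 131072.
 */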
1214
1215/*
1216 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
1217 * will return an error.
1218 */
1219int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
1220		 size_t *lenp, loff_t *ppos)
1221{
1222	int ret;
1223
1224	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
1225	if (ret < 0 || !write)
1226		return ret;
1227
1228	pipe_max_size = round_pipe_size(pipe_max_size);
1229	return ret;
1230}
1231
1232/*
1233 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1234 * location, so checking ->i_pipe is not enough to verify that this is a
1235 * pipe.
1236 */
1237struct pipe_inode_info *get_pipe_info(struct file *file)
1238{
1239	struct inode *i = file->f_path.dentry->d_inode;
1240
1241	return S_ISFIFO(i->i_mode) ? i->i_pipe : NULL;
1242}
1243
1244long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
1245{
1246	struct pipe_inode_info *pipe;
1247	long ret;
1248
1249	pipe = get_pipe_info(file);
1250	if (!pipe)
1251		return -EBADF;
1252
1253	mutex_lock(&pipe->inode->i_mutex);
1254
1255	switch (cmd) {
1256	case F_SETPIPE_SZ: {
1257		unsigned int size, nr_pages;
1258
1259		size = round_pipe_size(arg);
1260		nr_pages = size >> PAGE_SHIFT;
1261
1262		ret = -EINVAL;
1263		if (!nr_pages)
1264			goto out;
1265
1266		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
1267			ret = -EPERM;
1268			goto out;
1269		}
1270		ret = pipe_set_size(pipe, nr_pages);
1271		break;
1272		}
1273	case F_GETPIPE_SZ:
1274		ret = pipe->buffers * PAGE_SIZE;
1275		break;
1276	default:
1277		ret = -EINVAL;
1278		break;
1279	}
1280
1281out:
1282	mutex_unlock(&pipe->inode->i_mutex);
1283	return ret;
1284}
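/*
 * Editorial example (a userspace sketch, not part of this file):
 * resizing a pipe with the fcntl commands handled above.  The request
 * is rounded up by round_pipe_size(), and callers without
 * CAP_SYS_RESOURCE are capped by /proc/sys/fs/pipe-max-size.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2], size;
 *
 *	pipe(fds);
 *	fcntl(fds[1], F_SETPIPE_SZ, 1 << 20);	// ask for 1 MiB
 *	size = fcntl(fds[1], F_GETPIPE_SZ);	// actual rounded capacity
 */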
1285
1286static const struct super_operations pipefs_ops = {
1287	.destroy_inode = free_inode_nonrcu,
1288	.statfs = simple_statfs,
1289};
1290
1291/*
 1292 * pipefs should _never_ be mounted by userland - too much of a security hassle,
1293 * no real gain from having the whole whorehouse mounted. So we don't need
1294 * any operations on the root directory. However, we need a non-trivial
1295 * d_name - pipe: will go nicely and kill the special-casing in procfs.
1296 */
1297static struct dentry *pipefs_mount(struct file_system_type *fs_type,
1298			 int flags, const char *dev_name, void *data)
1299{
1300	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
1301			&pipefs_dentry_operations, PIPEFS_MAGIC);
1302}
1303
1304static struct file_system_type pipe_fs_type = {
1305	.name		= "pipefs",
1306	.mount		= pipefs_mount,
1307	.kill_sb	= kill_anon_super,
1308};
1309
1310static int __init init_pipe_fs(void)
1311{
1312	int err = register_filesystem(&pipe_fs_type);
1313
1314	if (!err) {
1315		pipe_mnt = kern_mount(&pipe_fs_type);
1316		if (IS_ERR(pipe_mnt)) {
1317			err = PTR_ERR(pipe_mnt);
1318			unregister_filesystem(&pipe_fs_type);
1319		}
1320	}
1321	return err;
1322}
1323
1324fs_initcall(init_pipe_fs);