// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/pipe.c
 *
 * Copyright (C) 1991, 1992, 1999 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>
#include <linux/sysctl.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * emptied.
 */
#define PIPE_MIN_DEF_BUFFERS 2

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
static unsigned int pipe_max_size = 1048576;

/*
 * Maximum allocatable pages per user. The hard limit is unset by default;
 * the soft limit matches the default pipe allocation.
 */
static unsigned long pipe_user_pages_hard;
static unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally. This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
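
/*
 * Editorial sketch (not part of the original source): with unmasked
 * indices and a ring of 8 slots, occupancy and slot lookup stay correct
 * even across the 2^32 wrap, because unsigned subtraction is modular.
 * Assuming head has wrapped to 2 while tail is still 0xfffffffe:
 *
 *	unsigned int head = 2, tail = 0xfffffffeU, mask = 8 - 1;
 *	unsigned int occupancy = head - tail;		// 4 buffers in flight
 *	struct pipe_buffer *buf = &pipe->bufs[tail & mask];	// slot 6
 *
 * Masking only at dereference is what lets all ring_size slots be used,
 * with no reserved "dead" slot needed to tell full apart from empty.
 */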

#define cmp_int(l, r) ((l > r) - (l < r))

#ifdef CONFIG_PROVE_LOCKING
static int pipe_lock_cmp_fn(const struct lockdep_map *a,
			    const struct lockdep_map *b)
{
	return cmp_int((unsigned long) a, (unsigned long) b);
}
#endif

void pipe_lock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_lock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 > pipe2)
		swap(pipe1, pipe2);

	pipe_lock(pipe1);
	pipe_lock(pipe2);
}
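
/*
 * Editorial note (not part of the original source): callers that must
 * hold two pipe locks at once, e.g. splicing between two pipes, rely on
 * the address ordering above to avoid ABBA deadlock. Two tasks locking
 * the same pair in opposite argument order still take the mutexes in
 * the same address order:
 *
 *	pipe_double_lock(a, b);		// task 1
 *	pipe_double_lock(b, a);		// task 2: same acquisition order
 *
 * pipe_lock_cmp_fn() above teaches lockdep the same rule, so nesting two
 * pipe mutexes in ascending address order is not flagged as recursion.
 */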

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) != 1)
		return false;
	memcg_kmem_uncharge_page(page, 0);
	__SetPageLocked(page);
	return true;
}

/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns true and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. Lock the page
	 * and return true.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.release	= anon_pipe_buf_release,
	.try_steal	= anon_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}

static inline unsigned int pipe_update_tail(struct pipe_inode_info *pipe,
					    struct pipe_buffer *buf,
					    unsigned int tail)
{
	pipe_buf_release(pipe, buf);

	/*
	 * If the pipe has a watch_queue, we need the additional protection
	 * of the spinlock, because notifications are posted under that
	 * spinlock alone, without taking the mutex.
	 */
	if (pipe_has_watch_queue(pipe)) {
		spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
		if (buf->flags & PIPE_BUF_FLAG_LOSS)
			pipe->note_loss = true;
#endif
		pipe->tail = ++tail;
		spin_unlock_irq(&pipe->rd_wait.lock);
		return tail;
	}

	/*
	 * Without a watch_queue, we can simply increment the tail
	 * without the spinlock - the mutex is enough.
	 */
	pipe->tail = ++tail;
	return tail;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full, wake_next_reader = false;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	mutex_lock(&pipe->mutex);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		/* Read ->head with a barrier vs post_one_notification() */
		unsigned int head = smp_load_acquire(&pipe->head);
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
		if (pipe->note_loss) {
			struct watch_notification n;

			if (total_len < 8) {
				if (ret == 0)
					ret = -ENOBUFS;
				break;
			}

			n.type = WATCH_TYPE_META;
			n.subtype = WATCH_META_LOSS_NOTIFICATION;
			n.info = watch_sizeof(n);
			if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}
			ret += sizeof(n);
			total_len -= sizeof(n);
			pipe->note_loss = false;
		}
#endif

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len) {
				if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
					if (ret == 0)
						ret = -ENOBUFS;
					break;
				}
				chars = total_len;
			}

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len)
				tail = pipe_update_tail(pipe, buf, tail);
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if ((filp->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&pipe->mutex);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(was_full))
			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		mutex_lock(&pipe->mutex);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
		wake_next_reader = true;
	}
	if (pipe_empty(pipe->head, pipe->tail))
		wake_next_reader = false;
	mutex_unlock(&pipe->mutex);

	if (was_full)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (wake_next_reader)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
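
/*
 * Editorial illustration (not part of the original source): O_DIRECT
 * selects "packet mode", where each write() becomes one packet and each
 * read() returns at most one packet, discarding any unread remainder
 * (see the PIPE_BUF_FLAG_PACKET handling in pipe_read() above). A minimal
 * userspace sketch, assuming a libc that exposes pipe2():
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	char buf[64];
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "hello", 5);	// packet 1
 *	write(fds[1], "world", 5);	// packet 2
 *	read(fds[0], buf, 2);		// returns 2 ("he"); "llo" is dropped
 *	read(fds[0], buf, 64);		// returns 5 ("world")
 */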

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
		!READ_ONCE(pipe->readers);
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;
	bool wake_next_writer = false;

	/*
	 * Reject writing to watch queue pipes before the point where we lock
	 * the pipe.
	 * Otherwise, lockdep would be unhappy if the caller already has another
	 * pipe locked.
	 * If we had to support locking a normal pipe and a notification pipe at
	 * the same time, we could set up lockdep annotations for that, but
	 * since we don't actually need that, it's simpler to just bail here.
	 */
	if (pipe_has_watch_queue(pipe))
		return -EXDEV;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	mutex_lock(&pipe->mutex);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/*
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
		    offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer. If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			pipe->head = head + 1;

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			if (is_packetized(filp))
				buf->flags = PIPE_BUF_FLAG_PACKET;
			else
				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if ((filp->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		mutex_unlock(&pipe->mutex);
		if (was_empty)
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		mutex_lock(&pipe->mutex);
		was_empty = pipe_empty(pipe->head, pipe->tail);
		wake_next_writer = true;
	}
out:
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		wake_next_writer = false;
	mutex_unlock(&pipe->mutex);

	/*
	 * If we do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 *
	 * Epoll nonsensically wants a wakeup whether the pipe
	 * was already empty or not.
	 */
	if (was_empty || pipe->poll_usage)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	if (wake_next_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&pipe->mutex);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		mutex_unlock(&pipe->mutex);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		mutex_lock(&pipe->mutex);
		ret = watch_queue_set_size(pipe, arg);
		mutex_unlock(&pipe->mutex);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/* Epoll has some historical nasty semantics, this enables them */
	WRITE_ONCE(pipe->poll_usage, true);

	/*
	 * Reading pipe state only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_pipe != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	mutex_lock(&pipe->mutex);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	mutex_unlock(&pipe->mutex);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	mutex_lock(&pipe->mutex);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	mutex_unlock(&pipe->mutex);
	return retval;
}

unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

bool pipe_is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		lock_set_cmp_fn(&pipe->mutex, pipe_lock_cmp_fn, NULL);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	unsigned int i;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		watch_queue_clear(pipe->watch_queue);
#endif

	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		put_watch_queue(pipe->watch_queue);
#endif
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __ro_after_init;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	simple_inode_init_ts(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;
	int error;

	if (!inode)
		return -ENFILE;

	if (flags & O_NOTIFICATION_PIPE) {
		error = watch_queue_init(inode->i_pipe);
		if (error) {
			free_pipe_info(inode->i_pipe);
			iput(inode);
			return error;
		}
	}

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;
	f->f_pipe = 0;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[0]->f_pipe = 0;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	/* pipe groks IOCB_NOWAIT */
	files[0]->f_mode |= FMODE_NOWAIT;
	files[1]->f_mode |= FMODE_NOWAIT;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
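
/*
 * Editorial illustration (not part of the original source): the flag mask
 * checked in __do_pipe_flags() above is exactly what userspace may pass to
 * pipe2(2). A minimal sketch of the common O_CLOEXEC | O_NONBLOCK case:
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *	#include <errno.h>
 *
 *	int fds[2];
 *	char c;
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *		return -1;	// e.g. EINVAL for any unsupported flag
 *	if (read(fds[0], &c, 1) < 0 && errno == EAGAIN)
 *		;		// empty pipe: pipe_read() returned -EAGAIN
 */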

/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}

/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_pipe = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	mutex_lock(&pipe->mutex);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 * O_RDONLY
	 * POSIX.1 says that O_NONBLOCK means return with the FIFO
	 * opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_pipe = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 * O_WRONLY
	 * POSIX.1 says that O_NONBLOCK means return -1 with
	 * errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 * O_RDWR
	 * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 * This implementation will NEVER block on a O_RDWR open, since
	 * the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	mutex_unlock(&pipe->mutex);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	mutex_unlock(&pipe->mutex);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
	.splice_write	= iter_file_splice_write,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned int size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
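
/*
 * Editorial worked example (not part of the original source), assuming
 * PAGE_SIZE == 4096:
 *
 *	round_pipe_size(0)          -> 4096	(clamped up to one page)
 *	round_pipe_size(4096)       -> 4096	(already a power of two)
 *	round_pipe_size(4097)       -> 8192	(rounded up, never down)
 *	round_pipe_size(1U << 31)   -> 1U << 31	(largest accepted size)
 *	round_pipe_size(0x80000001) -> 0	(error: above 2^31)
 *
 * The power-of-two requirement exists so that "index & (ring_size - 1)"
 * works for slot lookup, as described near the top of this file.
 */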

/*
 * Resize the pipe ring to a number of slots.
 *
 * Note the pipe can be reduced in capacity, but only if the current
 * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
 * returned instead.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	struct pipe_buffer *bufs;
	unsigned int head, tail, mask, n;

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	spin_lock_irq(&pipe->rd_wait.lock);
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;

	n = pipe_occupancy(head, tail);
	if (nr_slots < n) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		kfree(bufs);
		return -EBUSY;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	if (!pipe_has_watch_queue(pipe)) {
		pipe->max_usage = nr_slots;
		pipe->nr_accounted = nr_slots;
	}

	spin_unlock_irq(&pipe->rd_wait.lock);

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}
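
/*
 * Editorial worked example (not part of the original source): with
 * ring_size = 8, tail = 14 and head = 18, the n = 4 occupied slots sit
 * at masked positions 6, 7, 0, 1, i.e. they wrap (h = 2 <= t = 6).
 * The "else" branch above copies them in two pieces:
 *
 *	tsize = 8 - 6 = 2;
 *	memcpy(bufs + 2, pipe->bufs, 2 * sizeof(*bufs));	// old slots 0, 1
 *	memcpy(bufs, pipe->bufs + 6, 2 * sizeof(*bufs));	// old slots 6, 7
 *
 * leaving the new ring linear, oldest buffer first, with tail = 0 and
 * head = n = 4.
 */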

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * new pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
{
	unsigned long user_bufs;
	unsigned int nr_slots, size;
	long ret = 0;

	if (pipe_has_watch_queue(pipe))
		return -EBUSY;

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		goto out_revert_acct;

	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
	return ret;
}

/*
 * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
 * not enough to verify that this is a pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;

	if (file->f_op != &pipefifo_fops || !pipe)
		return NULL;
	if (for_splice && pipe_has_watch_queue(pipe))
		return NULL;
	return pipe;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned int arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	mutex_lock(&pipe->mutex);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&pipe->mutex);
	return ret;
}
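
/*
 * Editorial illustration (not part of the original source): from userspace,
 * the two fcntl(2) commands above look like this. Note that F_SETPIPE_SZ
 * returns the rounded-up size actually set, not the size requested:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	pipe(fds);
 *	long cur = fcntl(fds[0], F_GETPIPE_SZ);		// typically 65536
 *	long new = fcntl(fds[0], F_SETPIPE_SZ, 5000);	// rounds up to 8192
 *
 * Growing beyond /proc/sys/fs/pipe-max-size fails with EPERM unless the
 * caller has CAP_SYS_RESOURCE; shrinking below the current occupancy
 * fails with EBUSY (see pipe_resize_ring() above).
 */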

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole file system mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

#ifdef CONFIG_SYSCTL
static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
					unsigned int *valp,
					int write, void *data)
{
	if (write) {
		unsigned int val;

		val = round_pipe_size(*lvalp);
		if (val == 0)
			return -EINVAL;

		*valp = val;
	} else {
		unsigned int val = *valp;
		*lvalp = (unsigned long) val;
	}

	return 0;
}

static int proc_dopipe_max_size(const struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	return do_proc_douintvec(table, write, buffer, lenp, ppos,
				 do_proc_dopipe_max_size_conv, NULL);
}

static struct ctl_table fs_pipe_sysctls[] = {
	{
		.procname	= "pipe-max-size",
		.data		= &pipe_max_size,
		.maxlen		= sizeof(pipe_max_size),
		.mode		= 0644,
		.proc_handler	= proc_dopipe_max_size,
	},
	{
		.procname	= "pipe-user-pages-hard",
		.data		= &pipe_user_pages_hard,
		.maxlen		= sizeof(pipe_user_pages_hard),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "pipe-user-pages-soft",
		.data		= &pipe_user_pages_soft,
		.maxlen		= sizeof(pipe_user_pages_soft),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
};
#endif

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
#ifdef CONFIG_SYSCTL
	register_sysctl_init("fs", fs_pipe_sysctls);
#endif
	return err;
}

fs_initcall(init_pipe_fs);
1/*
2 * linux/fs/pipe.c
3 *
4 * Copyright (C) 1991, 1992, 1999 Linus Torvalds
5 */
6
7#include <linux/mm.h>
8#include <linux/file.h>
9#include <linux/poll.h>
10#include <linux/slab.h>
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/fs.h>
14#include <linux/log2.h>
15#include <linux/mount.h>
16#include <linux/pipe_fs_i.h>
17#include <linux/uio.h>
18#include <linux/highmem.h>
19#include <linux/pagemap.h>
20#include <linux/audit.h>
21#include <linux/syscalls.h>
22#include <linux/fcntl.h>
23
24#include <asm/uaccess.h>
25#include <asm/ioctls.h>
26
27/*
28 * The max size that a non-root user is allowed to grow the pipe. Can
29 * be set by root in /proc/sys/fs/pipe-max-size
30 */
31unsigned int pipe_max_size = 1048576;
32
33/*
34 * Minimum pipe size, as required by POSIX
35 */
36unsigned int pipe_min_size = PAGE_SIZE;
37
38/*
39 * We use a start+len construction, which provides full use of the
40 * allocated memory.
41 * -- Florian Coosmann (FGC)
42 *
43 * Reads with count = 0 should always return 0.
44 * -- Julian Bradfield 1999-06-07.
45 *
46 * FIFOs and Pipes now generate SIGIO for both readers and writers.
47 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
48 *
49 * pipe_read & write cleanup
50 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
51 */
52
53static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
54{
55 if (pipe->inode)
56 mutex_lock_nested(&pipe->inode->i_mutex, subclass);
57}
58
59void pipe_lock(struct pipe_inode_info *pipe)
60{
61 /*
62 * pipe_lock() nests non-pipe inode locks (for writing to a file)
63 */
64 pipe_lock_nested(pipe, I_MUTEX_PARENT);
65}
66EXPORT_SYMBOL(pipe_lock);
67
68void pipe_unlock(struct pipe_inode_info *pipe)
69{
70 if (pipe->inode)
71 mutex_unlock(&pipe->inode->i_mutex);
72}
73EXPORT_SYMBOL(pipe_unlock);
74
75void pipe_double_lock(struct pipe_inode_info *pipe1,
76 struct pipe_inode_info *pipe2)
77{
78 BUG_ON(pipe1 == pipe2);
79
80 if (pipe1 < pipe2) {
81 pipe_lock_nested(pipe1, I_MUTEX_PARENT);
82 pipe_lock_nested(pipe2, I_MUTEX_CHILD);
83 } else {
84 pipe_lock_nested(pipe2, I_MUTEX_PARENT);
85 pipe_lock_nested(pipe1, I_MUTEX_CHILD);
86 }
87}
88
89/* Drop the inode semaphore and wait for a pipe event, atomically */
90void pipe_wait(struct pipe_inode_info *pipe)
91{
92 DEFINE_WAIT(wait);
93
94 /*
95 * Pipes are system-local resources, so sleeping on them
96 * is considered a noninteractive wait:
97 */
98 prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
99 pipe_unlock(pipe);
100 schedule();
101 finish_wait(&pipe->wait, &wait);
102 pipe_lock(pipe);
103}
104
105static int
106pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
107 int atomic)
108{
109 unsigned long copy;
110
111 while (len > 0) {
112 while (!iov->iov_len)
113 iov++;
114 copy = min_t(unsigned long, len, iov->iov_len);
115
116 if (atomic) {
117 if (__copy_from_user_inatomic(to, iov->iov_base, copy))
118 return -EFAULT;
119 } else {
120 if (copy_from_user(to, iov->iov_base, copy))
121 return -EFAULT;
122 }
123 to += copy;
124 len -= copy;
125 iov->iov_base += copy;
126 iov->iov_len -= copy;
127 }
128 return 0;
129}
130
131static int
132pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
133 int atomic)
134{
135 unsigned long copy;
136
137 while (len > 0) {
138 while (!iov->iov_len)
139 iov++;
140 copy = min_t(unsigned long, len, iov->iov_len);
141
142 if (atomic) {
143 if (__copy_to_user_inatomic(iov->iov_base, from, copy))
144 return -EFAULT;
145 } else {
146 if (copy_to_user(iov->iov_base, from, copy))
147 return -EFAULT;
148 }
149 from += copy;
150 len -= copy;
151 iov->iov_base += copy;
152 iov->iov_len -= copy;
153 }
154 return 0;
155}
156
157/*
158 * Attempt to pre-fault in the user memory, so we can use atomic copies.
159 * Returns the number of bytes not faulted in.
160 */
161static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
162{
163 while (!iov->iov_len)
164 iov++;
165
166 while (len > 0) {
167 unsigned long this_len;
168
169 this_len = min_t(unsigned long, len, iov->iov_len);
170 if (fault_in_pages_writeable(iov->iov_base, this_len))
171 break;
172
173 len -= this_len;
174 iov++;
175 }
176
177 return len;
178}
179
180/*
181 * Pre-fault in the user memory, so we can use atomic copies.
182 */
183static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
184{
185 while (!iov->iov_len)
186 iov++;
187
188 while (len > 0) {
189 unsigned long this_len;
190
191 this_len = min_t(unsigned long, len, iov->iov_len);
192 fault_in_pages_readable(iov->iov_base, this_len);
193 len -= this_len;
194 iov++;
195 }
196}
197
198static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
199 struct pipe_buffer *buf)
200{
201 struct page *page = buf->page;
202
203 /*
204 * If nobody else uses this page, and we don't already have a
205 * temporary page, let's keep track of it as a one-deep
206 * allocation cache. (Otherwise just release our reference to it)
207 */
208 if (page_count(page) == 1 && !pipe->tmp_page)
209 pipe->tmp_page = page;
210 else
211 page_cache_release(page);
212}
213
214/**
215 * generic_pipe_buf_map - virtually map a pipe buffer
216 * @pipe: the pipe that the buffer belongs to
217 * @buf: the buffer that should be mapped
218 * @atomic: whether to use an atomic map
219 *
220 * Description:
221 * This function returns a kernel virtual address mapping for the
222 * pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
223 * and the caller has to be careful not to fault before calling
224 * the unmap function.
225 *
226 * Note that this function occupies KM_USER0 if @atomic != 0.
227 */
228void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
229 struct pipe_buffer *buf, int atomic)
230{
231 if (atomic) {
232 buf->flags |= PIPE_BUF_FLAG_ATOMIC;
233 return kmap_atomic(buf->page, KM_USER0);
234 }
235
236 return kmap(buf->page);
237}
238EXPORT_SYMBOL(generic_pipe_buf_map);
239
240/**
241 * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
242 * @pipe: the pipe that the buffer belongs to
243 * @buf: the buffer that should be unmapped
244 * @map_data: the data that the mapping function returned
245 *
246 * Description:
247 * This function undoes the mapping that ->map() provided.
248 */
249void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
250 struct pipe_buffer *buf, void *map_data)
251{
252 if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
253 buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
254 kunmap_atomic(map_data, KM_USER0);
255 } else
256 kunmap(buf->page);
257}
258EXPORT_SYMBOL(generic_pipe_buf_unmap);
259
260/**
261 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
262 * @pipe: the pipe that the buffer belongs to
263 * @buf: the buffer to attempt to steal
264 *
265 * Description:
266 * This function attempts to steal the &struct page attached to
267 * @buf. If successful, this function returns 0 and returns with
268 * the page locked. The caller may then reuse the page for whatever
269 * he wishes; the typical use is insertion into a different file
270 * page cache.
271 */
272int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
273 struct pipe_buffer *buf)
274{
275 struct page *page = buf->page;
276
277 /*
278 * A reference of one is golden, that means that the owner of this
279 * page is the only one holding a reference to it. lock the page
280 * and return OK.
281 */
282 if (page_count(page) == 1) {
283 lock_page(page);
284 return 0;
285 }
286
287 return 1;
288}
289EXPORT_SYMBOL(generic_pipe_buf_steal);
290
291/**
292 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
293 * @pipe: the pipe that the buffer belongs to
294 * @buf: the buffer to get a reference to
295 *
296 * Description:
297 * This function grabs an extra reference to @buf. It's used in
298 * in the tee() system call, when we duplicate the buffers in one
299 * pipe into another.
300 */
301void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
302{
303 page_cache_get(buf->page);
304}
305EXPORT_SYMBOL(generic_pipe_buf_get);
306
307/**
308 * generic_pipe_buf_confirm - verify contents of the pipe buffer
309 * @info: the pipe that the buffer belongs to
310 * @buf: the buffer to confirm
311 *
312 * Description:
313 * This function does nothing, because the generic pipe code uses
314 * pages that are always good when inserted into the pipe.
315 */
316int generic_pipe_buf_confirm(struct pipe_inode_info *info,
317 struct pipe_buffer *buf)
318{
319 return 0;
320}
321EXPORT_SYMBOL(generic_pipe_buf_confirm);
322
323/**
324 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
325 * @pipe: the pipe that the buffer belongs to
326 * @buf: the buffer to put a reference to
327 *
328 * Description:
329 * This function releases a reference to @buf.
330 */
331void generic_pipe_buf_release(struct pipe_inode_info *pipe,
332 struct pipe_buffer *buf)
333{
334 page_cache_release(buf->page);
335}
336EXPORT_SYMBOL(generic_pipe_buf_release);
337
338static const struct pipe_buf_operations anon_pipe_buf_ops = {
339 .can_merge = 1,
340 .map = generic_pipe_buf_map,
341 .unmap = generic_pipe_buf_unmap,
342 .confirm = generic_pipe_buf_confirm,
343 .release = anon_pipe_buf_release,
344 .steal = generic_pipe_buf_steal,
345 .get = generic_pipe_buf_get,
346};
347
348static ssize_t
349pipe_read(struct kiocb *iocb, const struct iovec *_iov,
350 unsigned long nr_segs, loff_t pos)
351{
352 struct file *filp = iocb->ki_filp;
353 struct inode *inode = filp->f_path.dentry->d_inode;
354 struct pipe_inode_info *pipe;
355 int do_wakeup;
356 ssize_t ret;
357 struct iovec *iov = (struct iovec *)_iov;
358 size_t total_len;
359
360 total_len = iov_length(iov, nr_segs);
361 /* Null read succeeds. */
362 if (unlikely(total_len == 0))
363 return 0;
364
365 do_wakeup = 0;
366 ret = 0;
367 mutex_lock(&inode->i_mutex);
368 pipe = inode->i_pipe;
369 for (;;) {
370 int bufs = pipe->nrbufs;
371 if (bufs) {
372 int curbuf = pipe->curbuf;
373 struct pipe_buffer *buf = pipe->bufs + curbuf;
374 const struct pipe_buf_operations *ops = buf->ops;
375 void *addr;
376 size_t chars = buf->len;
377 int error, atomic;
378
379 if (chars > total_len)
380 chars = total_len;
381
382 error = ops->confirm(pipe, buf);
383 if (error) {
384 if (!ret)
385 ret = error;
386 break;
387 }
388
389 atomic = !iov_fault_in_pages_write(iov, chars);
390redo:
391 addr = ops->map(pipe, buf, atomic);
392 error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
393 ops->unmap(pipe, buf, addr);
394 if (unlikely(error)) {
395 /*
396 * Just retry with the slow path if we failed.
397 */
398 if (atomic) {
399 atomic = 0;
400 goto redo;
401 }
402 if (!ret)
403 ret = error;
404 break;
405 }
406 ret += chars;
407 buf->offset += chars;
408 buf->len -= chars;
409 if (!buf->len) {
410 buf->ops = NULL;
411 ops->release(pipe, buf);
412 curbuf = (curbuf + 1) & (pipe->buffers - 1);
413 pipe->curbuf = curbuf;
414 pipe->nrbufs = --bufs;
415 do_wakeup = 1;
416 }
417 total_len -= chars;
418 if (!total_len)
419 break; /* common path: read succeeded */
420 }
421 if (bufs) /* More to do? */
422 continue;
423 if (!pipe->writers)
424 break;
425 if (!pipe->waiting_writers) {
426 /* syscall merging: Usually we must not sleep
427 * if O_NONBLOCK is set, or if we got some data.
428 * But if a writer sleeps in kernel space, then
429 * we can wait for that data without violating POSIX.
430 */
431 if (ret)
432 break;
433 if (filp->f_flags & O_NONBLOCK) {
434 ret = -EAGAIN;
435 break;
436 }
437 }
438 if (signal_pending(current)) {
439 if (!ret)
440 ret = -ERESTARTSYS;
441 break;
442 }
443 if (do_wakeup) {
444 wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
445 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
446 }
447 pipe_wait(pipe);
448 }
449 mutex_unlock(&inode->i_mutex);
450
451 /* Signal writers asynchronously that there is more room. */
452 if (do_wakeup) {
453 wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
454 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
455 }
456 if (ret > 0)
457 file_accessed(filp);
458 return ret;
459}
460
461static ssize_t
462pipe_write(struct kiocb *iocb, const struct iovec *_iov,
463 unsigned long nr_segs, loff_t ppos)
464{
465 struct file *filp = iocb->ki_filp;
466 struct inode *inode = filp->f_path.dentry->d_inode;
467 struct pipe_inode_info *pipe;
468 ssize_t ret;
469 int do_wakeup;
470 struct iovec *iov = (struct iovec *)_iov;
471 size_t total_len;
472 ssize_t chars;
473
474 total_len = iov_length(iov, nr_segs);
475 /* Null write succeeds. */
476 if (unlikely(total_len == 0))
477 return 0;
478
479 do_wakeup = 0;
480 ret = 0;
481 mutex_lock(&inode->i_mutex);
482 pipe = inode->i_pipe;
483
484 if (!pipe->readers) {
485 send_sig(SIGPIPE, current, 0);
486 ret = -EPIPE;
487 goto out;
488 }
489
490 /* We try to merge small writes */
491 chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		const struct pipe_buf_operations *ops = buf->ops;
		int offset = buf->offset + buf->len;

		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			int error, atomic = 1;
			void *addr;

			error = ops->confirm(pipe, buf);
			if (error)
				goto out;

			iov_fault_in_pages_read(iov, chars);
redo1:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_from_user(offset + addr, iov,
							chars, atomic);
			ops->unmap(pipe, buf, addr);
			ret = error;
			do_wakeup = 1;
			if (error) {
				if (atomic) {
					atomic = 0;
					goto redo1;
				}
				goto out;
			}
			buf->len += chars;
			total_len -= chars;
			ret = chars;
			if (!total_len)
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			char *src;
			int error, atomic = 1;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			chars = PAGE_SIZE;
			if (chars > total_len)
				chars = total_len;

			iov_fault_in_pages_read(iov, chars);
redo2:
			if (atomic)
				src = kmap_atomic(page, KM_USER0);
			else
				src = kmap(page);

			error = pipe_iov_copy_from_user(src, iov, chars,
							atomic);
			if (atomic)
				kunmap_atomic(src, KM_USER0);
			else
				kunmap(page);

			if (unlikely(error)) {
				if (atomic) {
					atomic = 0;
					goto redo2;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = chars;
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			total_len -= chars;
			if (!total_len)
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0)
		file_update_time(filp);
	return ret;
}

static ssize_t
bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	return -EBADF;
}

static ssize_t
bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
	   loff_t *ppos)
{
	return -EBADF;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	int count, buf, nrbufs;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&inode->i_mutex);
		pipe = inode->i_pipe;
		count = 0;
		buf = pipe->curbuf;
		nrbufs = pipe->nrbufs;
		while (--nrbufs >= 0) {
			count += pipe->bufs[buf].len;
			buf = (buf+1) & (pipe->buffers - 1);
		}
		mutex_unlock(&inode->i_mutex);

		return put_user(count, (int __user *)arg);
	default:
		return -EINVAL;
	}
}
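
/*
 * Illustrative userspace sketch (not part of this file): FIONREAD
 * reports how many bytes are currently buffered in the pipe, which is
 * exactly what the loop above computes by summing buf->len over the
 * occupied ring slots.
 *
 *	int avail;
 *	if (ioctl(pipefd[0], FIONREAD, &avail) == 0)
 *		printf("%d bytes waiting to be read\n", avail);
 */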

/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe = inode->i_pipe;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}
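
/*
 * Illustrative userspace sketch (not part of this file): the mask
 * computed above is what poll(2) reports for a pipe fd, e.g. waiting
 * for the read end to become readable:
 *
 *	struct pollfd pfd = { .fd = pipefd[0], .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLHUP))
 *		;	// all writers have closed their end
 */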

static int
pipe_release(struct inode *inode, int decr, int decw)
{
	struct pipe_inode_info *pipe;

	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	pipe->readers -= decr;
	pipe->writers -= decw;

	if (!pipe->readers && !pipe->writers) {
		free_pipe_info(inode);
	} else {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	mutex_unlock(&inode->i_mutex);

	return 0;
}

static int
pipe_read_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers);
	mutex_unlock(&inode->i_mutex);

	return retval;
}


static int
pipe_write_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers);
	mutex_unlock(&inode->i_mutex);

	return retval;
}


static int
pipe_rdwr_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe = inode->i_pipe;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if (retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0) /* this can only happen when enabling (on != 0) */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	mutex_unlock(&inode->i_mutex);
	return retval;
}


static int
pipe_read_release(struct inode *inode, struct file *filp)
{
	return pipe_release(inode, 1, 0);
}

static int
pipe_write_release(struct inode *inode, struct file *filp)
{
	return pipe_release(inode, 0, 1);
}

static int
pipe_rdwr_release(struct inode *inode, struct file *filp)
{
	int decr, decw;

	decr = (filp->f_mode & FMODE_READ) != 0;
	decw = (filp->f_mode & FMODE_WRITE) != 0;
	return pipe_release(inode, decr, decw);
}

static int
pipe_read_open(struct inode *inode, struct file *filp)
{
	int ret = -ENOENT;

	mutex_lock(&inode->i_mutex);

	if (inode->i_pipe) {
		ret = 0;
		inode->i_pipe->readers++;
	}

	mutex_unlock(&inode->i_mutex);

	return ret;
}

static int
pipe_write_open(struct inode *inode, struct file *filp)
{
	int ret = -ENOENT;

	mutex_lock(&inode->i_mutex);

	if (inode->i_pipe) {
		ret = 0;
		inode->i_pipe->writers++;
	}

	mutex_unlock(&inode->i_mutex);

	return ret;
}

static int
pipe_rdwr_open(struct inode *inode, struct file *filp)
{
	int ret = -ENOENT;

	mutex_lock(&inode->i_mutex);

	if (inode->i_pipe) {
		ret = 0;
		if (filp->f_mode & FMODE_READ)
			inode->i_pipe->readers++;
		if (filp->f_mode & FMODE_WRITE)
			inode->i_pipe->writers++;
	}

	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * The file_operations structs are not static because they
 * are also used in linux/fs/fifo.c to do operations on FIFOs.
 *
 * Pipes reuse fifos' file_operations structs.
 */
const struct file_operations read_pipefifo_fops = {
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= bad_pipe_w,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.open		= pipe_read_open,
	.release	= pipe_read_release,
	.fasync		= pipe_read_fasync,
};

const struct file_operations write_pipefifo_fops = {
	.llseek		= no_llseek,
	.read		= bad_pipe_r,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.open		= pipe_write_open,
	.release	= pipe_write_release,
	.fasync		= pipe_write_fasync,
};

const struct file_operations rdwr_pipefifo_fops = {
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.open		= pipe_rdwr_open,
	.release	= pipe_rdwr_release,
	.fasync		= pipe_rdwr_fasync,
};

struct pipe_inode_info *alloc_pipe_info(struct inode *inode)
{
	struct pipe_inode_info *pipe;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
	if (pipe) {
		pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
		if (pipe->bufs) {
			init_waitqueue_head(&pipe->wait);
			pipe->r_counter = pipe->w_counter = 1;
			pipe->inode = inode;
			pipe->buffers = PIPE_DEF_BUFFERS;
			return pipe;
		}
		kfree(pipe);
	}

	return NULL;
}
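
/*
 * Capacity note (assuming the usual PIPE_DEF_BUFFERS of 16 and 4 KiB
 * pages): a freshly allocated pipe can buffer 16 * 4096 = 65536 bytes
 * before a writer blocks; F_SETPIPE_SZ (see pipe_fcntl() below) can
 * grow or shrink this.
 */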

void __free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			buf->ops->release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

void free_pipe_info(struct inode *inode)
{
	__free_pipe_info(inode->i_pipe);
	inode->i_pipe = NULL;
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				dentry->d_inode->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode *get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info(inode);
	if (!pipe)
		goto fail_iput;
	inode->i_pipe = pipe;

	pipe->readers = pipe->writers = 1;
	inode->i_fop = &rdwr_pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

struct file *create_write_pipe(int flags)
{
	int err;
	struct inode *inode;
	struct file *f;
	struct path path;
	struct qstr name = { .name = "" };

	err = -ENFILE;
	inode = get_pipe_inode();
	if (!inode)
		goto err;

	err = -ENOMEM;
	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
	if (!path.dentry)
		goto err_inode;
	path.mnt = mntget(pipe_mnt);

	d_instantiate(path.dentry, inode);

	err = -ENFILE;
	f = alloc_file(&path, FMODE_WRITE, &write_pipefifo_fops);
	if (!f)
		goto err_dentry;
	f->f_mapping = inode->i_mapping;

	f->f_flags = O_WRONLY | (flags & O_NONBLOCK);
	f->f_version = 0;

	return f;

 err_dentry:
	free_pipe_info(inode);
	path_put(&path);
	return ERR_PTR(err);

 err_inode:
	free_pipe_info(inode);
	iput(inode);
 err:
	return ERR_PTR(err);
}

void free_write_pipe(struct file *f)
{
	free_pipe_info(f->f_dentry->d_inode);
	path_put(&f->f_path);
	put_filp(f);
}

struct file *create_read_pipe(struct file *wrf, int flags)
{
	/* Grab pipe from the writer */
	struct file *f = alloc_file(&wrf->f_path, FMODE_READ,
				    &read_pipefifo_fops);
	if (!f)
		return ERR_PTR(-ENFILE);

	path_get(&wrf->f_path);
	f->f_flags = O_RDONLY | (flags & O_NONBLOCK);

	return f;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *fw, *fr;
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK))
		return -EINVAL;

	fw = create_write_pipe(flags);
	if (IS_ERR(fw))
		return PTR_ERR(fw);
	fr = create_read_pipe(fw, flags);
	error = PTR_ERR(fr);
	if (IS_ERR(fr))
		goto err_write_pipe;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd_install(fdr, fr);
	fd_install(fdw, fw);
	fd[0] = fdr;
	fd[1] = fdw;

	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	path_put(&fr->f_path);
	put_filp(fr);
 err_write_pipe:
	free_write_pipe(fw);
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, flags);
	if (!error) {
		if (copy_to_user(fildes, fd, sizeof(fd))) {
			sys_close(fd[0]);
			sys_close(fd[1]);
			error = -EFAULT;
		}
	}
	return error;
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return sys_pipe2(fildes, 0);
}
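
/*
 * Illustrative userspace sketch (not part of this file): pipe2() is
 * pipe() plus flags, applying O_CLOEXEC/O_NONBLOCK atomically at
 * creation instead of via separate, racy fcntl() calls:
 *
 *	int fd[2];
 *	if (pipe2(fd, O_CLOEXEC | O_NONBLOCK) == 0) {
 *		// fd[0] is the read end, fd[1] the write end
 *	}
 */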

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns
 * the new pipe size in bytes on success, or a negative error code on
 * failure.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
	struct pipe_buffer *bufs;

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs)
		return -EBUSY;

	bufs = kcalloc(nr_pages, sizeof(struct pipe_buffer), GFP_KERNEL);
	if (unlikely(!bufs))
		return -ENOMEM;

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;
}
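
/*
 * Worked example for the copy above: with buffers = 8, curbuf = 6 and
 * nrbufs = 4, the occupied slots are 6, 7, 0, 1. tail becomes
 * (6 + 4) & 7 = 2 and head becomes 4 - 2 = 2, so slots 6..7 are copied
 * to the front of the new array and slots 0..1 right after them,
 * linearizing the ring at index 0.
 */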

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages.
 */
static inline unsigned int round_pipe_size(unsigned int size)
{
	unsigned long nr_pages;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}
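
/*
 * Worked example (assuming 4 KiB pages): a request for 100000 bytes
 * needs 25 pages, which rounds up to the next power of two, 32 pages,
 * so the function returns 32 << 12 = 131072 bytes.
 */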

/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
		 size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
	if (ret < 0 || !write)
		return ret;

	pipe_max_size = round_pipe_size(pipe_max_size);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	struct inode *i = file->f_path.dentry->d_inode;

	return S_ISFIFO(i->i_mode) ? i->i_pipe : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	mutex_lock(&pipe->inode->i_mutex);

	switch (cmd) {
	case F_SETPIPE_SZ: {
		unsigned int size, nr_pages;

		size = round_pipe_size(arg);
		nr_pages = size >> PAGE_SHIFT;

		ret = -EINVAL;
		if (!nr_pages)
			goto out;

		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
			ret = -EPERM;
			goto out;
		}
		ret = pipe_set_size(pipe, nr_pages);
		break;
	}
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}
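
/*
 * Illustrative userspace sketch (not part of this file): resizing a
 * pipe and reading back its capacity. The requested size is rounded up
 * by round_pipe_size(), so F_GETPIPE_SZ may report more than was asked
 * for:
 *
 *	if (fcntl(fd[1], F_SETPIPE_SZ, 1 << 20) < 0)
 *		perror("F_SETPIPE_SZ");
 *	long cap = fcntl(fd[1], F_GETPIPE_SZ);	// e.g. 1048576
 */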

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

static void __exit exit_pipe_fs(void)
{
	kern_unmount(pipe_mnt);
	unregister_filesystem(&pipe_fs_type);
}

fs_initcall(init_pipe_fs);
module_exit(exit_pipe_fs);