// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/pipe.c
 *
 * Copyright (C) 1991, 1992, 1999 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally. This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
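
/*
 * Editor's illustration (a sketch, not part of the build): because head
 * and tail wrap naturally as unsigned ints, occupancy and emptiness are
 * plain comparisons that stay correct across the 2^32 wraparound:
 *
 *	unsigned int head = 0x80000002, tail = 0x7ffffffe;
 *	unsigned int occupancy = head - tail;	// 4 slots in use
 *	bool empty = (head == tail);		// false
 *	// dereference only after masking:
 *	// buf = &pipe->bufs[tail & (pipe->ring_size - 1)];
 *
 * This is also why ring_size must be a power of two: the mask works only
 * when ring_size - 1 is an all-ones bit pattern.
 */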

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
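
/*
 * Editor's note: callers that must hold two pipes at once (e.g. splicing
 * between pipes) rely on the address ordering above. A minimal usage
 * sketch, assuming ipipe and opipe are two distinct pipes:
 *
 *	pipe_double_lock(ipipe, opipe);	// safe in either argument order
 *	// ... move or duplicate buffers ...
 *	pipe_unlock(ipipe);
 *	pipe_unlock(opipe);
 *
 * Ordering the two locks by address imposes one global lock order, so two
 * tasks crossing the same pair of pipes cannot deadlock.
 */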

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) != 1)
		return false;
	memcg_kmem_uncharge_page(page, 0);
	__SetPageLocked(page);
	return true;
}

/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns true with the page
 *	locked. The caller may then reuse the page for whatever it
 *	wishes; the typical use is insertion into a different file
 *	page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. Lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.release	= anon_pipe_buf_release,
	.try_steal	= anon_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full, wake_next_reader = false;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		unsigned int head = pipe->head;
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
		if (pipe->note_loss) {
			struct watch_notification n;

			if (total_len < 8) {
				if (ret == 0)
					ret = -ENOBUFS;
				break;
			}

			n.type = WATCH_TYPE_META;
			n.subtype = WATCH_META_LOSS_NOTIFICATION;
			n.info = watch_sizeof(n);
			if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}
			ret += sizeof(n);
			total_len -= sizeof(n);
			pipe->note_loss = false;
		}
#endif

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len) {
				if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
					if (ret == 0)
						ret = -ENOBUFS;
					break;
				}
				chars = total_len;
			}

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
				if (buf->flags & PIPE_BUF_FLAG_LOSS)
					pipe->note_loss = true;
#endif
				tail++;
				pipe->tail = tail;
				spin_unlock_irq(&pipe->rd_wait.lock);
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		__pipe_unlock(pipe);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(was_full)) {
			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		__pipe_lock(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
		wake_next_reader = true;
	}
	if (pipe_empty(pipe->head, pipe->tail))
		wake_next_reader = false;
	__pipe_unlock(pipe);

	if (was_full) {
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (wake_next_reader)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
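
/*
 * Editor's illustration (userspace, not part of the build): a pipe created
 * with O_DIRECT runs in "packet" mode - each write() becomes one packet and
 * a read() returns at most one packet, discarding the unread remainder
 * (that is the PIPE_BUF_FLAG_PACKET handling in pipe_read() above):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	char buf[4];
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "hello", 5);	// first packet
 *	write(fds[1], "world", 5);	// second packet
 *	read(fds[0], buf, 4);		// returns 4 ("hell"); "o" is dropped
 *	read(fds[0], buf, 4);		// returns 4 ("worl") - next packet
 */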

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
	       !READ_ONCE(pipe->readers);
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;
	bool wake_next_writer = false;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue) {
		ret = -EXDEV;
		goto out;
	}
#endif

	/*
	 * Only wake up if the pipe started out empty, since
	 * otherwise there should be no readers waiting.
	 *
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
		    offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf = &pipe->bufs[head & mask];
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer. If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			spin_lock_irq(&pipe->rd_wait.lock);

			head = pipe->head;
			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
				spin_unlock_irq(&pipe->rd_wait.lock);
				continue;
			}

			pipe->head = head + 1;
			spin_unlock_irq(&pipe->rd_wait.lock);

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			if (is_packetized(filp))
				buf->flags = PIPE_BUF_FLAG_PACKET;
			else
				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->offset = 0;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		__pipe_unlock(pipe);
		if (was_empty) {
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		}
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		__pipe_lock(pipe);
		was_empty = pipe_empty(pipe->head, pipe->tail);
		wake_next_writer = true;
	}
out:
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		wake_next_writer = false;
	__pipe_unlock(pipe);

	/*
	 * If we do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 */
	if (was_empty) {
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (wake_next_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		__pipe_lock(pipe);
		ret = watch_queue_set_size(pipe, arg);
		__pipe_unlock(pipe);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}
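
/*
 * Editor's illustration (userspace, not part of the build): FIONREAD asks
 * how many bytes are queued without consuming them, which is exactly the
 * sum over occupied ring slots computed above:
 *
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int queued;
 *	write(fds[1], "abc", 3);
 *	ioctl(fds[0], FIONREAD, &queued);	// queued == 3
 */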

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/*
	 * Reading pipe state only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}
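
/*
 * Editor's illustration (userspace, not part of the build): the mask built
 * above is what poll(2)/epoll report for a pipe, e.g. waiting for data:
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fds[0], .events = POLLIN };
 *	poll(&pfd, 1, -1);	// blocks until data or writer hangup
 *	// pfd.revents: POLLIN when the ring is non-empty, POLLHUP once
 *	// all writers have closed.
 */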

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

bool pipe_is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
		pipe_bufs = 1;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue) {
		watch_queue_clear(pipe->watch_queue);
		put_watch_queue(pipe->watch_queue);
	}
#endif

	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;
	int error;

	if (!inode)
		return -ENFILE;

	if (flags & O_NOTIFICATION_PIPE) {
		error = watch_queue_init(inode->i_pipe);
		if (error) {
			free_pipe_info(inode->i_pipe);
			iput(inode);
			return error;
		}
	}

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
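
/*
 * Editor's illustration (userspace, not part of the build): the flag check
 * in __do_pipe_flags() means pipe2(2) accepts exactly O_CLOEXEC,
 * O_NONBLOCK, O_DIRECT and O_NOTIFICATION_PIPE; anything else fails with
 * -EINVAL. Typical use:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *		return 1;	// error handling omitted
 *	// fds[0] is the read end, fds[1] the write end - matching
 *	// res[0]/res[1] in create_pipe_files() above.
 */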

/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}

/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on an O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}
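
/*
 * Editor's illustration (userspace, not part of the build): the switch
 * above gives FIFOs their classic open(2) semantics - a blocking read-only
 * or write-only open waits for a partner, O_RDWR never blocks, and a
 * non-blocking write-only open fails until a reader exists ("/tmp/f" is a
 * hypothetical path):
 *
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *
 *	mkfifo("/tmp/f", 0600);
 *	int w = open("/tmp/f", O_WRONLY | O_NONBLOCK);	// fails, ENXIO
 *	int r = open("/tmp/f", O_RDONLY | O_NONBLOCK);	// succeeds at once
 *	w = open("/tmp/f", O_WRONLY | O_NONBLOCK);	// now succeeds
 */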

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
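
/*
 * Editor's worked example (assuming PAGE_SIZE == 4096):
 *
 *	round_pipe_size(0)         -> 4096	(clamped up to one page)
 *	round_pipe_size(100000)    -> 131072	(next power of two, 2^17)
 *	round_pipe_size(1UL << 32) -> 0		(over 2^31, rejected)
 */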

/*
 * Resize the pipe ring to a number of slots.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	struct pipe_buffer *bufs;
	unsigned int head, tail, mask, n;

	/*
	 * We can shrink the pipe, if arg is greater than the ring occupancy.
	 * Since we don't expect a lot of shrink+grow operations, just free and
	 * allocate again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	n = pipe_occupancy(pipe->head, pipe->tail);
	if (nr_slots < n)
		return -EBUSY;

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}
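
/*
 * Editor's illustration of the unwrap copy above. With ring_size = 4,
 * tail = 3 and head = 5, two occupied slots straddle the array edge:
 *
 *	old bufs: [B][ ][ ][A]	A at slot (tail & mask) == 3, B at slot 0
 *	new bufs: [A][B][ ][ ]	tail becomes 0, head becomes n == 2
 *
 * so the occupied span is always contiguous from index 0 in the new array.
 */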

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	unsigned long user_bufs;
	unsigned int nr_slots, size;
	long ret = 0;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		return -EBUSY;
#endif

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		goto out_revert_acct;

	pipe->max_usage = nr_slots;
	pipe->nr_accounted = nr_slots;
	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
	return ret;
}
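
/*
 * Editor's illustration (userspace, not part of the build): pipe_set_size()
 * backs fcntl(F_SETPIPE_SZ); the return value is the rounded-up capacity in
 * bytes, which F_GETPIPE_SZ then reports back:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int newsz = fcntl(fds[1], F_SETPIPE_SZ, 100000);
 *	// newsz == 131072 on a 4K-page system (see round_pipe_size())
 *	int cursz = fcntl(fds[1], F_GETPIPE_SZ, 0);	// also 131072
 */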

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;

	if (file->f_op != &pipefifo_fops || !pipe)
		return NULL;
#ifdef CONFIG_WATCH_QUEUE
	if (for_splice && pipe->watch_queue)
		return NULL;
#endif
	return pipe;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/fs/pipe.c
4 *
5 * Copyright (C) 1991, 1992, 1999 Linus Torvalds
6 */
7
8#include <linux/mm.h>
9#include <linux/file.h>
10#include <linux/poll.h>
11#include <linux/slab.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/fs.h>
15#include <linux/log2.h>
16#include <linux/mount.h>
17#include <linux/pseudo_fs.h>
18#include <linux/magic.h>
19#include <linux/pipe_fs_i.h>
20#include <linux/uio.h>
21#include <linux/highmem.h>
22#include <linux/pagemap.h>
23#include <linux/audit.h>
24#include <linux/syscalls.h>
25#include <linux/fcntl.h>
26#include <linux/memcontrol.h>
27#include <linux/watch_queue.h>
28#include <linux/sysctl.h>
29
30#include <linux/uaccess.h>
31#include <asm/ioctls.h>
32
33#include "internal.h"
34
35/*
36 * New pipe buffers will be restricted to this size while the user is exceeding
37 * their pipe buffer quota. The general pipe use case needs at least two
38 * buffers: one for data yet to be read, and one for new data. If this is less
39 * than two, then a write to a non-empty pipe may block even if the pipe is not
40 * full. This can occur with GNU make jobserver or similar uses of pipes as
41 * semaphores: multiple processes may be waiting to write tokens back to the
42 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
43 *
44 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
45 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
46 * emptied.
47 */
48#define PIPE_MIN_DEF_BUFFERS 2
49
50/*
51 * The max size that a non-root user is allowed to grow the pipe. Can
52 * be set by root in /proc/sys/fs/pipe-max-size
53 */
54static unsigned int pipe_max_size = 1048576;
55
56/* Maximum allocatable pages per user. Hard limit is unset by default, soft
57 * matches default values.
58 */
59static unsigned long pipe_user_pages_hard;
60static unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
61
62/*
63 * We use head and tail indices that aren't masked off, except at the point of
64 * dereference, but rather they're allowed to wrap naturally. This means there
65 * isn't a dead spot in the buffer, but the ring has to be a power of two and
66 * <= 2^31.
67 * -- David Howells 2019-09-23.
68 *
69 * Reads with count = 0 should always return 0.
70 * -- Julian Bradfield 1999-06-07.
71 *
72 * FIFOs and Pipes now generate SIGIO for both readers and writers.
73 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
74 *
75 * pipe_read & write cleanup
76 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
77 */
78
79static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
80{
81 if (pipe->files)
82 mutex_lock_nested(&pipe->mutex, subclass);
83}
84
85void pipe_lock(struct pipe_inode_info *pipe)
86{
87 /*
88 * pipe_lock() nests non-pipe inode locks (for writing to a file)
89 */
90 pipe_lock_nested(pipe, I_MUTEX_PARENT);
91}
92EXPORT_SYMBOL(pipe_lock);
93
94void pipe_unlock(struct pipe_inode_info *pipe)
95{
96 if (pipe->files)
97 mutex_unlock(&pipe->mutex);
98}
99EXPORT_SYMBOL(pipe_unlock);
100
101static inline void __pipe_lock(struct pipe_inode_info *pipe)
102{
103 mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
104}
105
106static inline void __pipe_unlock(struct pipe_inode_info *pipe)
107{
108 mutex_unlock(&pipe->mutex);
109}
110
111void pipe_double_lock(struct pipe_inode_info *pipe1,
112 struct pipe_inode_info *pipe2)
113{
114 BUG_ON(pipe1 == pipe2);
115
116 if (pipe1 < pipe2) {
117 pipe_lock_nested(pipe1, I_MUTEX_PARENT);
118 pipe_lock_nested(pipe2, I_MUTEX_CHILD);
119 } else {
120 pipe_lock_nested(pipe2, I_MUTEX_PARENT);
121 pipe_lock_nested(pipe1, I_MUTEX_CHILD);
122 }
123}
124
125static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
126 struct pipe_buffer *buf)
127{
128 struct page *page = buf->page;
129
130 /*
131 * If nobody else uses this page, and we don't already have a
132 * temporary page, let's keep track of it as a one-deep
133 * allocation cache. (Otherwise just release our reference to it)
134 */
135 if (page_count(page) == 1 && !pipe->tmp_page)
136 pipe->tmp_page = page;
137 else
138 put_page(page);
139}
140
141static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
142 struct pipe_buffer *buf)
143{
144 struct page *page = buf->page;
145
146 if (page_count(page) != 1)
147 return false;
148 memcg_kmem_uncharge_page(page, 0);
149 __SetPageLocked(page);
150 return true;
151}
152
153/**
154 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
155 * @pipe: the pipe that the buffer belongs to
156 * @buf: the buffer to attempt to steal
157 *
158 * Description:
159 * This function attempts to steal the &struct page attached to
160 * @buf. If successful, this function returns 0 and returns with
161 * the page locked. The caller may then reuse the page for whatever
162 * he wishes; the typical use is insertion into a different file
163 * page cache.
164 */
165bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
166 struct pipe_buffer *buf)
167{
168 struct page *page = buf->page;
169
170 /*
171 * A reference of one is golden, that means that the owner of this
172 * page is the only one holding a reference to it. lock the page
173 * and return OK.
174 */
175 if (page_count(page) == 1) {
176 lock_page(page);
177 return true;
178 }
179 return false;
180}
181EXPORT_SYMBOL(generic_pipe_buf_try_steal);
182
183/**
184 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
185 * @pipe: the pipe that the buffer belongs to
186 * @buf: the buffer to get a reference to
187 *
188 * Description:
189 * This function grabs an extra reference to @buf. It's used in
190 * the tee() system call, when we duplicate the buffers in one
191 * pipe into another.
192 */
193bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
194{
195 return try_get_page(buf->page);
196}
197EXPORT_SYMBOL(generic_pipe_buf_get);
198
199/**
200 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
201 * @pipe: the pipe that the buffer belongs to
202 * @buf: the buffer to put a reference to
203 *
204 * Description:
205 * This function releases a reference to @buf.
206 */
207void generic_pipe_buf_release(struct pipe_inode_info *pipe,
208 struct pipe_buffer *buf)
209{
210 put_page(buf->page);
211}
212EXPORT_SYMBOL(generic_pipe_buf_release);
213
214static const struct pipe_buf_operations anon_pipe_buf_ops = {
215 .release = anon_pipe_buf_release,
216 .try_steal = anon_pipe_buf_try_steal,
217 .get = generic_pipe_buf_get,
218};
219
220/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
221static inline bool pipe_readable(const struct pipe_inode_info *pipe)
222{
223 unsigned int head = READ_ONCE(pipe->head);
224 unsigned int tail = READ_ONCE(pipe->tail);
225 unsigned int writers = READ_ONCE(pipe->writers);
226
227 return !pipe_empty(head, tail) || !writers;
228}
229
230static inline unsigned int pipe_update_tail(struct pipe_inode_info *pipe,
231 struct pipe_buffer *buf,
232 unsigned int tail)
233{
234 pipe_buf_release(pipe, buf);
235
236 /*
237 * If the pipe has a watch_queue, we need additional protection
238 * by the spinlock because notifications get posted with only
239 * this spinlock, no mutex
240 */
241 if (pipe_has_watch_queue(pipe)) {
242 spin_lock_irq(&pipe->rd_wait.lock);
243#ifdef CONFIG_WATCH_QUEUE
244 if (buf->flags & PIPE_BUF_FLAG_LOSS)
245 pipe->note_loss = true;
246#endif
247 pipe->tail = ++tail;
248 spin_unlock_irq(&pipe->rd_wait.lock);
249 return tail;
250 }
251
252 /*
253 * Without a watch_queue, we can simply increment the tail
254 * without the spinlock - the mutex is enough.
255 */
256 pipe->tail = ++tail;
257 return tail;
258}
259
260static ssize_t
261pipe_read(struct kiocb *iocb, struct iov_iter *to)
262{
263 size_t total_len = iov_iter_count(to);
264 struct file *filp = iocb->ki_filp;
265 struct pipe_inode_info *pipe = filp->private_data;
266 bool was_full, wake_next_reader = false;
267 ssize_t ret;
268
269 /* Null read succeeds. */
270 if (unlikely(total_len == 0))
271 return 0;
272
273 ret = 0;
274 __pipe_lock(pipe);
275
276 /*
277 * We only wake up writers if the pipe was full when we started
278 * reading in order to avoid unnecessary wakeups.
279 *
280 * But when we do wake up writers, we do so using a sync wakeup
281 * (WF_SYNC), because we want them to get going and generate more
282 * data for us.
283 */
284 was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
285 for (;;) {
286 /* Read ->head with a barrier vs post_one_notification() */
287 unsigned int head = smp_load_acquire(&pipe->head);
288 unsigned int tail = pipe->tail;
289 unsigned int mask = pipe->ring_size - 1;
290
291#ifdef CONFIG_WATCH_QUEUE
292 if (pipe->note_loss) {
293 struct watch_notification n;
294
295 if (total_len < 8) {
296 if (ret == 0)
297 ret = -ENOBUFS;
298 break;
299 }
300
301 n.type = WATCH_TYPE_META;
302 n.subtype = WATCH_META_LOSS_NOTIFICATION;
303 n.info = watch_sizeof(n);
304 if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
305 if (ret == 0)
306 ret = -EFAULT;
307 break;
308 }
309 ret += sizeof(n);
310 total_len -= sizeof(n);
311 pipe->note_loss = false;
312 }
313#endif
314
315 if (!pipe_empty(head, tail)) {
316 struct pipe_buffer *buf = &pipe->bufs[tail & mask];
317 size_t chars = buf->len;
318 size_t written;
319 int error;
320
321 if (chars > total_len) {
322 if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
323 if (ret == 0)
324 ret = -ENOBUFS;
325 break;
326 }
327 chars = total_len;
328 }
329
330 error = pipe_buf_confirm(pipe, buf);
331 if (error) {
332 if (!ret)
333 ret = error;
334 break;
335 }
336
337 written = copy_page_to_iter(buf->page, buf->offset, chars, to);
338 if (unlikely(written < chars)) {
339 if (!ret)
340 ret = -EFAULT;
341 break;
342 }
343 ret += chars;
344 buf->offset += chars;
345 buf->len -= chars;
346
347 /* Was it a packet buffer? Clean up and exit */
348 if (buf->flags & PIPE_BUF_FLAG_PACKET) {
349 total_len = chars;
350 buf->len = 0;
351 }
352
353 if (!buf->len)
354 tail = pipe_update_tail(pipe, buf, tail);
355 total_len -= chars;
356 if (!total_len)
357 break; /* common path: read succeeded */
358 if (!pipe_empty(head, tail)) /* More to do? */
359 continue;
360 }
361
362 if (!pipe->writers)
363 break;
364 if (ret)
365 break;
366 if ((filp->f_flags & O_NONBLOCK) ||
367 (iocb->ki_flags & IOCB_NOWAIT)) {
368 ret = -EAGAIN;
369 break;
370 }
371 __pipe_unlock(pipe);
372
373 /*
374 * We only get here if we didn't actually read anything.
375 *
376 * However, we could have seen (and removed) a zero-sized
377 * pipe buffer, and might have made space in the buffers
378 * that way.
379 *
380 * You can't make zero-sized pipe buffers by doing an empty
381 * write (not even in packet mode), but they can happen if
382 * the writer gets an EFAULT when trying to fill a buffer
383 * that already got allocated and inserted in the buffer
384 * array.
385 *
386 * So we still need to wake up any pending writers in the
387 * _very_ unlikely case that the pipe was full, but we got
388 * no data.
389 */
390 if (unlikely(was_full))
391 wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
392 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
393
394 /*
395 * But because we didn't read anything, at this point we can
396 * just return directly with -ERESTARTSYS if we're interrupted,
397 * since we've done any required wakeups and there's no need
398 * to mark anything accessed. And we've dropped the lock.
399 */
400 if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
401 return -ERESTARTSYS;
402
403 __pipe_lock(pipe);
404 was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
405 wake_next_reader = true;
406 }
407 if (pipe_empty(pipe->head, pipe->tail))
408 wake_next_reader = false;
409 __pipe_unlock(pipe);
410
411 if (was_full)
412 wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
413 if (wake_next_reader)
414 wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
415 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
416 if (ret > 0)
417 file_accessed(filp);
418 return ret;
419}
420
421static inline int is_packetized(struct file *file)
422{
423 return (file->f_flags & O_DIRECT) != 0;
424}
425
426/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
427static inline bool pipe_writable(const struct pipe_inode_info *pipe)
428{
429 unsigned int head = READ_ONCE(pipe->head);
430 unsigned int tail = READ_ONCE(pipe->tail);
431 unsigned int max_usage = READ_ONCE(pipe->max_usage);
432
433 return !pipe_full(head, tail, max_usage) ||
434 !READ_ONCE(pipe->readers);
435}
436
437static ssize_t
438pipe_write(struct kiocb *iocb, struct iov_iter *from)
439{
440 struct file *filp = iocb->ki_filp;
441 struct pipe_inode_info *pipe = filp->private_data;
442 unsigned int head;
443 ssize_t ret = 0;
444 size_t total_len = iov_iter_count(from);
445 ssize_t chars;
446 bool was_empty = false;
447 bool wake_next_writer = false;
448
449 /*
450 * Reject writing to watch queue pipes before the point where we lock
451 * the pipe.
452 * Otherwise, lockdep would be unhappy if the caller already has another
453 * pipe locked.
454 * If we had to support locking a normal pipe and a notification pipe at
455 * the same time, we could set up lockdep annotations for that, but
456 * since we don't actually need that, it's simpler to just bail here.
457 */
458 if (pipe_has_watch_queue(pipe))
459 return -EXDEV;
460
461 /* Null write succeeds. */
462 if (unlikely(total_len == 0))
463 return 0;
464
465 __pipe_lock(pipe);
466
467 if (!pipe->readers) {
468 send_sig(SIGPIPE, current, 0);
469 ret = -EPIPE;
470 goto out;
471 }
472
473 /*
474 * If it wasn't empty we try to merge new data into
475 * the last buffer.
476 *
477 * That naturally merges small writes, but it also
478 * page-aligns the rest of the writes for large writes
479 * spanning multiple pages.
480 */
481 head = pipe->head;
482 was_empty = pipe_empty(head, pipe->tail);
483 chars = total_len & (PAGE_SIZE-1);
484 if (chars && !was_empty) {
485 unsigned int mask = pipe->ring_size - 1;
486 struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
487 int offset = buf->offset + buf->len;
488
489 if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
490 offset + chars <= PAGE_SIZE) {
491 ret = pipe_buf_confirm(pipe, buf);
492 if (ret)
493 goto out;
494
495 ret = copy_page_from_iter(buf->page, offset, chars, from);
496 if (unlikely(ret < chars)) {
497 ret = -EFAULT;
498 goto out;
499 }
500
501 buf->len += ret;
502 if (!iov_iter_count(from))
503 goto out;
504 }
505 }
506
507 for (;;) {
508 if (!pipe->readers) {
509 send_sig(SIGPIPE, current, 0);
510 if (!ret)
511 ret = -EPIPE;
512 break;
513 }
514
515 head = pipe->head;
516 if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
517 unsigned int mask = pipe->ring_size - 1;
518 struct pipe_buffer *buf;
519 struct page *page = pipe->tmp_page;
520 int copied;
521
522 if (!page) {
523 page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
524 if (unlikely(!page)) {
525 ret = ret ? : -ENOMEM;
526 break;
527 }
528 pipe->tmp_page = page;
529 }
530
531 /* Allocate a slot in the ring in advance and attach an
532 * empty buffer. If we fault or otherwise fail to use
533 * it, either the reader will consume it or it'll still
534 * be there for the next write.
535 */
536 pipe->head = head + 1;
537
538 /* Insert it into the buffer array */
539 buf = &pipe->bufs[head & mask];
540 buf->page = page;
541 buf->ops = &anon_pipe_buf_ops;
542 buf->offset = 0;
543 buf->len = 0;
544 if (is_packetized(filp))
545 buf->flags = PIPE_BUF_FLAG_PACKET;
546 else
547 buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
548 pipe->tmp_page = NULL;
549
550 copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
551 if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
552 if (!ret)
553 ret = -EFAULT;
554 break;
555 }
556 ret += copied;
557 buf->len = copied;
558
559 if (!iov_iter_count(from))
560 break;
561 }
562
563 if (!pipe_full(head, pipe->tail, pipe->max_usage))
564 continue;
565
566 /* Wait for buffer space to become available. */
567 if ((filp->f_flags & O_NONBLOCK) ||
568 (iocb->ki_flags & IOCB_NOWAIT)) {
569 if (!ret)
570 ret = -EAGAIN;
571 break;
572 }
573 if (signal_pending(current)) {
574 if (!ret)
575 ret = -ERESTARTSYS;
576 break;
577 }
578
579 /*
580 * We're going to release the pipe lock and wait for more
581 * space. We wake up any readers if necessary, and then
582 * after waiting we need to re-check whether the pipe
583 * become empty while we dropped the lock.
584 */
585 __pipe_unlock(pipe);
586 if (was_empty)
587 wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
588 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
589 wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
590 __pipe_lock(pipe);
591 was_empty = pipe_empty(pipe->head, pipe->tail);
592 wake_next_writer = true;
593 }
594out:
595 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
596 wake_next_writer = false;
597 __pipe_unlock(pipe);
598
599 /*
600 * If we do do a wakeup event, we do a 'sync' wakeup, because we
601 * want the reader to start processing things asap, rather than
602 * leave the data pending.
603 *
604 * This is particularly important for small writes, because of
605 * how (for example) the GNU make jobserver uses small writes to
606 * wake up pending jobs
607 *
608 * Epoll nonsensically wants a wakeup whether the pipe
609 * was already empty or not.
610 */
611 if (was_empty || pipe->poll_usage)
612 wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
613 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
614 if (wake_next_writer)
615 wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
616 if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
617 int err = file_update_time(filp);
618 if (err)
619 ret = err;
620 sb_end_write(file_inode(filp)->i_sb);
621 }
622 return ret;
623}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		__pipe_lock(pipe);
		ret = watch_queue_set_size(pipe, arg);
		__pipe_unlock(pipe);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}
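
/*
 * Illustrative userspace sketch (not kernel code): FIONREAD reports how
 * many bytes are currently buffered in the pipe, which is exactly the
 * per-slot sum computed above:
 *
 *	int avail = 0;
 *	if (ioctl(pipefd[0], FIONREAD, &avail) == 0)
 *		printf("%d bytes ready to read\n", avail);
 */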

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/* Epoll has some historical nasty semantics, this enables them */
	WRITE_ONCE(pipe->poll_usage, true);

	/*
	 * Reading pipe state only -- no need to acquire the pipe mutex.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}
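
/*
 * Illustrative userspace sketch (not kernel code): a reader waiting on a
 * pipe via poll(), matching the mask computation above:
 *
 *	struct pollfd pfd = { .fd = pipefd[0], .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLIN)
 *			;	// data is buffered; read() will not block
 *		if (pfd.revents & POLLHUP)
 *			;	// all writers have closed their end
 *	}
 */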

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can only happen if on is nonzero */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}
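
/*
 * Illustrative userspace sketch (not kernel code): the fasync lists above
 * back O_ASYNC delivery of SIGIO, conventionally enabled as:
 *
 *	fcntl(pipefd[0], F_SETOWN, getpid());		// route SIGIO to this process
 *	int flags = fcntl(pipefd[0], F_GETFL);
 *	fcntl(pipefd[0], F_SETFL, flags | O_ASYNC);	// ends up in pipe_fasync()
 */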

unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

bool pipe_is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}
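
/*
 * Sketch of the accounting protocol these helpers implement (the page
 * count 16 is an example value only): a caller charges pages up front,
 * checks the limits against the returned running total, and reverts by
 * charging back to the old count on failure, as alloc_pipe_info() and
 * pipe_set_size() below both do:
 *
 *	user_bufs = account_pipe_buffers(user, 0, 16);	// charge 16 pages
 *	if (too_many_pipe_buffers_hard(user_bufs) &&
 *	    pipe_is_unprivileged_user()) {
 *		account_pipe_buffers(user, 16, 0);	// revert the charge
 *		// fail the operation, per the caller's convention
 *	}
 */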

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	unsigned int i;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		watch_queue_clear(pipe->watch_queue);
#endif

	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		put_watch_queue(pipe->watch_queue);
#endif
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __ro_after_init;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(buffer, buflen, "pipe:[%lu]",
			     d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};
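
/*
 * Illustrative userspace sketch (not kernel code): the synthetic
 * "pipe:[<ino>]" name generated above is what shows up when resolving a
 * pipe fd through procfs (the fd number 3 is an assumption here):
 *
 *	char name[64];
 *	ssize_t n = readlink("/proc/self/fd/3", name, sizeof(name) - 1);
 *	if (n > 0)
 *		name[n] = '\0';		// e.g. "pipe:[12345]"
 */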

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	simple_inode_init_ts(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;
	int error;

	if (!inode)
		return -ENFILE;

	if (flags & O_NOTIFICATION_PIPE) {
		error = watch_queue_init(inode->i_pipe);
		if (error) {
			free_pipe_info(inode->i_pipe);
			iput(inode);
			return error;
		}
	}

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	/* pipe groks IOCB_NOWAIT */
	files[0]->f_mode |= FMODE_NOWAIT;
	files[1]->f_mode |= FMODE_NOWAIT;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
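
/*
 * Illustrative userspace sketch (not kernel code): pipe2() exposes the
 * flag handling implemented above; O_CLOEXEC and O_NONBLOCK apply to
 * both ends atomically at creation time:
 *
 *	int pipefd[2];
 *	if (pipe2(pipefd, O_CLOEXEC | O_NONBLOCK) == 0) {
 *		// pipefd[0] is the read end, pipefd[1] the write end
 *	}
 */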

/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}

/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 * O_RDONLY
	 * POSIX.1 says that O_NONBLOCK means return with the FIFO
	 * opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 * O_WRONLY
	 * POSIX.1 says that O_NONBLOCK means return -1 with
	 * errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 * O_RDWR
	 * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 * This implementation will NEVER block on an O_RDWR open, since
	 * the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}
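
/*
 * Illustrative userspace sketch (not kernel code): the O_NONBLOCK FIFO
 * semantics implemented above, for a FIFO at an assumed path:
 *
 *	int rd = open("/tmp/myfifo", O_RDONLY | O_NONBLOCK);	// succeeds even with no writer
 *	int wr = open("/tmp/myfifo", O_WRONLY | O_NONBLOCK);	// fails with ENXIO if no reader yet
 */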

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
	.splice_write	= iter_file_splice_write,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned int size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
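
/*
 * Worked example (assuming a 4 KiB PAGE_SIZE; it is arch-dependent):
 * round_pipe_size(100000) rounds up to the next power of two, 131072
 * (32 pages); anything below one page, say round_pipe_size(1), is
 * clamped up to 4096; and a request above 2^31 yields 0, which callers
 * treat as an error.
 */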

/*
 * Resize the pipe ring to a number of slots.
 *
 * Note the pipe can be reduced in capacity, but only if the current
 * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
 * returned instead.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	struct pipe_buffer *bufs;
	unsigned int head, tail, mask, n;

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	spin_lock_irq(&pipe->rd_wait.lock);
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;

	n = pipe_occupancy(head, tail);
	if (nr_slots < n) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		kfree(bufs);
		return -EBUSY;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	if (!pipe_has_watch_queue(pipe)) {
		pipe->max_usage = nr_slots;
		pipe->nr_accounted = nr_slots;
	}

	spin_unlock_irq(&pipe->rd_wait.lock);

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}
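
/*
 * Worked example of the copy above: with ring_size = 8, tail = 6 and
 * head = 10, the occupancy n is 4 and the masked indices are t = 6 and
 * h = 2, so h <= t and the occupied slots wrap.  tsize is 8 - 6 = 2:
 * old slots 6..7 are copied to bufs[0..1] and old slots 0..1 to
 * bufs[2..3], after which the ring is linear again with tail = 0 and
 * head = 4.
 */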

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
{
	unsigned long user_bufs;
	unsigned int nr_slots, size;
	long ret = 0;

	if (pipe_has_watch_queue(pipe))
		return -EBUSY;

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		goto out_revert_acct;

	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
	return ret;
}

/*
 * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
 * not enough to verify that this is a pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;

	if (file->f_op != &pipefifo_fops || !pipe)
		return NULL;
	if (for_splice && pipe_has_watch_queue(pipe))
		return NULL;
	return pipe;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned int arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}
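
/*
 * Illustrative userspace sketch (not kernel code): resizing a pipe via
 * fcntl().  The request is rounded by round_pipe_size(), so the size
 * returned by F_SETPIPE_SZ may exceed what was asked for:
 *
 *	long newsz = fcntl(pipefd[1], F_SETPIPE_SZ, 1024 * 1024);
 *	long cursz = fcntl(pipefd[1], F_GETPIPE_SZ);	// matches newsz on success
 */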

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

#ifdef CONFIG_SYSCTL
static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
					unsigned int *valp,
					int write, void *data)
{
	if (write) {
		unsigned int val;

		val = round_pipe_size(*lvalp);
		if (val == 0)
			return -EINVAL;

		*valp = val;
	} else {
		unsigned int val = *valp;
		*lvalp = (unsigned long) val;
	}

	return 0;
}

static int proc_dopipe_max_size(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	return do_proc_douintvec(table, write, buffer, lenp, ppos,
				 do_proc_dopipe_max_size_conv, NULL);
}

static struct ctl_table fs_pipe_sysctls[] = {
	{
		.procname	= "pipe-max-size",
		.data		= &pipe_max_size,
		.maxlen		= sizeof(pipe_max_size),
		.mode		= 0644,
		.proc_handler	= proc_dopipe_max_size,
	},
	{
		.procname	= "pipe-user-pages-hard",
		.data		= &pipe_user_pages_hard,
		.maxlen		= sizeof(pipe_user_pages_hard),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "pipe-user-pages-soft",
		.data		= &pipe_user_pages_soft,
		.maxlen		= sizeof(pipe_user_pages_soft),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
};
#endif

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
#ifdef CONFIG_SYSCTL
	register_sysctl_init("fs", fs_pipe_sysctls);
#endif
	return err;
}

fs_initcall(init_pipe_fs);