/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return ACCESS_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}
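
/*
 * Request lifecycle, as implemented below: a request starts out on the
 * input queue (fiq->pending, FR_PENDING), moves to a device's fpq->io
 * list while its arguments are copied to the daemon, and then to
 * fpq->processing (FR_SENT) until the reply arrives; request_end()
 * completes it (FR_FINISHED) and drops the reference taken here.
 */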

static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
	req->in.h.pid = current->pid;
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(req);
	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

/*
 * Return the request stored in fuse_file->reserved_req.  However, that
 * request may currently be in use; if so, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Get a request for a file operation; always succeeds.
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			atomic_dec(&fc->num_waiting);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	return ++fiq->reqctr;
}
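
/*
 * fuse_get_unique() is always called with fiq->waitq.lock held (by
 * queue_request() callers and by the interrupt/forget readers below),
 * so the plain increment of fiq->reqctr is serialized by that lock.
 */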

static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up_locked(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;
		struct fuse_iqueue *fiq = &fc->iq;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->waitq.lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->waitq.lock);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived, or the request was aborted (and not yet sent), or some
 * error occurred during communication with userspace, or the device
 * file was closed.  The requester thread is woken up (if still
 * waiting), the 'end' callback is called if given, else the reference
 * to the request is released.
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		return;

	spin_lock(&fiq->waitq.lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->waitq.lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background)
			fc->blocked = 0;

		/* Wake up next waiter, if any */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->lock);
	}
	wake_up(&req->waitq);
	if (req->end)
		req->end(fc, req);
	fuse_put_request(fc, req);
}

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (test_bit(FR_FINISHED, &req->flags)) {
		spin_unlock(&fiq->waitq.lock);
		return;
	}
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->waitq.lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->waitq.lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->waitq.lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}
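
/*
 * A typical caller (sketch, modelled on how other fuse files use this
 * helper; the inarg/outarg structs here are illustrative, not from
 * this file):
 *
 *	FUSE_ARGS(args);
 *
 *	args.in.h.opcode = FUSE_GETATTR;
 *	args.in.h.nodeid = get_node_id(inode);
 *	args.in.numargs = 1;
 *	args.in.args[0].size = sizeof(inarg);
 *	args.in.args[0].value = &inarg;
 *	args.out.numargs = 1;
 *	args.out.args[0].size = sizeof(outarg);
 *	args.out.args[0].value = &outarg;
 *	err = fuse_simple_request(fc, &args);
 */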

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);

	return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there must not be
 * anything that could cause a page-fault.  If the request was already
 * aborted, bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};
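
/*
 * A fuse_copy_state drives one of three copy mechanisms, depending on
 * which fields the caller sets up: a plain iov_iter (read/write on the
 * device), pipe buffers being filled (splice read: cs->write with
 * cs->pipebufs set), or pipe buffers being consumed (splice write).
 * fuse_copy_fill() below picks the right source or destination for the
 * next chunk.
 */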

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pageful of userspace buffer, map it into kernel address
 * space, and lock the request.
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (pipe_buf_steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		put_page(newpage);
		return err;
	}

	unlock_page(oldpage);
	put_page(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fiq);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->waitq.lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->waitq.lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->waitq.lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->waitq.lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->waitq.lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer.  If no reply is needed (FORGET), or the request has been
 * aborted, or there was an error during the copying, then it is
 * finished by calling request_end().  Otherwise add it to the
 * processing list and set the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fiq->waitq.lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
	    !request_pending(fiq))
		goto err_unlock;

	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
				!fiq->connected || request_pending(fiq));
	if (err)
		goto err_unlock;

	err = -ENODEV;
	if (!fiq->connected)
		goto err_unlock;

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->waitq.lock);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	list_move_tail(&req->list, &fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	request_end(fc, req);
	return err;

 err_unlock:
	spin_unlock(&fiq->waitq.lock);
	return err;
}
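
/*
 * On the wire, what a userspace daemon gets from read(2) on /dev/fuse
 * is a struct fuse_in_header followed by the opcode-specific argument
 * data.  A minimal sketch of the daemon side (assumption: fd is the
 * open /dev/fuse descriptor; this loop is not part of this file):
 *
 *	char buf[FUSE_MIN_READ_BUFFER];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	struct fuse_in_header *in = (struct fuse_in_header *)buf;
 *	// in->opcode selects the operation; in->unique must be echoed
 *	// back in the reply header (see fuse_dev_do_write() below).
 */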

static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_dev once a connection has been set up, and is
	 * used to keep track of whether the file has been mounted
	 * already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, false);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing, list) {
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then looked up on the processing
 * list by the unique ID found in the header.  If found, it is removed
 * from the list and the rest of the buffer is copied to the request.
 * The request is finished by calling request_end().
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates an unsolicited notification message,
	 * and the error field contains the notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fpq->lock);
	err = -ENOENT;
	if (!fpq->connected)
		goto err_unlock_pq;

	req = request_find(fpq, oh.unique);
	if (!req)
		goto err_unlock_pq;

	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		spin_unlock(&fpq->lock);

		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_finish;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(&fc->iq, req);

		fuse_copy_finish(cs);
		return nbytes;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock_pq:
	spin_unlock(&fpq->lock);
 err_finish:
	fuse_copy_finish(cs);
	return err;
}
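
/*
 * Daemon side of the reply path (sketch; the hdr/payload names are
 * illustrative, not from this file): the reply must start with a
 * struct fuse_out_header whose 'unique' echoes the request and whose
 * 'len' covers header plus payload, submitted in a single write so
 * that oh.len == nbytes above:
 *
 *	struct fuse_out_header hdr = {
 *		.unique = in->unique,
 *		.error = 0,
 *		.len = sizeof(hdr) + payload_len,
 *	};
 *	struct iovec iov[2] = {
 *		{ &hdr, sizeof(hdr) },
 *		{ payload, payload_len },
 *	};
 *	writev(fd, iov, 2);
 */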

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			pipe_buf_get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	for (idx = 0; idx < nbuf; idx++)
		pipe_buf_release(pipe, &bufs[idx]);

out:
	kfree(bufs);
	return ret;
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return POLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected)
		mask = POLLERR;
	else if (request_pending(fiq))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fiq->waitq.lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * Called with no locks held; request_end() takes fc->lock as needed.
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		request_end(fc, req);
	}
}

static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.txt).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can; this
 * is OK, since in that case the request is removed from the list before we
 * touch it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end1);
		LIST_HEAD(to_end2);

		fc->connected = 0;
		fc->blocked = 0;
		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					list_move(&req->list, &to_end1);
				}
				spin_unlock(&req->waitq.lock);
			}
			list_splice_init(&fpq->processing, &to_end2);
			spin_unlock(&fpq->lock);
		}
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);

		spin_lock(&fiq->waitq.lock);
		fiq->connected = 0;
		list_splice_init(&fiq->pending, &to_end2);
		list_for_each_entry(req, &to_end2, list)
			clear_bit(FR_PENDING, &req->flags);
		while (forget_pending(fiq))
			kfree(dequeue_forget(fiq, 1, NULL));
		wake_up_all_locked(&fiq->waitq);
		spin_unlock(&fiq->waitq.lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		while (!list_empty(&to_end1)) {
			req = list_first_entry(&to_end1, struct fuse_req, list);
			__fuse_get_request(req);
			list_del_init(&req->list);
			request_end(fc, req);
		}
		end_requests(fc, &to_end2);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
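
/*
 * Besides the last device release below, fuse_abort_conn() is also
 * reachable from userspace via the fusectl filesystem (see
 * fs/fuse/control.c), by writing to the connection's abort file, e.g.:
 *
 *	echo 1 > /sys/fs/fuse/connections/42/abort
 *
 * The "42" here is just an example device number.
 */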
2133
2134int fuse_dev_release(struct inode *inode, struct file *file)
2135{
2136 struct fuse_dev *fud = fuse_get_dev(file);
2137
2138 if (fud) {
2139 struct fuse_conn *fc = fud->fc;
2140 struct fuse_pqueue *fpq = &fud->pq;
2141
2142 WARN_ON(!list_empty(&fpq->io));
2143 end_requests(fc, &fpq->processing);
2144 /* Are we the last open device? */
2145 if (atomic_dec_and_test(&fc->dev_count)) {
2146 WARN_ON(fc->iq.fasync != NULL);
2147 fuse_abort_conn(fc);
2148 }
2149 fuse_dev_free(fud);
2150 }
2151 return 0;
2152}
2153EXPORT_SYMBOL_GPL(fuse_dev_release);
2154
2155static int fuse_dev_fasync(int fd, struct file *file, int on)
2156{
2157 struct fuse_dev *fud = fuse_get_dev(file);
2158
2159 if (!fud)
2160 return -EPERM;
2161
2162 /* No locking - fasync_helper does its own locking */
2163 return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
2164}
2165
2166static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
2167{
2168 struct fuse_dev *fud;
2169
2170 if (new->private_data)
2171 return -EINVAL;
2172
2173 fud = fuse_dev_alloc(fc);
2174 if (!fud)
2175 return -ENOMEM;
2176
2177 new->private_data = fud;
2178 atomic_inc(&fc->dev_count);
2179
2180 return 0;
2181}
2182
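/*
 * FUSE_DEV_IOC_CLONE: clone the connection of an existing /dev/fuse fd
 * (passed by number in the argument) onto this file.  Only devices in
 * the same user namespace may be cloned.
 */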
2183static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
2184 unsigned long arg)
2185{
2186 int err = -ENOTTY;
2187
2188 if (cmd == FUSE_DEV_IOC_CLONE) {
2189 int oldfd;
2190
2191 err = -EFAULT;
2192 if (!get_user(oldfd, (__u32 __user *) arg)) {
2193 struct file *old = fget(oldfd);
2194
2195 err = -EINVAL;
2196 if (old) {
2197 struct fuse_dev *fud = NULL;
2198
2199 /*
2200 * Check against file->f_op because CUSE
2201 * uses the same ioctl handler.
2202 */
2203 if (old->f_op == file->f_op &&
2204 old->f_cred->user_ns == file->f_cred->user_ns)
2205 fud = fuse_get_dev(old);
2206
2207 if (fud) {
2208 mutex_lock(&fuse_mutex);
2209 err = fuse_device_clone(fud->fc, file);
2210 mutex_unlock(&fuse_mutex);
2211 }
2212 fput(old);
2213 }
2214 }
2215 }
2216 return err;
2217}
2218
2219const struct file_operations fuse_dev_operations = {
2220 .owner = THIS_MODULE,
2221 .open = fuse_dev_open,
2222 .llseek = no_llseek,
2223 .read_iter = fuse_dev_read,
2224 .splice_read = fuse_dev_splice_read,
2225 .write_iter = fuse_dev_write,
2226 .splice_write = fuse_dev_splice_write,
2227 .poll = fuse_dev_poll,
2228 .release = fuse_dev_release,
2229 .fasync = fuse_dev_fasync,
2230 .unlocked_ioctl = fuse_dev_ioctl,
2231 .compat_ioctl = fuse_dev_ioctl,
2232};
2233EXPORT_SYMBOL_GPL(fuse_dev_operations);
2234
2235static struct miscdevice fuse_miscdevice = {
2236 .minor = FUSE_MINOR,
2237 .name = "fuse",
2238 .fops = &fuse_dev_operations,
2239};
2240
2241int __init fuse_dev_init(void)
2242{
2243 int err = -ENOMEM;
2244 fuse_req_cachep = kmem_cache_create("fuse_request",
2245 sizeof(struct fuse_req),
2246 0, 0, NULL);
2247 if (!fuse_req_cachep)
2248 goto out;
2249
2250 err = misc_register(&fuse_miscdevice);
2251 if (err)
2252 goto out_cache_clean;
2253
2254 return 0;
2255
2256 out_cache_clean:
2257 kmem_cache_destroy(fuse_req_cachep);
2258 out:
2259 return err;
2260}
2261
2262void fuse_dev_cleanup(void)
2263{
2264 misc_deregister(&fuse_miscdevice);
2265 kmem_cache_destroy(fuse_req_cachep);
2266}
1/*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7*/
8
9#include "fuse_i.h"
10
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/poll.h>
14#include <linux/sched/signal.h>
15#include <linux/uio.h>
16#include <linux/miscdevice.h>
17#include <linux/pagemap.h>
18#include <linux/file.h>
19#include <linux/slab.h>
20#include <linux/pipe_fs_i.h>
21#include <linux/swap.h>
22#include <linux/splice.h>
23#include <linux/sched.h>
24
25MODULE_ALIAS_MISCDEV(FUSE_MINOR);
26MODULE_ALIAS("devname:fuse");
27
28/* Ordinary requests have even IDs, while interrupt IDs are odd */
29#define FUSE_INT_REQ_BIT (1ULL << 0)
30#define FUSE_REQ_ID_STEP (1ULL << 1)
31
32static struct kmem_cache *fuse_req_cachep;
33
34static struct fuse_dev *fuse_get_dev(struct file *file)
35{
36 /*
37 * Lockless access is OK, because file->private data is set
38 * once during mount and is valid until the file is released.
39 */
40 return READ_ONCE(file->private_data);
41}
42
43static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
44{
45 INIT_LIST_HEAD(&req->list);
46 INIT_LIST_HEAD(&req->intr_entry);
47 init_waitqueue_head(&req->waitq);
48 refcount_set(&req->count, 1);
49 __set_bit(FR_PENDING, &req->flags);
50 req->fm = fm;
51}
52
53static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
54{
55 struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
56 if (req)
57 fuse_request_init(fm, req);
58
59 return req;
60}
61
62static void fuse_request_free(struct fuse_req *req)
63{
64 kmem_cache_free(fuse_req_cachep, req);
65}
66
67static void __fuse_get_request(struct fuse_req *req)
68{
69 refcount_inc(&req->count);
70}
71
72/* Must be called with > 1 refcount */
73static void __fuse_put_request(struct fuse_req *req)
74{
75 refcount_dec(&req->count);
76}
77
78void fuse_set_initialized(struct fuse_conn *fc)
79{
80 /* Make sure stores before this are seen on another CPU */
81 smp_wmb();
82 fc->initialized = 1;
83}
84
85static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
86{
87 return !fc->initialized || (for_background && fc->blocked);
88}
89
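/*
 * Drop one fc->num_waiting reference.  When the connection has been
 * aborted, the final reference must wake up aborters sleeping in
 * fuse_wait_aborted().
 */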
90static void fuse_drop_waiting(struct fuse_conn *fc)
91{
92 /*
93	 * lockless check of fc->connected is okay, because atomic_dec_and_test()
94 * provides a memory barrier matched with the one in fuse_wait_aborted()
95 * to ensure no wake-up is missed.
96 */
97 if (atomic_dec_and_test(&fc->num_waiting) &&
98 !READ_ONCE(fc->connected)) {
99 /* wake up aborters */
100 wake_up_all(&fc->blocked_waitq);
101 }
102}
103
104static void fuse_put_request(struct fuse_req *req);
105
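/*
 * Allocate a request and fill in the caller's credentials and pid,
 * translated into the connection's user and pid namespaces.  May block
 * until the connection is initialized (and, for background requests,
 * unblocked).  Returns an ERR_PTR on failure.
 */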
106static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
107{
108 struct fuse_conn *fc = fm->fc;
109 struct fuse_req *req;
110 int err;
111 atomic_inc(&fc->num_waiting);
112
113 if (fuse_block_alloc(fc, for_background)) {
114 err = -EINTR;
115 if (wait_event_killable_exclusive(fc->blocked_waitq,
116 !fuse_block_alloc(fc, for_background)))
117 goto out;
118 }
119 /* Matches smp_wmb() in fuse_set_initialized() */
120 smp_rmb();
121
122 err = -ENOTCONN;
123 if (!fc->connected)
124 goto out;
125
126 err = -ECONNREFUSED;
127 if (fc->conn_error)
128 goto out;
129
130 req = fuse_request_alloc(fm, GFP_KERNEL);
131 err = -ENOMEM;
132 if (!req) {
133 if (for_background)
134 wake_up(&fc->blocked_waitq);
135 goto out;
136 }
137
138 req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
139 req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
140 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
141
142 __set_bit(FR_WAITING, &req->flags);
143 if (for_background)
144 __set_bit(FR_BACKGROUND, &req->flags);
145
146 if (unlikely(req->in.h.uid == ((uid_t)-1) ||
147 req->in.h.gid == ((gid_t)-1))) {
148 fuse_put_request(req);
149 return ERR_PTR(-EOVERFLOW);
150 }
151 return req;
152
153 out:
154 fuse_drop_waiting(fc);
155 return ERR_PTR(err);
156}
157
158static void fuse_put_request(struct fuse_req *req)
159{
160 struct fuse_conn *fc = req->fm->fc;
161
162 if (refcount_dec_and_test(&req->count)) {
163 if (test_bit(FR_BACKGROUND, &req->flags)) {
164 /*
165 * We get here in the unlikely case that a background
166 * request was allocated but not sent
167 */
168 spin_lock(&fc->bg_lock);
169 if (!fc->blocked)
170 wake_up(&fc->blocked_waitq);
171 spin_unlock(&fc->bg_lock);
172 }
173
174 if (test_bit(FR_WAITING, &req->flags)) {
175 __clear_bit(FR_WAITING, &req->flags);
176 fuse_drop_waiting(fc);
177 }
178
179 fuse_request_free(req);
180 }
181}
182
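/* Total length in bytes of 'numargs' arguments */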
183unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
184{
185 unsigned nbytes = 0;
186 unsigned i;
187
188 for (i = 0; i < numargs; i++)
189 nbytes += args[i].size;
190
191 return nbytes;
192}
193EXPORT_SYMBOL_GPL(fuse_len_args);
194
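/*
 * Allocate the next request ID.  IDs advance by FUSE_REQ_ID_STEP, so
 * ordinary requests always get even IDs; the matching interrupt request
 * is identified by the same ID with FUSE_INT_REQ_BIT set.  Must be
 * called with fiq->lock held.
 */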
195u64 fuse_get_unique(struct fuse_iqueue *fiq)
196{
197 fiq->reqctr += FUSE_REQ_ID_STEP;
198 return fiq->reqctr;
199}
200EXPORT_SYMBOL_GPL(fuse_get_unique);
201
202static unsigned int fuse_req_hash(u64 unique)
203{
204 return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
205}
206
207/*
208 * A new request is available; wake up fiq->waitq
209 */
210static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
211__releases(fiq->lock)
212{
213 wake_up(&fiq->waitq);
214 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
215 spin_unlock(&fiq->lock);
216}
217
218const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
219 .wake_forget_and_unlock = fuse_dev_wake_and_unlock,
220 .wake_interrupt_and_unlock = fuse_dev_wake_and_unlock,
221 .wake_pending_and_unlock = fuse_dev_wake_and_unlock,
222};
223EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
224
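/*
 * Fill in the total request length, add the request to the pending
 * list and notify the input queue reader.  Called with fiq->lock held,
 * which the wake callback releases.
 */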
225static void queue_request_and_unlock(struct fuse_iqueue *fiq,
226 struct fuse_req *req)
227__releases(fiq->lock)
228{
229 req->in.h.len = sizeof(struct fuse_in_header) +
230 fuse_len_args(req->args->in_numargs,
231 (struct fuse_arg *) req->args->in_args);
232 list_add_tail(&req->list, &fiq->pending);
233 fiq->ops->wake_pending_and_unlock(fiq);
234}
235
236void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
237 u64 nodeid, u64 nlookup)
238{
239 struct fuse_iqueue *fiq = &fc->iq;
240
241 forget->forget_one.nodeid = nodeid;
242 forget->forget_one.nlookup = nlookup;
243
244 spin_lock(&fiq->lock);
245 if (fiq->connected) {
246 fiq->forget_list_tail->next = forget;
247 fiq->forget_list_tail = forget;
248 fiq->ops->wake_forget_and_unlock(fiq);
249 } else {
250 kfree(forget);
251 spin_unlock(&fiq->lock);
252 }
253}
254
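/*
 * Move queued background requests to the input queue, as long as fewer
 * than max_background of them are active.  Called with fc->bg_lock held.
 */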
255static void flush_bg_queue(struct fuse_conn *fc)
256{
257 struct fuse_iqueue *fiq = &fc->iq;
258
259 while (fc->active_background < fc->max_background &&
260 !list_empty(&fc->bg_queue)) {
261 struct fuse_req *req;
262
263 req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
264 list_del(&req->list);
265 fc->active_background++;
266 spin_lock(&fiq->lock);
267 req->in.h.unique = fuse_get_unique(fiq);
268 queue_request_and_unlock(fiq, req);
269 }
270}
271
272/*
273 * This function is called when a request is finished.  Either a reply
274 * has arrived, or the request was aborted (and not yet sent), or some
275 * error occurred during communication with userspace, or the device
276 * file was closed.  The requester thread is woken up (if still waiting),
277 * the 'end' callback is called if given, and otherwise the reference to
278 * the request is released.
279 */
280void fuse_request_end(struct fuse_req *req)
281{
282 struct fuse_mount *fm = req->fm;
283 struct fuse_conn *fc = fm->fc;
284 struct fuse_iqueue *fiq = &fc->iq;
285
286 if (test_and_set_bit(FR_FINISHED, &req->flags))
287 goto put_request;
288
289 /*
290 * test_and_set_bit() implies smp_mb() between bit
291 * changing and below FR_INTERRUPTED check. Pairs with
292 * smp_mb() from queue_interrupt().
293 */
294 if (test_bit(FR_INTERRUPTED, &req->flags)) {
295 spin_lock(&fiq->lock);
296 list_del_init(&req->intr_entry);
297 spin_unlock(&fiq->lock);
298 }
299 WARN_ON(test_bit(FR_PENDING, &req->flags));
300 WARN_ON(test_bit(FR_SENT, &req->flags));
301 if (test_bit(FR_BACKGROUND, &req->flags)) {
302 spin_lock(&fc->bg_lock);
303 clear_bit(FR_BACKGROUND, &req->flags);
304 if (fc->num_background == fc->max_background) {
305 fc->blocked = 0;
306 wake_up(&fc->blocked_waitq);
307 } else if (!fc->blocked) {
308 /*
309 * Wake up next waiter, if any. It's okay to use
310 * waitqueue_active(), as we've already synced up
311 * fc->blocked with waiters with the wake_up() call
312 * above.
313 */
314 if (waitqueue_active(&fc->blocked_waitq))
315 wake_up(&fc->blocked_waitq);
316 }
317
318 fc->num_background--;
319 fc->active_background--;
320 flush_bg_queue(fc);
321 spin_unlock(&fc->bg_lock);
322 } else {
323 /* Wake up waiter sleeping in request_wait_answer() */
324 wake_up(&req->waitq);
325 }
326
327 if (test_bit(FR_ASYNC, &req->flags))
328 req->args->end(fm, req->args, req->out.h.error);
329put_request:
330 fuse_put_request(req);
331}
332EXPORT_SYMBOL_GPL(fuse_request_end);
333
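/*
 * Queue an INTERRUPT request for @req and wake up the reader, unless
 * the request has finished in the meantime.
 */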
334static int queue_interrupt(struct fuse_req *req)
335{
336 struct fuse_iqueue *fiq = &req->fm->fc->iq;
337
338 spin_lock(&fiq->lock);
339	/* Bail out if the request was never marked as interrupted */
340 if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
341 spin_unlock(&fiq->lock);
342 return -EINVAL;
343 }
344
345 if (list_empty(&req->intr_entry)) {
346 list_add_tail(&req->intr_entry, &fiq->interrupts);
347 /*
348 * Pairs with smp_mb() implied by test_and_set_bit()
349 * from fuse_request_end().
350 */
351 smp_mb();
352 if (test_bit(FR_FINISHED, &req->flags)) {
353 list_del_init(&req->intr_entry);
354 spin_unlock(&fiq->lock);
355 return 0;
356 }
357 fiq->ops->wake_interrupt_and_unlock(fiq);
358 } else {
359 spin_unlock(&fiq->lock);
360 }
361 return 0;
362}
363
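/*
 * Wait for the request to complete: first interruptibly (a signal
 * queues an INTERRUPT request), then killably unless FR_FORCE is set,
 * and finally uninterruptibly once the request has been read by
 * userspace.  A fatal signal while the request is still pending
 * dequeues it and fails it with -EINTR.
 */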
364static void request_wait_answer(struct fuse_req *req)
365{
366 struct fuse_conn *fc = req->fm->fc;
367 struct fuse_iqueue *fiq = &fc->iq;
368 int err;
369
370 if (!fc->no_interrupt) {
371 /* Any signal may interrupt this */
372 err = wait_event_interruptible(req->waitq,
373 test_bit(FR_FINISHED, &req->flags));
374 if (!err)
375 return;
376
377 set_bit(FR_INTERRUPTED, &req->flags);
378 /* matches barrier in fuse_dev_do_read() */
379 smp_mb__after_atomic();
380 if (test_bit(FR_SENT, &req->flags))
381 queue_interrupt(req);
382 }
383
384 if (!test_bit(FR_FORCE, &req->flags)) {
385 /* Only fatal signals may interrupt this */
386 err = wait_event_killable(req->waitq,
387 test_bit(FR_FINISHED, &req->flags));
388 if (!err)
389 return;
390
391 spin_lock(&fiq->lock);
392 /* Request is not yet in userspace, bail out */
393 if (test_bit(FR_PENDING, &req->flags)) {
394 list_del(&req->list);
395 spin_unlock(&fiq->lock);
396 __fuse_put_request(req);
397 req->out.h.error = -EINTR;
398 return;
399 }
400 spin_unlock(&fiq->lock);
401 }
402
403 /*
404 * Either request is already in userspace, or it was forced.
405 * Wait it out.
406 */
407 wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
408}
409
410static void __fuse_request_send(struct fuse_req *req)
411{
412 struct fuse_iqueue *fiq = &req->fm->fc->iq;
413
414 BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
415 spin_lock(&fiq->lock);
416 if (!fiq->connected) {
417 spin_unlock(&fiq->lock);
418 req->out.h.error = -ENOTCONN;
419 } else {
420 req->in.h.unique = fuse_get_unique(fiq);
421 /* acquire extra reference, since request is still needed
422 after fuse_request_end() */
423 __fuse_get_request(req);
424 queue_request_and_unlock(fiq, req);
425
426 request_wait_answer(req);
427 /* Pairs with smp_wmb() in fuse_request_end() */
428 smp_rmb();
429 }
430}
431
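/*
 * Shrink argument sizes for old userspace that negotiated a protocol
 * minor version predating the current structure layouts.
 */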
432static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
433{
434 if (fc->minor < 4 && args->opcode == FUSE_STATFS)
435 args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;
436
437 if (fc->minor < 9) {
438 switch (args->opcode) {
439 case FUSE_LOOKUP:
440 case FUSE_CREATE:
441 case FUSE_MKNOD:
442 case FUSE_MKDIR:
443 case FUSE_SYMLINK:
444 case FUSE_LINK:
445 args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
446 break;
447 case FUSE_GETATTR:
448 case FUSE_SETATTR:
449 args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
450 break;
451 }
452 }
453 if (fc->minor < 12) {
454 switch (args->opcode) {
455 case FUSE_CREATE:
456 args->in_args[0].size = sizeof(struct fuse_open_in);
457 break;
458 case FUSE_MKNOD:
459 args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
460 break;
461 }
462 }
463}
464
465static void fuse_force_creds(struct fuse_req *req)
466{
467 struct fuse_conn *fc = req->fm->fc;
468
469 req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
470 req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
471 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
472}
473
474static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
475{
476 req->in.h.opcode = args->opcode;
477 req->in.h.nodeid = args->nodeid;
478 req->args = args;
479 if (args->is_ext)
480 req->in.h.total_extlen = args->in_args[args->ext_idx].size / 8;
481 if (args->end)
482 __set_bit(FR_ASYNC, &req->flags);
483}
484
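/*
 * Send a request and wait for the reply.  Returns the reply error, or,
 * for variable-size replies (out_argvar), the size of the last out
 * argument.  Forced requests are allocated with __GFP_NOFAIL and thus
 * cannot fail here.
 */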
485ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
486{
487 struct fuse_conn *fc = fm->fc;
488 struct fuse_req *req;
489 ssize_t ret;
490
491 if (args->force) {
492 atomic_inc(&fc->num_waiting);
493 req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);
494
495 if (!args->nocreds)
496 fuse_force_creds(req);
497
498 __set_bit(FR_WAITING, &req->flags);
499 __set_bit(FR_FORCE, &req->flags);
500 } else {
501 WARN_ON(args->nocreds);
502 req = fuse_get_req(fm, false);
503 if (IS_ERR(req))
504 return PTR_ERR(req);
505 }
506
507 /* Needs to be done after fuse_get_req() so that fc->minor is valid */
508 fuse_adjust_compat(fc, args);
509 fuse_args_to_req(req, args);
510
511 if (!args->noreply)
512 __set_bit(FR_ISREPLY, &req->flags);
513 __fuse_request_send(req);
514 ret = req->out.h.error;
515 if (!ret && args->out_argvar) {
516 BUG_ON(args->out_numargs == 0);
517 ret = args->out_args[args->out_numargs - 1].size;
518 }
519 fuse_put_request(req);
520
521 return ret;
522}
523
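/*
 * Queue a background request, accounting it against max_background.
 * Returns false if the connection is no longer active, in which case
 * the caller must drop the request.
 */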
524static bool fuse_request_queue_background(struct fuse_req *req)
525{
526 struct fuse_mount *fm = req->fm;
527 struct fuse_conn *fc = fm->fc;
528 bool queued = false;
529
530 WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
531 if (!test_bit(FR_WAITING, &req->flags)) {
532 __set_bit(FR_WAITING, &req->flags);
533 atomic_inc(&fc->num_waiting);
534 }
535 __set_bit(FR_ISREPLY, &req->flags);
536 spin_lock(&fc->bg_lock);
537 if (likely(fc->connected)) {
538 fc->num_background++;
539 if (fc->num_background == fc->max_background)
540 fc->blocked = 1;
541 list_add_tail(&req->list, &fc->bg_queue);
542 flush_bg_queue(fc);
543 queued = true;
544 }
545 spin_unlock(&fc->bg_lock);
546
547 return queued;
548}
549
550int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
551 gfp_t gfp_flags)
552{
553 struct fuse_req *req;
554
555 if (args->force) {
556 WARN_ON(!args->nocreds);
557 req = fuse_request_alloc(fm, gfp_flags);
558 if (!req)
559 return -ENOMEM;
560 __set_bit(FR_BACKGROUND, &req->flags);
561 } else {
562 WARN_ON(args->nocreds);
563 req = fuse_get_req(fm, true);
564 if (IS_ERR(req))
565 return PTR_ERR(req);
566 }
567
568 fuse_args_to_req(req, args);
569
570 if (!fuse_request_queue_background(req)) {
571 fuse_put_request(req);
572 return -ENOTCONN;
573 }
574
575 return 0;
576}
577EXPORT_SYMBOL_GPL(fuse_simple_background);
578
579static int fuse_simple_notify_reply(struct fuse_mount *fm,
580 struct fuse_args *args, u64 unique)
581{
582 struct fuse_req *req;
583 struct fuse_iqueue *fiq = &fm->fc->iq;
584 int err = 0;
585
586 req = fuse_get_req(fm, false);
587 if (IS_ERR(req))
588 return PTR_ERR(req);
589
590 __clear_bit(FR_ISREPLY, &req->flags);
591 req->in.h.unique = unique;
592
593 fuse_args_to_req(req, args);
594
595 spin_lock(&fiq->lock);
596 if (fiq->connected) {
597 queue_request_and_unlock(fiq, req);
598 } else {
599 err = -ENODEV;
600 spin_unlock(&fiq->lock);
601 fuse_put_request(req);
602 }
603
604 return err;
605}
606
607/*
608 * Lock the request. Up to the next unlock_request() there mustn't be
609 * anything that could cause a page-fault.  If the request was already
610 * aborted, bail out.
611 */
612static int lock_request(struct fuse_req *req)
613{
614 int err = 0;
615 if (req) {
616 spin_lock(&req->waitq.lock);
617 if (test_bit(FR_ABORTED, &req->flags))
618 err = -ENOENT;
619 else
620 set_bit(FR_LOCKED, &req->flags);
621 spin_unlock(&req->waitq.lock);
622 }
623 return err;
624}
625
626/*
627 * Unlock request. If it was aborted while locked, caller is responsible
628 * for unlocking and ending the request.
629 */
630static int unlock_request(struct fuse_req *req)
631{
632 int err = 0;
633 if (req) {
634 spin_lock(&req->waitq.lock);
635 if (test_bit(FR_ABORTED, &req->flags))
636 err = -ENOENT;
637 else
638 clear_bit(FR_LOCKED, &req->flags);
639 spin_unlock(&req->waitq.lock);
640 }
641 return err;
642}
643
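/*
 * State for copying a request to or from userspace.  The data source or
 * destination is either an iov_iter or an array of pipe buffers; pg,
 * offset and len track the page fragment currently being copied.
 */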
644struct fuse_copy_state {
645 int write;
646 struct fuse_req *req;
647 struct iov_iter *iter;
648 struct pipe_buffer *pipebufs;
649 struct pipe_buffer *currbuf;
650 struct pipe_inode_info *pipe;
651 unsigned long nr_segs;
652 struct page *pg;
653 unsigned len;
654 unsigned offset;
655 unsigned move_pages:1;
656};
657
658static void fuse_copy_init(struct fuse_copy_state *cs, int write,
659 struct iov_iter *iter)
660{
661 memset(cs, 0, sizeof(*cs));
662 cs->write = write;
663 cs->iter = iter;
664}
665
666/* Unmap and put previous page of userspace buffer */
667static void fuse_copy_finish(struct fuse_copy_state *cs)
668{
669 if (cs->currbuf) {
670 struct pipe_buffer *buf = cs->currbuf;
671
672 if (cs->write)
673 buf->len = PAGE_SIZE - cs->len;
674 cs->currbuf = NULL;
675 } else if (cs->pg) {
676 if (cs->write) {
677 flush_dcache_page(cs->pg);
678 set_page_dirty_lock(cs->pg);
679 }
680 put_page(cs->pg);
681 }
682 cs->pg = NULL;
683}
684
685/*
686 * Get another page of the userspace buffer, map it into kernel
687 * address space, and lock the request
688 */
689static int fuse_copy_fill(struct fuse_copy_state *cs)
690{
691 struct page *page;
692 int err;
693
694 err = unlock_request(cs->req);
695 if (err)
696 return err;
697
698 fuse_copy_finish(cs);
699 if (cs->pipebufs) {
700 struct pipe_buffer *buf = cs->pipebufs;
701
702 if (!cs->write) {
703 err = pipe_buf_confirm(cs->pipe, buf);
704 if (err)
705 return err;
706
707 BUG_ON(!cs->nr_segs);
708 cs->currbuf = buf;
709 cs->pg = buf->page;
710 cs->offset = buf->offset;
711 cs->len = buf->len;
712 cs->pipebufs++;
713 cs->nr_segs--;
714 } else {
715 if (cs->nr_segs >= cs->pipe->max_usage)
716 return -EIO;
717
718 page = alloc_page(GFP_HIGHUSER);
719 if (!page)
720 return -ENOMEM;
721
722 buf->page = page;
723 buf->offset = 0;
724 buf->len = 0;
725
726 cs->currbuf = buf;
727 cs->pg = page;
728 cs->offset = 0;
729 cs->len = PAGE_SIZE;
730 cs->pipebufs++;
731 cs->nr_segs++;
732 }
733 } else {
734 size_t off;
735 err = iov_iter_get_pages2(cs->iter, &page, PAGE_SIZE, 1, &off);
736 if (err < 0)
737 return err;
738 BUG_ON(!err);
739 cs->len = err;
740 cs->offset = off;
741 cs->pg = page;
742 }
743
744 return lock_request(cs->req);
745}
746
747/* Copy as much to/from the userspace buffer as we can */
748static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
749{
750 unsigned ncpy = min(*size, cs->len);
751 if (val) {
752 void *pgaddr = kmap_local_page(cs->pg);
753 void *buf = pgaddr + cs->offset;
754
755 if (cs->write)
756 memcpy(buf, *val, ncpy);
757 else
758 memcpy(*val, buf, ncpy);
759
760 kunmap_local(pgaddr);
761 *val += ncpy;
762 }
763 *size -= ncpy;
764 cs->len -= ncpy;
765 cs->offset += ncpy;
766 return ncpy;
767}
768
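/*
 * Sanity check a folio about to be stolen from the pipe and inserted
 * into the page cache: it must not be mapped, must not already be in
 * the page cache, and may carry only a small set of expected flags.
 */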
769static int fuse_check_folio(struct folio *folio)
770{
771 if (folio_mapped(folio) ||
772 folio->mapping != NULL ||
773 (folio->flags & PAGE_FLAGS_CHECK_AT_PREP &
774 ~(1 << PG_locked |
775 1 << PG_referenced |
776 1 << PG_uptodate |
777 1 << PG_lru |
778 1 << PG_active |
779 1 << PG_workingset |
780 1 << PG_reclaim |
781 1 << PG_waiters |
782 LRU_GEN_MASK | LRU_REFS_MASK))) {
783 dump_page(&folio->page, "fuse: trying to steal weird page");
784 return 1;
785 }
786 return 0;
787}
788
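/*
 * Try to steal the page backing the current pipe buffer and splice it
 * into the page cache in place of *pagep, avoiding a copy.  Returns 0
 * on success, a positive value when the caller should fall back to
 * copying, and a negative error otherwise.
 */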
789static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
790{
791 int err;
792 struct folio *oldfolio = page_folio(*pagep);
793 struct folio *newfolio;
794 struct pipe_buffer *buf = cs->pipebufs;
795
796 folio_get(oldfolio);
797 err = unlock_request(cs->req);
798 if (err)
799 goto out_put_old;
800
801 fuse_copy_finish(cs);
802
803 err = pipe_buf_confirm(cs->pipe, buf);
804 if (err)
805 goto out_put_old;
806
807 BUG_ON(!cs->nr_segs);
808 cs->currbuf = buf;
809 cs->len = buf->len;
810 cs->pipebufs++;
811 cs->nr_segs--;
812
813 if (cs->len != PAGE_SIZE)
814 goto out_fallback;
815
816 if (!pipe_buf_try_steal(cs->pipe, buf))
817 goto out_fallback;
818
819 newfolio = page_folio(buf->page);
820
821 if (!folio_test_uptodate(newfolio))
822 folio_mark_uptodate(newfolio);
823
824 folio_clear_mappedtodisk(newfolio);
825
826 if (fuse_check_folio(newfolio) != 0)
827 goto out_fallback_unlock;
828
829 /*
830	 * This is a new and locked page; it shouldn't be mapped or
831 * have any special flags on it
832 */
833 if (WARN_ON(folio_mapped(oldfolio)))
834 goto out_fallback_unlock;
835 if (WARN_ON(folio_has_private(oldfolio)))
836 goto out_fallback_unlock;
837 if (WARN_ON(folio_test_dirty(oldfolio) ||
838 folio_test_writeback(oldfolio)))
839 goto out_fallback_unlock;
840 if (WARN_ON(folio_test_mlocked(oldfolio)))
841 goto out_fallback_unlock;
842
843 replace_page_cache_folio(oldfolio, newfolio);
844
845 folio_get(newfolio);
846
847 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
848 folio_add_lru(newfolio);
849
850 /*
851 * Release while we have extra ref on stolen page. Otherwise
852 * anon_pipe_buf_release() might think the page can be reused.
853 */
854 pipe_buf_release(cs->pipe, buf);
855
856 err = 0;
857 spin_lock(&cs->req->waitq.lock);
858 if (test_bit(FR_ABORTED, &cs->req->flags))
859 err = -ENOENT;
860 else
861 *pagep = &newfolio->page;
862 spin_unlock(&cs->req->waitq.lock);
863
864 if (err) {
865 folio_unlock(newfolio);
866 folio_put(newfolio);
867 goto out_put_old;
868 }
869
870 folio_unlock(oldfolio);
871 /* Drop ref for ap->pages[] array */
872 folio_put(oldfolio);
873 cs->len = 0;
874
875 err = 0;
876out_put_old:
877 /* Drop ref obtained in this function */
878 folio_put(oldfolio);
879 return err;
880
881out_fallback_unlock:
882 folio_unlock(newfolio);
883out_fallback:
884 cs->pg = buf->page;
885 cs->offset = buf->offset;
886
887 err = lock_request(cs->req);
888 if (!err)
889 err = 1;
890
891 goto out_put_old;
892}
893
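/*
 * Reference a request page directly from a pipe buffer, so that splice
 * reads of request data need no copy.
 */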
894static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
895 unsigned offset, unsigned count)
896{
897 struct pipe_buffer *buf;
898 int err;
899
900 if (cs->nr_segs >= cs->pipe->max_usage)
901 return -EIO;
902
903 get_page(page);
904 err = unlock_request(cs->req);
905 if (err) {
906 put_page(page);
907 return err;
908 }
909
910 fuse_copy_finish(cs);
911
912 buf = cs->pipebufs;
913 buf->page = page;
914 buf->offset = offset;
915 buf->len = count;
916
917 cs->pipebufs++;
918 cs->nr_segs++;
919 cs->len = 0;
920
921 return 0;
922}
923
924/*
925 * Copy a page in the request to/from the userspace buffer. Must be
926 * done atomically
927 */
928static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
929 unsigned offset, unsigned count, int zeroing)
930{
931 int err;
932 struct page *page = *pagep;
933
934 if (page && zeroing && count < PAGE_SIZE)
935 clear_highpage(page);
936
937 while (count) {
938 if (cs->write && cs->pipebufs && page) {
939 /*
940 * Can't control lifetime of pipe buffers, so always
941 * copy user pages.
942 */
943 if (cs->req->args->user_pages) {
944 err = fuse_copy_fill(cs);
945 if (err)
946 return err;
947 } else {
948 return fuse_ref_page(cs, page, offset, count);
949 }
950 } else if (!cs->len) {
951 if (cs->move_pages && page &&
952 offset == 0 && count == PAGE_SIZE) {
953 err = fuse_try_move_page(cs, pagep);
954 if (err <= 0)
955 return err;
956 } else {
957 err = fuse_copy_fill(cs);
958 if (err)
959 return err;
960 }
961 }
962 if (page) {
963 void *mapaddr = kmap_local_page(page);
964 void *buf = mapaddr + offset;
965 offset += fuse_copy_do(cs, &buf, &count);
966 kunmap_local(mapaddr);
967 } else
968 offset += fuse_copy_do(cs, NULL, &count);
969 }
970 if (page && !cs->write)
971 flush_dcache_page(page);
972 return 0;
973}
974
975/* Copy pages in the request to/from userspace buffer */
976static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
977 int zeroing)
978{
979 unsigned i;
980 struct fuse_req *req = cs->req;
981 struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
982
983
984 for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
985 int err;
986 unsigned int offset = ap->descs[i].offset;
987 unsigned int count = min(nbytes, ap->descs[i].length);
988
989 err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
990 if (err)
991 return err;
992
993 nbytes -= count;
994 }
995 return 0;
996}
997
998/* Copy a single argument in the request to/from userspace buffer */
999static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
1000{
1001 while (size) {
1002 if (!cs->len) {
1003 int err = fuse_copy_fill(cs);
1004 if (err)
1005 return err;
1006 }
1007 fuse_copy_do(cs, &val, &size);
1008 }
1009 return 0;
1010}
1011
1012/* Copy request arguments to/from userspace buffer */
1013static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
1014 unsigned argpages, struct fuse_arg *args,
1015 int zeroing)
1016{
1017 int err = 0;
1018 unsigned i;
1019
1020 for (i = 0; !err && i < numargs; i++) {
1021 struct fuse_arg *arg = &args[i];
1022 if (i == numargs - 1 && argpages)
1023 err = fuse_copy_pages(cs, arg->size, zeroing);
1024 else
1025 err = fuse_copy_one(cs, arg->value, arg->size);
1026 }
1027 return err;
1028}
1029
1030static int forget_pending(struct fuse_iqueue *fiq)
1031{
1032 return fiq->forget_list_head.next != NULL;
1033}
1034
1035static int request_pending(struct fuse_iqueue *fiq)
1036{
1037 return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
1038 forget_pending(fiq);
1039}
1040
1041/*
1042 * Transfer an interrupt request to userspace
1043 *
1044 * Unlike other requests this is assembled on demand, without a need
1045 * to allocate a separate fuse_req structure.
1046 *
1047 * Called with fiq->lock held, releases it
1048 */
1049static int fuse_read_interrupt(struct fuse_iqueue *fiq,
1050 struct fuse_copy_state *cs,
1051 size_t nbytes, struct fuse_req *req)
1052__releases(fiq->lock)
1053{
1054 struct fuse_in_header ih;
1055 struct fuse_interrupt_in arg;
1056 unsigned reqsize = sizeof(ih) + sizeof(arg);
1057 int err;
1058
1059 list_del_init(&req->intr_entry);
1060 memset(&ih, 0, sizeof(ih));
1061 memset(&arg, 0, sizeof(arg));
1062 ih.len = reqsize;
1063 ih.opcode = FUSE_INTERRUPT;
1064 ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
1065 arg.unique = req->in.h.unique;
1066
1067 spin_unlock(&fiq->lock);
1068 if (nbytes < reqsize)
1069 return -EINVAL;
1070
1071 err = fuse_copy_one(cs, &ih, sizeof(ih));
1072 if (!err)
1073 err = fuse_copy_one(cs, &arg, sizeof(arg));
1074 fuse_copy_finish(cs);
1075
1076 return err ? err : reqsize;
1077}
1078
1079struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
1080 unsigned int max,
1081 unsigned int *countp)
1082{
1083 struct fuse_forget_link *head = fiq->forget_list_head.next;
1084 struct fuse_forget_link **newhead = &head;
1085 unsigned count;
1086
1087 for (count = 0; *newhead != NULL && count < max; count++)
1088 newhead = &(*newhead)->next;
1089
1090 fiq->forget_list_head.next = *newhead;
1091 *newhead = NULL;
1092 if (fiq->forget_list_head.next == NULL)
1093 fiq->forget_list_tail = &fiq->forget_list_head;
1094
1095 if (countp != NULL)
1096 *countp = count;
1097
1098 return head;
1099}
1100EXPORT_SYMBOL(fuse_dequeue_forget);
1101
1102static int fuse_read_single_forget(struct fuse_iqueue *fiq,
1103 struct fuse_copy_state *cs,
1104 size_t nbytes)
1105__releases(fiq->lock)
1106{
1107 int err;
1108 struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
1109 struct fuse_forget_in arg = {
1110 .nlookup = forget->forget_one.nlookup,
1111 };
1112 struct fuse_in_header ih = {
1113 .opcode = FUSE_FORGET,
1114 .nodeid = forget->forget_one.nodeid,
1115 .unique = fuse_get_unique(fiq),
1116 .len = sizeof(ih) + sizeof(arg),
1117 };
1118
1119 spin_unlock(&fiq->lock);
1120 kfree(forget);
1121 if (nbytes < ih.len)
1122 return -EINVAL;
1123
1124 err = fuse_copy_one(cs, &ih, sizeof(ih));
1125 if (!err)
1126 err = fuse_copy_one(cs, &arg, sizeof(arg));
1127 fuse_copy_finish(cs);
1128
1129 if (err)
1130 return err;
1131
1132 return ih.len;
1133}
1134
1135static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
1136 struct fuse_copy_state *cs, size_t nbytes)
1137__releases(fiq->lock)
1138{
1139 int err;
1140 unsigned max_forgets;
1141 unsigned count;
1142 struct fuse_forget_link *head;
1143 struct fuse_batch_forget_in arg = { .count = 0 };
1144 struct fuse_in_header ih = {
1145 .opcode = FUSE_BATCH_FORGET,
1146 .unique = fuse_get_unique(fiq),
1147 .len = sizeof(ih) + sizeof(arg),
1148 };
1149
1150 if (nbytes < ih.len) {
1151 spin_unlock(&fiq->lock);
1152 return -EINVAL;
1153 }
1154
1155 max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
1156 head = fuse_dequeue_forget(fiq, max_forgets, &count);
1157 spin_unlock(&fiq->lock);
1158
1159 arg.count = count;
1160 ih.len += count * sizeof(struct fuse_forget_one);
1161 err = fuse_copy_one(cs, &ih, sizeof(ih));
1162 if (!err)
1163 err = fuse_copy_one(cs, &arg, sizeof(arg));
1164
1165 while (head) {
1166 struct fuse_forget_link *forget = head;
1167
1168 if (!err) {
1169 err = fuse_copy_one(cs, &forget->forget_one,
1170 sizeof(forget->forget_one));
1171 }
1172 head = forget->next;
1173 kfree(forget);
1174 }
1175
1176 fuse_copy_finish(cs);
1177
1178 if (err)
1179 return err;
1180
1181 return ih.len;
1182}
1183
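/*
 * Send a single FORGET to old (protocol minor < 16) userspace, or when
 * only one forget is queued; otherwise send a batched FUSE_BATCH_FORGET.
 */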
1184static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
1185 struct fuse_copy_state *cs,
1186 size_t nbytes)
1187__releases(fiq->lock)
1188{
1189 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
1190 return fuse_read_single_forget(fiq, cs, nbytes);
1191 else
1192 return fuse_read_batch_forget(fiq, cs, nbytes);
1193}
1194
1195/*
1196 * Read a single request into the userspace filesystem's buffer. This
1197 * function waits until a request is available, then removes it from
1198 * the pending list and copies request data to userspace buffer. If
1199 * no reply is needed (FORGET), or the request has been aborted, or
1200 * there was an error during the copying, then it is finished by calling
1201 * fuse_request_end(). Otherwise add it to the processing list and set
1202 * the 'sent' flag.
1203 */
1204static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
1205 struct fuse_copy_state *cs, size_t nbytes)
1206{
1207 ssize_t err;
1208 struct fuse_conn *fc = fud->fc;
1209 struct fuse_iqueue *fiq = &fc->iq;
1210 struct fuse_pqueue *fpq = &fud->pq;
1211 struct fuse_req *req;
1212 struct fuse_args *args;
1213 unsigned reqsize;
1214 unsigned int hash;
1215
1216 /*
1217 * Require sane minimum read buffer - that has capacity for fixed part
1218 * of any request header + negotiated max_write room for data.
1219 *
1220 * Historically libfuse reserves 4K for fixed header room, but e.g.
1221 * GlusterFS reserves only 80 bytes
1222 *
1223 * = `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
1224 *
1225 * which is the absolute minimum any sane filesystem should be using
1226 * for header room.
1227 */
1228 if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
1229 sizeof(struct fuse_in_header) +
1230 sizeof(struct fuse_write_in) +
1231 fc->max_write))
1232 return -EINVAL;
1233
1234 restart:
1235 for (;;) {
1236 spin_lock(&fiq->lock);
1237 if (!fiq->connected || request_pending(fiq))
1238 break;
1239 spin_unlock(&fiq->lock);
1240
1241 if (file->f_flags & O_NONBLOCK)
1242 return -EAGAIN;
1243 err = wait_event_interruptible_exclusive(fiq->waitq,
1244 !fiq->connected || request_pending(fiq));
1245 if (err)
1246 return err;
1247 }
1248
1249 if (!fiq->connected) {
1250 err = fc->aborted ? -ECONNABORTED : -ENODEV;
1251 goto err_unlock;
1252 }
1253
1254 if (!list_empty(&fiq->interrupts)) {
1255 req = list_entry(fiq->interrupts.next, struct fuse_req,
1256 intr_entry);
1257 return fuse_read_interrupt(fiq, cs, nbytes, req);
1258 }
1259
1260 if (forget_pending(fiq)) {
1261 if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
1262 return fuse_read_forget(fc, fiq, cs, nbytes);
1263
1264 if (fiq->forget_batch <= -8)
1265 fiq->forget_batch = 16;
1266 }
1267
1268 req = list_entry(fiq->pending.next, struct fuse_req, list);
1269 clear_bit(FR_PENDING, &req->flags);
1270 list_del_init(&req->list);
1271 spin_unlock(&fiq->lock);
1272
1273 args = req->args;
1274 reqsize = req->in.h.len;
1275
1276 /* If request is too large, reply with an error and restart the read */
1277 if (nbytes < reqsize) {
1278 req->out.h.error = -EIO;
1279 /* SETXATTR is special, since it may contain too large data */
1280 if (args->opcode == FUSE_SETXATTR)
1281 req->out.h.error = -E2BIG;
1282 fuse_request_end(req);
1283 goto restart;
1284 }
1285 spin_lock(&fpq->lock);
1286 /*
1287 * Must not put request on fpq->io queue after having been shut down by
1288 * fuse_abort_conn()
1289 */
1290 if (!fpq->connected) {
1291 req->out.h.error = err = -ECONNABORTED;
1292 goto out_end;
1293
1294 }
1295 list_add(&req->list, &fpq->io);
1296 spin_unlock(&fpq->lock);
1297 cs->req = req;
1298 err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
1299 if (!err)
1300 err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
1301 (struct fuse_arg *) args->in_args, 0);
1302 fuse_copy_finish(cs);
1303 spin_lock(&fpq->lock);
1304 clear_bit(FR_LOCKED, &req->flags);
1305 if (!fpq->connected) {
1306 err = fc->aborted ? -ECONNABORTED : -ENODEV;
1307 goto out_end;
1308 }
1309 if (err) {
1310 req->out.h.error = -EIO;
1311 goto out_end;
1312 }
1313 if (!test_bit(FR_ISREPLY, &req->flags)) {
1314 err = reqsize;
1315 goto out_end;
1316 }
1317 hash = fuse_req_hash(req->in.h.unique);
1318 list_move_tail(&req->list, &fpq->processing[hash]);
1319 __fuse_get_request(req);
1320 set_bit(FR_SENT, &req->flags);
1321 spin_unlock(&fpq->lock);
1322 /* matches barrier in request_wait_answer() */
1323 smp_mb__after_atomic();
1324 if (test_bit(FR_INTERRUPTED, &req->flags))
1325 queue_interrupt(req);
1326 fuse_put_request(req);
1327
1328 return reqsize;
1329
1330out_end:
1331 if (!test_bit(FR_PRIVATE, &req->flags))
1332 list_del_init(&req->list);
1333 spin_unlock(&fpq->lock);
1334 fuse_request_end(req);
1335 return err;
1336
1337 err_unlock:
1338 spin_unlock(&fiq->lock);
1339 return err;
1340}
1341
1342static int fuse_dev_open(struct inode *inode, struct file *file)
1343{
1344 /*
1345	 * The file's private_data is used to hold the fuse_dev once the
1346	 * device has been mounted, and to keep track of whether the file
1347	 * has been mounted already.
1348 */
1349 file->private_data = NULL;
1350 return 0;
1351}
1352
1353static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
1354{
1355 struct fuse_copy_state cs;
1356 struct file *file = iocb->ki_filp;
1357 struct fuse_dev *fud = fuse_get_dev(file);
1358
1359 if (!fud)
1360 return -EPERM;
1361
1362 if (!user_backed_iter(to))
1363 return -EINVAL;
1364
1365 fuse_copy_init(&cs, 1, to);
1366
1367 return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
1368}
1369
1370static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1371 struct pipe_inode_info *pipe,
1372 size_t len, unsigned int flags)
1373{
1374 int total, ret;
1375 int page_nr = 0;
1376 struct pipe_buffer *bufs;
1377 struct fuse_copy_state cs;
1378 struct fuse_dev *fud = fuse_get_dev(in);
1379
1380 if (!fud)
1381 return -EPERM;
1382
1383 bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
1384 GFP_KERNEL);
1385 if (!bufs)
1386 return -ENOMEM;
1387
1388 fuse_copy_init(&cs, 1, NULL);
1389 cs.pipebufs = bufs;
1390 cs.pipe = pipe;
1391 ret = fuse_dev_do_read(fud, in, &cs, len);
1392 if (ret < 0)
1393 goto out;
1394
1395 if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
1396 ret = -EIO;
1397 goto out;
1398 }
1399
1400 for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
1401 /*
1402 * Need to be careful about this. Having buf->ops in module
1403 * code can Oops if the buffer persists after module unload.
1404 */
1405 bufs[page_nr].ops = &nosteal_pipe_buf_ops;
1406 bufs[page_nr].flags = 0;
1407 ret = add_to_pipe(pipe, &bufs[page_nr++]);
1408 if (unlikely(ret < 0))
1409 break;
1410 }
1411 if (total)
1412 ret = total;
1413out:
1414 for (; page_nr < cs.nr_segs; page_nr++)
1415 put_page(bufs[page_nr].page);
1416
1417 kvfree(bufs);
1418 return ret;
1419}
1420
1421static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1422 struct fuse_copy_state *cs)
1423{
1424 struct fuse_notify_poll_wakeup_out outarg;
1425 int err = -EINVAL;
1426
1427 if (size != sizeof(outarg))
1428 goto err;
1429
1430 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1431 if (err)
1432 goto err;
1433
1434 fuse_copy_finish(cs);
1435 return fuse_notify_poll_wakeup(fc, &outarg);
1436
1437err:
1438 fuse_copy_finish(cs);
1439 return err;
1440}
1441
1442static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1443 struct fuse_copy_state *cs)
1444{
1445 struct fuse_notify_inval_inode_out outarg;
1446 int err = -EINVAL;
1447
1448 if (size != sizeof(outarg))
1449 goto err;
1450
1451 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1452 if (err)
1453 goto err;
1454 fuse_copy_finish(cs);
1455
1456 down_read(&fc->killsb);
1457 err = fuse_reverse_inval_inode(fc, outarg.ino,
1458 outarg.off, outarg.len);
1459 up_read(&fc->killsb);
1460 return err;
1461
1462err:
1463 fuse_copy_finish(cs);
1464 return err;
1465}
1466
1467static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1468 struct fuse_copy_state *cs)
1469{
1470 struct fuse_notify_inval_entry_out outarg;
1471 int err = -ENOMEM;
1472 char *buf;
1473 struct qstr name;
1474
1475 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1476 if (!buf)
1477 goto err;
1478
1479 err = -EINVAL;
1480 if (size < sizeof(outarg))
1481 goto err;
1482
1483 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1484 if (err)
1485 goto err;
1486
1487 err = -ENAMETOOLONG;
1488 if (outarg.namelen > FUSE_NAME_MAX)
1489 goto err;
1490
1491 err = -EINVAL;
1492 if (size != sizeof(outarg) + outarg.namelen + 1)
1493 goto err;
1494
1495 name.name = buf;
1496 name.len = outarg.namelen;
1497 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1498 if (err)
1499 goto err;
1500 fuse_copy_finish(cs);
1501 buf[outarg.namelen] = 0;
1502
1503 down_read(&fc->killsb);
1504 err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name, outarg.flags);
1505 up_read(&fc->killsb);
1506 kfree(buf);
1507 return err;
1508
1509err:
1510 kfree(buf);
1511 fuse_copy_finish(cs);
1512 return err;
1513}
1514
1515static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1516 struct fuse_copy_state *cs)
1517{
1518 struct fuse_notify_delete_out outarg;
1519 int err = -ENOMEM;
1520 char *buf;
1521 struct qstr name;
1522
1523 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1524 if (!buf)
1525 goto err;
1526
1527 err = -EINVAL;
1528 if (size < sizeof(outarg))
1529 goto err;
1530
1531 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1532 if (err)
1533 goto err;
1534
1535 err = -ENAMETOOLONG;
1536 if (outarg.namelen > FUSE_NAME_MAX)
1537 goto err;
1538
1539 err = -EINVAL;
1540 if (size != sizeof(outarg) + outarg.namelen + 1)
1541 goto err;
1542
1543 name.name = buf;
1544 name.len = outarg.namelen;
1545 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1546 if (err)
1547 goto err;
1548 fuse_copy_finish(cs);
1549 buf[outarg.namelen] = 0;
1550
1551 down_read(&fc->killsb);
1552 err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name, 0);
1553 up_read(&fc->killsb);
1554 kfree(buf);
1555 return err;
1556
1557err:
1558 kfree(buf);
1559 fuse_copy_finish(cs);
1560 return err;
1561}
1562
1563static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1564 struct fuse_copy_state *cs)
1565{
1566 struct fuse_notify_store_out outarg;
1567 struct inode *inode;
1568 struct address_space *mapping;
1569 u64 nodeid;
1570 int err;
1571 pgoff_t index;
1572 unsigned int offset;
1573 unsigned int num;
1574 loff_t file_size;
1575 loff_t end;
1576
1577 err = -EINVAL;
1578 if (size < sizeof(outarg))
1579 goto out_finish;
1580
1581 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1582 if (err)
1583 goto out_finish;
1584
1585 err = -EINVAL;
1586 if (size - sizeof(outarg) != outarg.size)
1587 goto out_finish;
1588
1589 nodeid = outarg.nodeid;
1590
1591 down_read(&fc->killsb);
1592
1593 err = -ENOENT;
1594 inode = fuse_ilookup(fc, nodeid, NULL);
1595 if (!inode)
1596 goto out_up_killsb;
1597
1598 mapping = inode->i_mapping;
1599 index = outarg.offset >> PAGE_SHIFT;
1600 offset = outarg.offset & ~PAGE_MASK;
1601 file_size = i_size_read(inode);
1602 end = outarg.offset + outarg.size;
1603 if (end > file_size) {
1604 file_size = end;
1605 fuse_write_update_attr(inode, file_size, outarg.size);
1606 }
1607
1608 num = outarg.size;
1609 while (num) {
1610 struct page *page;
1611 unsigned int this_num;
1612
1613 err = -ENOMEM;
1614 page = find_or_create_page(mapping, index,
1615 mapping_gfp_mask(mapping));
1616 if (!page)
1617 goto out_iput;
1618
1619 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1620 err = fuse_copy_page(cs, &page, offset, this_num, 0);
1621 if (!err && offset == 0 &&
1622 (this_num == PAGE_SIZE || file_size == end))
1623 SetPageUptodate(page);
1624 unlock_page(page);
1625 put_page(page);
1626
1627 if (err)
1628 goto out_iput;
1629
1630 num -= this_num;
1631 offset = 0;
1632 index++;
1633 }
1634
1635 err = 0;
1636
1637out_iput:
1638 iput(inode);
1639out_up_killsb:
1640 up_read(&fc->killsb);
1641out_finish:
1642 fuse_copy_finish(cs);
1643 return err;
1644}
1645
1646struct fuse_retrieve_args {
1647 struct fuse_args_pages ap;
1648 struct fuse_notify_retrieve_in inarg;
1649};
1650
1651static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args,
1652 int error)
1653{
1654 struct fuse_retrieve_args *ra =
1655 container_of(args, typeof(*ra), ap.args);
1656
1657 release_pages(ra->ap.pages, ra->ap.num_pages);
1658 kfree(ra);
1659}
1660
1661static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
1662 struct fuse_notify_retrieve_out *outarg)
1663{
1664 int err;
1665 struct address_space *mapping = inode->i_mapping;
1666 pgoff_t index;
1667 loff_t file_size;
1668 unsigned int num;
1669 unsigned int offset;
1670 size_t total_len = 0;
1671 unsigned int num_pages;
1672 struct fuse_conn *fc = fm->fc;
1673 struct fuse_retrieve_args *ra;
1674 size_t args_size = sizeof(*ra);
1675 struct fuse_args_pages *ap;
1676 struct fuse_args *args;
1677
1678 offset = outarg->offset & ~PAGE_MASK;
1679 file_size = i_size_read(inode);
1680
1681 num = min(outarg->size, fc->max_write);
1682 if (outarg->offset > file_size)
1683 num = 0;
1684 else if (outarg->offset + num > file_size)
1685 num = file_size - outarg->offset;
1686
1687 num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1688 num_pages = min(num_pages, fc->max_pages);
1689
1690 args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));
1691
1692 ra = kzalloc(args_size, GFP_KERNEL);
1693 if (!ra)
1694 return -ENOMEM;
1695
1696 ap = &ra->ap;
1697 ap->pages = (void *) (ra + 1);
1698 ap->descs = (void *) (ap->pages + num_pages);
1699
1700 args = &ap->args;
1701 args->nodeid = outarg->nodeid;
1702 args->opcode = FUSE_NOTIFY_REPLY;
1703 args->in_numargs = 2;
1704 args->in_pages = true;
1705 args->end = fuse_retrieve_end;
1706
1707 index = outarg->offset >> PAGE_SHIFT;
1708
1709 while (num && ap->num_pages < num_pages) {
1710 struct page *page;
1711 unsigned int this_num;
1712
1713 page = find_get_page(mapping, index);
1714 if (!page)
1715 break;
1716
1717 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1718 ap->pages[ap->num_pages] = page;
1719 ap->descs[ap->num_pages].offset = offset;
1720 ap->descs[ap->num_pages].length = this_num;
1721 ap->num_pages++;
1722
1723 offset = 0;
1724 num -= this_num;
1725 total_len += this_num;
1726 index++;
1727 }
1728 ra->inarg.offset = outarg->offset;
1729 ra->inarg.size = total_len;
1730 args->in_args[0].size = sizeof(ra->inarg);
1731 args->in_args[0].value = &ra->inarg;
1732 args->in_args[1].size = total_len;
1733
1734 err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
1735 if (err)
1736 fuse_retrieve_end(fm, args, err);
1737
1738 return err;
1739}
1740
1741static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1742 struct fuse_copy_state *cs)
1743{
1744 struct fuse_notify_retrieve_out outarg;
1745 struct fuse_mount *fm;
1746 struct inode *inode;
1747 u64 nodeid;
1748 int err;
1749
1750 err = -EINVAL;
1751 if (size != sizeof(outarg))
1752 goto copy_finish;
1753
1754 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1755 if (err)
1756 goto copy_finish;
1757
1758 fuse_copy_finish(cs);
1759
1760 down_read(&fc->killsb);
1761 err = -ENOENT;
1762 nodeid = outarg.nodeid;
1763
1764 inode = fuse_ilookup(fc, nodeid, &fm);
1765 if (inode) {
1766 err = fuse_retrieve(fm, inode, &outarg);
1767 iput(inode);
1768 }
1769 up_read(&fc->killsb);
1770
1771 return err;
1772
1773copy_finish:
1774 fuse_copy_finish(cs);
1775 return err;
1776}
1777
1778static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1779 unsigned int size, struct fuse_copy_state *cs)
1780{
1781 /* Don't try to move pages (yet) */
1782 cs->move_pages = 0;
1783
1784 switch (code) {
1785 case FUSE_NOTIFY_POLL:
1786 return fuse_notify_poll(fc, size, cs);
1787
1788 case FUSE_NOTIFY_INVAL_INODE:
1789 return fuse_notify_inval_inode(fc, size, cs);
1790
1791 case FUSE_NOTIFY_INVAL_ENTRY:
1792 return fuse_notify_inval_entry(fc, size, cs);
1793
1794 case FUSE_NOTIFY_STORE:
1795 return fuse_notify_store(fc, size, cs);
1796
1797 case FUSE_NOTIFY_RETRIEVE:
1798 return fuse_notify_retrieve(fc, size, cs);
1799
1800 case FUSE_NOTIFY_DELETE:
1801 return fuse_notify_delete(fc, size, cs);
1802
1803 default:
1804 fuse_copy_finish(cs);
1805 return -EINVAL;
1806 }
1807}
1808
1809/* Look up request on processing list by unique ID */
1810static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
1811{
1812 unsigned int hash = fuse_req_hash(unique);
1813 struct fuse_req *req;
1814
1815 list_for_each_entry(req, &fpq->processing[hash], list) {
1816 if (req->in.h.unique == unique)
1817 return req;
1818 }
1819 return NULL;
1820}
1821
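/*
 * Copy the reply arguments from the userspace buffer into the request.
 * The reply must match the expected size exactly, except that a
 * variable-size last argument (out_argvar) may be shortened.
 */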
1822static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
1823 unsigned nbytes)
1824{
1825 unsigned reqsize = sizeof(struct fuse_out_header);
1826
1827 reqsize += fuse_len_args(args->out_numargs, args->out_args);
1828
1829 if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
1830 return -EINVAL;
1831 else if (reqsize > nbytes) {
1832 struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
1833 unsigned diffsize = reqsize - nbytes;
1834
1835 if (diffsize > lastarg->size)
1836 return -EINVAL;
1837 lastarg->size -= diffsize;
1838 }
1839 return fuse_copy_args(cs, args->out_numargs, args->out_pages,
1840 args->out_args, args->page_zeroing);
1841}
1842
1843/*
1844 * Write a single reply to a request. First the header is copied from
1845 * the write buffer. The request is then searched on the processing
1846 * list by the unique ID found in the header. If found, then remove
1847 * it from the list and copy the rest of the buffer to the request.
1848 * The request is finished by calling fuse_request_end().
1849 */
1850static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
1851 struct fuse_copy_state *cs, size_t nbytes)
1852{
1853 int err;
1854 struct fuse_conn *fc = fud->fc;
1855 struct fuse_pqueue *fpq = &fud->pq;
1856 struct fuse_req *req;
1857 struct fuse_out_header oh;
1858
1859 err = -EINVAL;
1860 if (nbytes < sizeof(struct fuse_out_header))
1861 goto out;
1862
1863 err = fuse_copy_one(cs, &oh, sizeof(oh));
1864 if (err)
1865 goto copy_finish;
1866
1867 err = -EINVAL;
1868 if (oh.len != nbytes)
1869 goto copy_finish;
1870
1871 /*
1872	 * A zero oh.unique indicates an unsolicited notification message,
1873	 * and oh.error then carries the notification code.
1874 */
1875 if (!oh.unique) {
1876 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1877 goto out;
1878 }
1879
1880 err = -EINVAL;
1881 if (oh.error <= -512 || oh.error > 0)
1882 goto copy_finish;
1883
1884 spin_lock(&fpq->lock);
1885 req = NULL;
1886 if (fpq->connected)
1887 req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
1888
1889 err = -ENOENT;
1890 if (!req) {
1891 spin_unlock(&fpq->lock);
1892 goto copy_finish;
1893 }
1894
1895 /* Is it an interrupt reply ID? */
1896 if (oh.unique & FUSE_INT_REQ_BIT) {
1897 __fuse_get_request(req);
1898 spin_unlock(&fpq->lock);
1899
1900 err = 0;
1901 if (nbytes != sizeof(struct fuse_out_header))
1902 err = -EINVAL;
1903 else if (oh.error == -ENOSYS)
1904 fc->no_interrupt = 1;
1905 else if (oh.error == -EAGAIN)
1906 err = queue_interrupt(req);
1907
1908 fuse_put_request(req);
1909
1910 goto copy_finish;
1911 }
1912
1913 clear_bit(FR_SENT, &req->flags);
1914 list_move(&req->list, &fpq->io);
1915 req->out.h = oh;
1916 set_bit(FR_LOCKED, &req->flags);
1917 spin_unlock(&fpq->lock);
1918 cs->req = req;
1919 if (!req->args->page_replace)
1920 cs->move_pages = 0;
1921
1922 if (oh.error)
1923 err = nbytes != sizeof(oh) ? -EINVAL : 0;
1924 else
1925 err = copy_out_args(cs, req->args, nbytes);
1926 fuse_copy_finish(cs);
1927
1928 spin_lock(&fpq->lock);
1929 clear_bit(FR_LOCKED, &req->flags);
1930 if (!fpq->connected)
1931 err = -ENOENT;
1932 else if (err)
1933 req->out.h.error = -EIO;
1934 if (!test_bit(FR_PRIVATE, &req->flags))
1935 list_del_init(&req->list);
1936 spin_unlock(&fpq->lock);
1937
1938 fuse_request_end(req);
1939out:
1940 return err ? err : nbytes;
1941
1942copy_finish:
1943 fuse_copy_finish(cs);
1944 goto out;
1945}
1946
1947static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
1948{
1949 struct fuse_copy_state cs;
1950 struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
1951
1952 if (!fud)
1953 return -EPERM;
1954
1955 if (!user_backed_iter(from))
1956 return -EINVAL;
1957
1958 fuse_copy_init(&cs, 0, from);
1959
1960 return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
1961}
1962
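/*
 * Splice a reply from a pipe into the device.  The pipe buffers are
 * detached (or referenced) and handed to fuse_dev_do_write(); with
 * SPLICE_F_MOVE, whole pages may be stolen and moved into the page
 * cache instead of being copied.
 */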
1963static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1964 struct file *out, loff_t *ppos,
1965 size_t len, unsigned int flags)
1966{
1967 unsigned int head, tail, mask, count;
1968 unsigned nbuf;
1969 unsigned idx;
1970 struct pipe_buffer *bufs;
1971 struct fuse_copy_state cs;
1972 struct fuse_dev *fud;
1973 size_t rem;
1974 ssize_t ret;
1975
1976 fud = fuse_get_dev(out);
1977 if (!fud)
1978 return -EPERM;
1979
1980 pipe_lock(pipe);
1981
1982 head = pipe->head;
1983 tail = pipe->tail;
1984 mask = pipe->ring_size - 1;
1985 count = head - tail;
1986
1987 bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
1988 if (!bufs) {
1989 pipe_unlock(pipe);
1990 return -ENOMEM;
1991 }
1992
1993 nbuf = 0;
1994 rem = 0;
1995 for (idx = tail; idx != head && rem < len; idx++)
1996 rem += pipe->bufs[idx & mask].len;
1997
1998 ret = -EINVAL;
1999 if (rem < len)
2000 goto out_free;
2001
2002 rem = len;
2003 while (rem) {
2004 struct pipe_buffer *ibuf;
2005 struct pipe_buffer *obuf;
2006
2007 if (WARN_ON(nbuf >= count || tail == head))
2008 goto out_free;
2009
2010 ibuf = &pipe->bufs[tail & mask];
2011 obuf = &bufs[nbuf];
2012
2013 if (rem >= ibuf->len) {
2014 *obuf = *ibuf;
2015 ibuf->ops = NULL;
2016 tail++;
2017 pipe->tail = tail;
2018 } else {
2019 if (!pipe_buf_get(pipe, ibuf))
2020 goto out_free;
2021
2022 *obuf = *ibuf;
2023 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
2024 obuf->len = rem;
2025 ibuf->offset += obuf->len;
2026 ibuf->len -= obuf->len;
2027 }
2028 nbuf++;
2029 rem -= obuf->len;
2030 }
2031 pipe_unlock(pipe);
2032
2033 fuse_copy_init(&cs, 0, NULL);
2034 cs.pipebufs = bufs;
2035 cs.nr_segs = nbuf;
2036 cs.pipe = pipe;
2037
2038 if (flags & SPLICE_F_MOVE)
2039 cs.move_pages = 1;
2040
2041 ret = fuse_dev_do_write(fud, &cs, len);
2042
2043 pipe_lock(pipe);
2044out_free:
2045 for (idx = 0; idx < nbuf; idx++) {
2046 struct pipe_buffer *buf = &bufs[idx];
2047
2048 if (buf->ops)
2049 pipe_buf_release(pipe, buf);
2050 }
2051 pipe_unlock(pipe);
2052
2053 kvfree(bufs);
2054 return ret;
2055}
2056
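/*
 * The device is always writable; it is readable when requests,
 * interrupts or forgets are pending on the input queue.
 */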
2057static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
2058{
2059 __poll_t mask = EPOLLOUT | EPOLLWRNORM;
2060 struct fuse_iqueue *fiq;
2061 struct fuse_dev *fud = fuse_get_dev(file);
2062
2063 if (!fud)
2064 return EPOLLERR;
2065
2066 fiq = &fud->fc->iq;
2067 poll_wait(file, &fiq->waitq, wait);
2068
2069 spin_lock(&fiq->lock);
2070 if (!fiq->connected)
2071 mask = EPOLLERR;
2072 else if (request_pending(fiq))
2073 mask |= EPOLLIN | EPOLLRDNORM;
2074 spin_unlock(&fiq->lock);
2075
2076 return mask;
2077}
2078
2079/* Abort all requests on the given list (pending or processing) */
2080static void end_requests(struct list_head *head)
2081{
2082 while (!list_empty(head)) {
2083 struct fuse_req *req;
2084 req = list_entry(head->next, struct fuse_req, list);
2085 req->out.h.error = -ECONNABORTED;
2086 clear_bit(FR_SENT, &req->flags);
2087 list_del_init(&req->list);
2088 fuse_request_end(req);
2089 }
2090}
2091
2092static void end_polls(struct fuse_conn *fc)
2093{
2094 struct rb_node *p;
2095
2096 p = rb_first(&fc->polled_files);
2097
2098 while (p) {
2099 struct fuse_file *ff;
2100 ff = rb_entry(p, struct fuse_file, polled_node);
2101 wake_up_interruptible_all(&ff->poll_wait);
2102
2103 p = rb_next(p);
2104 }
2105}
2106
2107/*
2108 * Abort all requests.
2109 *
2110 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
2111 * filesystem.
2112 *
2113 * The same effect is usually achievable through killing the filesystem daemon
2114 * and all users of the filesystem. The exception is the combination of an
2115 * asynchronous request and the tricky deadlock (see
2116 * Documentation/filesystems/fuse.rst).
2117 *
2118 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
2119 * requests, they should be finished off immediately. Locked requests will be
2120 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
2121 * requests. It is possible that some request will finish before we can;
2122 * this is OK, since such a request will then be removed from the list
2123 * before we touch it.
2124 */
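/*
 * Note: in practice an abort is usually triggered from userspace by writing
 * to the "abort" file of the fusectl filesystem (conventionally mounted at
 * /sys/fs/fuse/connections); it is also reached when the last device fd is
 * released, see fuse_dev_release() below.
 */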
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end);
		unsigned int i;

		/* Background queuing checks fc->connected under bg_lock */
		spin_lock(&fc->bg_lock);
		fc->connected = 0;
		spin_unlock(&fc->bg_lock);

		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					__fuse_get_request(req);
					list_move(&req->list, &to_end);
				}
				spin_unlock(&req->waitq.lock);
			}
			for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
				list_splice_tail_init(&fpq->processing[i],
						      &to_end);
			spin_unlock(&fpq->lock);
		}
		spin_lock(&fc->bg_lock);
		fc->blocked = 0;
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);

		spin_lock(&fiq->lock);
		fiq->connected = 0;
		list_for_each_entry(req, &fiq->pending, list)
			clear_bit(FR_PENDING, &req->flags);
		list_splice_tail_init(&fiq->pending, &to_end);
		while (forget_pending(fiq))
			kfree(fuse_dequeue_forget(fiq, 1, NULL));
		wake_up_all(&fiq->waitq);
		spin_unlock(&fiq->lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		end_requests(&to_end);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

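/*
 * Wait until no request holds a reference on the connection any more
 * (fc->num_waiting drops to zero).  Only meaningful after the connection
 * has been aborted; otherwise this may block indefinitely.
 */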
void fuse_wait_aborted(struct fuse_conn *fc)
{
	/* matches implicit memory barrier in fuse_drop_waiting() */
	smp_mb();
	wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
}

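/*
 * Release a /dev/fuse fd: requests still being processed through this
 * device are aborted, and closing the last device aborts the whole
 * connection.
 */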
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;
		LIST_HEAD(to_end);
		unsigned int i;

		spin_lock(&fpq->lock);
		WARN_ON(!list_empty(&fpq->io));
		for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
			list_splice_init(&fpq->processing[i], &to_end);
		spin_unlock(&fpq->lock);

		end_requests(&to_end);

		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_dev_free(fud);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

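/*
 * Enable or disable SIGIO delivery on this fd; the input queue raises
 * SIGIO (POLL_IN) when a request arrives.
 */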
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

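/*
 * Attach a further fuse_dev to an existing connection, allowing multiple
 * fds, and hence multiple daemon threads, to service one mount.
 */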
static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc_install(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}

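/*
 * FUSE_DEV_IOC_CLONE takes a pointer to the fd of an already initialized
 * fuse device and attaches the calling fd to the same connection.  A
 * minimal userspace sketch (illustrative only; error handling omitted,
 * and "session_fd" is a hypothetical variable holding the original fd):
 *
 *	int clone_fd = open("/dev/fuse", O_RDWR);
 *	uint32_t oldfd = session_fd;
 *	ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &oldfd);
 *	// clone_fd can now read requests and write replies concurrently
 */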
static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int res;
	int oldfd;
	struct fuse_dev *fud = NULL;
	struct fd f;

	switch (cmd) {
	case FUSE_DEV_IOC_CLONE:
		if (get_user(oldfd, (__u32 __user *)arg))
			return -EFAULT;

		f = fdget(oldfd);
		if (!f.file)
			return -EINVAL;

		/*
		 * Check against file->f_op because CUSE
		 * uses the same ioctl handler.
		 */
		if (f.file->f_op == file->f_op)
			fud = fuse_get_dev(f.file);

		res = -EINVAL;
		if (fud) {
			mutex_lock(&fuse_mutex);
			res = fuse_device_clone(fud->fc, file);
			mutex_unlock(&fuse_mutex);
		}
		fdput(f);
		break;
	default:
		res = -ENOTTY;
		break;
	}
	return res;
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

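/*
 * /dev/fuse is a misc character device (major 10, minor FUSE_MINOR).  The
 * "devname:fuse" module alias lets devtmpfs create the node before the
 * module is loaded, so opening it triggers autoload.
 */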
static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

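/* Set up the request slab cache and register /dev/fuse at module init. */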
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}