// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data. Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols. Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache. A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache. The client does not
 * correct unaligned requests from applications. All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files. Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"
#include "fscache.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
static void nfs_direct_write_schedule_work(struct work_struct *work);

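/*
 * The dreq io_count tracks the number of in-flight pgio/commit
 * operations.  Each scheduled operation takes a reference with
 * get_dreq(); once put_dreq() drops the count to zero, the caller
 * may complete the direct request.
 */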
static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

static void
nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
			    const struct nfs_pgio_header *hdr,
			    ssize_t dreq_len)
{
	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
		return;
	if (dreq->max_count >= dreq_len) {
		dreq->max_count = dreq_len;
		if (dreq->count > dreq_len)
			dreq->count = dreq_len;
	}

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error)
		dreq->error = hdr->error;
}

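/*
 * Record how many bytes, measured from dreq->io_start, are now known
 * to have completed, clamping the result to dreq->max_count.
 */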
static void
nfs_direct_count_bytes(struct nfs_direct_req *dreq,
		       const struct nfs_pgio_header *hdr)
{
	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
	ssize_t dreq_len = 0;

	if (hdr_end > dreq->io_start)
		dreq_len = hdr_end - dreq->io_start;

	nfs_direct_handle_truncated(dreq, hdr, dreq_len);

	if (dreq_len > dreq->max_count)
		dreq_len = dreq->max_count;

	if (dreq->count < dreq_len)
		dreq->count = dreq_len;
}

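/*
 * Trim the completed-byte accounting so that @req and everything
 * after it no longer count towards the result.
 */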
static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
					struct nfs_page *req)
{
	loff_t offs = req_offset(req);
	size_t req_start = (size_t)(offs - dreq->io_start);

	if (req_start < dreq->max_count)
		dreq->max_count = req_start;
	if (req_start < dreq->count)
		dreq->count = req_start;
}

/**
 * nfs_swap_rw - NFS address space operation for swap I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * Perform IO to the swap-file.  This is much like direct IO.
 */
int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t ret;

	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

	if (iov_iter_rw(iter) == READ)
		ret = nfs_file_direct_read(iocb, iter, true);
	else
		ret = nfs_file_direct_write(iocb, iter, true);
	if (ret < 0)
		return ret;
	return 0;
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->inode = dreq->inode;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	pnfs_init_ds_commit_info(&dreq->ds_cinfo);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

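/* Bytes of the direct request remaining at and beyond @offset. */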
ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset)
{
	loff_t start = offset - dreq->io_start;
	return dreq->max_count - start;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result) {
		result = dreq->count;
		WARN_ON_ONCE(dreq->count < 0);
	}
	if (!result)
		result = dreq->error;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	struct inode *inode = dreq->inode;

	inode_dio_end(inode);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (dreq->count != 0) {
			res = (long) dreq->count;
			WARN_ON_ONCE(dreq->count < 0);
		}
		dreq->iocb->ki_complete(dreq->iocb, res);
	}

	complete(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (!PageCompound(page) && bytes < hdr->good_bytes &&
		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
			set_page_dirty(page);
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      struct iov_iter *iter,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

	nfs_pageio_init_read(&desc, dreq->inode, false,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;
	inode_dio_begin(inode);

	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc2(iter, &pagevec,
						   rsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
							pgbase, pos, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return requested_bytes;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
			     bool swap)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	ssize_t result, requested;
	size_t count = iov_iter_count(iter);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
		 file, count, (long long) iocb->ki_pos);

	result = 0;
	if (!count)
		goto out;

	task_io_account_read(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->max_count = count;
	dreq->io_start = iocb->ki_pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		nfs_direct_req_release(dreq);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	if (user_backed_iter(iter))
		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

	if (!swap)
		nfs_start_io_direct(inode);

	NFS_I(inode)->read_io += count;
	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

	if (!swap)
		nfs_end_io_direct(inode);

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos += result;
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}

out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

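/*
 * If the page group head is not already queued, lock it, add it to
 * @list, and take two references to keep it pinned while the group
 * is joined.
 */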
static void nfs_direct_add_page_head(struct list_head *list,
				     struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	if (!list_empty(&head->wb_list) || !nfs_lock_request(head))
		return;
	if (!list_empty(&head->wb_list)) {
		nfs_unlock_request(head);
		return;
	}
	list_add(&head->wb_list, list);
	kref_get(&head->wb_kref);
	kref_get(&head->wb_kref);
}

static void nfs_direct_join_group(struct list_head *list,
				  struct nfs_commit_info *cinfo,
				  struct inode *inode)
{
	struct nfs_page *req, *subreq;

	list_for_each_entry(req, list, wb_list) {
		if (req->wb_head != req) {
			nfs_direct_add_page_head(&req->wb_list, req);
			continue;
		}
		subreq = req->wb_this_page;
		if (subreq == req)
			continue;
		do {
			/*
			 * Remove subrequests from this list before freeing
			 * them in the call to nfs_join_page_group().
			 */
			if (!list_empty(&subreq->wb_list)) {
				nfs_list_remove_request(subreq);
				nfs_release_request(subreq);
			}
		} while ((subreq = subreq->wb_this_page) != req);
		nfs_join_page_group(req, cinfo, inode);
	}
}

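/*
 * Move every request still waiting for a COMMIT, both on the MDS list
 * and on any pNFS DS buckets, over to @list under the commit mutex.
 */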
static void
nfs_direct_write_scan_commit_list(struct inode *inode,
				  struct list_head *list,
				  struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	pnfs_recover_commit_reqs(list, cinfo);
	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}

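/*
 * Resend the scheduled writes as stable WRITEs, e.g. after a commit
 * verifier mismatch showed that the server lost the unstable data.
 */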
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	nfs_direct_join_group(&reqs, &cinfo, dreq->inode);

	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	while (!list_empty(&reqs)) {
		req = nfs_list_entry(reqs.next);
		/* Bump the transmission count */
		req->wb_nio++;
		if (!nfs_pageio_add_request(&desc, req)) {
			spin_lock(&dreq->lock);
			if (dreq->error < 0) {
				desc.pg_error = dreq->error;
			} else if (desc.pg_error != -EAGAIN) {
				dreq->flags = 0;
				if (!desc.pg_error)
					desc.pg_error = -EIO;
				dreq->error = desc.pg_error;
			} else
				dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			spin_unlock(&dreq->lock);
			break;
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&reqs)) {
		req = nfs_list_entry(reqs.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
		if (desc.pg_error == -EAGAIN) {
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		} else {
			spin_lock(&dreq->lock);
			nfs_direct_truncate_request(dreq, req);
			spin_unlock(&dreq->lock);
			nfs_release_request(req);
		}
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
}

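/*
 * A COMMIT reply carries the server's current write verifier; if it
 * no longer matches the verifier the WRITEs returned, the server may
 * have rebooted, and the data must be resent.
 */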
static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	const struct nfs_writeverf *verf = data->res.verf;
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	trace_nfs_direct_commit_complete(dreq);

	if (status < 0) {
		/* Errors in commit are fatal */
		dreq->error = status;
		dreq->flags = NFS_ODIRECT_DONE;
	} else {
		status = dreq->error;
	}

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (status < 0) {
			spin_lock(&dreq->lock);
			nfs_direct_truncate_request(dreq, req);
			spin_unlock(&dreq->lock);
			nfs_release_request(req);
		} else if (!nfs_write_match_verf(verf, req)) {
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			/*
			 * Despite the reboot, the write was successful,
			 * so reset wb_nio.
			 */
			req->wb_nio = 0;
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		} else
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (nfs_commit_end(cinfo.mds))
		nfs_direct_write_complete(dreq);
}

static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
				     struct nfs_page *req)
{
	struct nfs_direct_req *dreq = cinfo->dreq;

	trace_nfs_direct_resched_write(dreq);

	spin_lock(&dreq->lock);
	if (dreq->flags != NFS_ODIRECT_DONE)
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	spin_unlock(&dreq->lock);
	nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.resched_write = nfs_direct_resched_write,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

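/*
 * Tear-down path: drop every request still sitting on the commit
 * lists, truncating the completed-byte accounting accordingly.
 */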
static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
{
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	LIST_HEAD(reqs);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	while (!list_empty(&reqs)) {
		req = nfs_list_entry(reqs.next);
		nfs_list_remove_request(req);
		nfs_direct_truncate_request(dreq, req);
		nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
	case NFS_ODIRECT_DO_COMMIT:
		nfs_direct_commit_schedule(dreq);
		break;
	case NFS_ODIRECT_RESCHED_WRITES:
		nfs_direct_write_reschedule(dreq);
		break;
	default:
		nfs_direct_write_clear_reqs(dreq);
		nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
		nfs_direct_complete(dreq);
	}
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
	trace_nfs_direct_write_complete(dreq);
	queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}

static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
	int flags = NFS_ODIRECT_DONE;

	trace_nfs_direct_write_completion(dreq);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) &&
	    !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		if (!dreq->flags)
			dreq->flags = NFS_ODIRECT_DO_COMMIT;
		flags = dreq->flags;
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {

		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		if (flags == NFS_ODIRECT_DO_COMMIT) {
			kref_get(&req->wb_kref);
			memcpy(&req->wb_verf, &hdr->verf.verifier,
			       sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
						hdr->ds_commit_idx);
		} else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

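/*
 * The pgio layer could not complete this header: flag the dreq for a
 * resend and park the header's requests on the commit list so that
 * nfs_direct_write_reschedule() picks them up.
 */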
static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_page *req;
	struct nfs_commit_info cinfo;

	trace_nfs_direct_write_reschedule_io(dreq);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	spin_lock(&dreq->lock);
	if (dreq->error == 0)
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	spin_unlock(&dreq->lock);
	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		nfs_unlock_request(req);
		nfs_mark_request_commit(req, NULL, &cinfo, 0);
	}
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
	.reschedule_io = nfs_direct_write_reschedule_io,
};


/*
 * NB: Return the value of the first error return code.  Subsequent
 * errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       struct iov_iter *iter,
					       loff_t pos, int ioflags)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	struct nfs_commit_info cinfo;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
	bool defer = false;

	trace_nfs_direct_write_schedule_iovec(dreq);

	nfs_pageio_init_write(&desc, inode, ioflags, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	inode_dio_begin(inode);

	NFS_I(inode)->write_io += iov_iter_count(iter);
	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc2(iter, &pagevec,
						   wsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
							pgbase, pos, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}

			if (desc.pg_error < 0) {
				nfs_free_request(req);
				result = desc.pg_error;
				break;
			}

			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;

			if (defer) {
				nfs_mark_request_commit(req, NULL, &cinfo, 0);
				continue;
			}

			nfs_lock_request(req);
			if (nfs_pageio_add_request(&desc, req))
				continue;

			/* Exit on hard errors */
			if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
				result = desc.pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}

			/* If the error is soft, defer remaining requests */
			nfs_init_cinfo_from_dreq(&cinfo, dreq);
			spin_lock(&dreq->lock);
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			spin_unlock(&dreq->lock);
			nfs_unlock_request(req);
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
			desc.pg_error = 0;
			defer = true;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	return requested_bytes;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
			      bool swap)
{
	ssize_t result, requested;
	size_t count;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	loff_t pos, end;

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		 file, iov_iter_count(iter), (long long) iocb->ki_pos);

	if (swap)
		/* bypass generic checks */
		result = iov_iter_count(iter);
	else
		result = generic_write_checks(iocb, iter);
	if (result <= 0)
		return result;
	count = result;
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	pos = iocb->ki_pos;
	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

	task_io_account_write(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->max_count = count;
	dreq->io_start = pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		nfs_direct_req_release(dreq);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;
	pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);

	if (swap) {
		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
							    FLUSH_STABLE);
	} else {
		nfs_start_io_direct(inode);

		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
							    FLUSH_COND_STABLE);

		if (mapping->nrpages) {
			invalidate_inode_pages2_range(mapping,
						      pos >> PAGE_SHIFT, end);
		}

		nfs_end_io_direct(inode);
	}

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos = pos + result;
			/* XXX: should check the generic_write_sync retval */
			generic_write_sync(iocb, result);
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}
	nfs_fscache_invalidate(inode, FSCACHE_INVAL_DIO_WRITE);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
					      sizeof(struct nfs_direct_req),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD),
					      NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}