v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * linux/fs/nfs/direct.c
  4 *
  5 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
  6 *
  7 * High-performance uncached I/O for the Linux NFS client
  8 *
  9 * There are important applications whose performance or correctness
 10 * depends on uncached access to file data.  Database clusters
 11 * (multiple copies of the same instance running on separate hosts)
 12 * implement their own cache coherency protocol that subsumes file
 13 * system cache protocols.  Applications that process datasets
 14 * considerably larger than the client's memory do not always benefit
 15 * from a local cache.  A streaming video server, for instance, has no
 16 * need to cache the contents of a file.
 17 *
 18 * When an application requests uncached I/O, all read and write requests
 19 * are made directly to the server; data stored or fetched via these
 20 * requests is not cached in the Linux page cache.  The client does not
 21 * correct unaligned requests from applications.  All requested bytes are
 22 * held on permanent storage before a direct write system call returns to
 23 * an application.
 24 *
 25 * Solaris implements an uncached I/O facility called directio() that
 26 * is used for backups and sequential I/O to very large files.  Solaris
 27 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 28 * an undocumented mount option.
 29 *
 30 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 31 * help from Andrew Morton.
 32 *
 33 * 18 Dec 2001	Initial implementation for 2.4  --cel
 34 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 35 * 08 Jun 2003	Port to 2.5 APIs  --cel
 36 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 37 * 15 Sep 2004	Parallel async reads  --cel
 38 * 04 May 2005	support O_DIRECT with aio  --cel
 39 *
 40 */
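/*
 * Illustrative sketch (not part of this file; names and sizes are
 * hypothetical): an application opts into the uncached path described
 * above by opening with O_DIRECT and issuing aligned requests, since
 * the client does not correct unaligned I/O:
 *
 *	int fd = open("/mnt/nfs/db.dat", O_RDWR | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 65536);	// O_DIRECT wants aligned buffers
 *	pread(fd, buf, 65536, 0);		// served by nfs_file_direct_read()
 */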
 41
 42#include <linux/errno.h>
 43#include <linux/sched.h>
 44#include <linux/kernel.h>
 45#include <linux/file.h>
 46#include <linux/pagemap.h>
 47#include <linux/kref.h>
 48#include <linux/slab.h>
 49#include <linux/task_io_accounting_ops.h>
 50#include <linux/module.h>
 51
 52#include <linux/nfs_fs.h>
 53#include <linux/nfs_page.h>
 54#include <linux/sunrpc/clnt.h>
 55
 56#include <linux/uaccess.h>
 57#include <linux/atomic.h>
 58
 59#include "internal.h"
 60#include "iostat.h"
 61#include "pnfs.h"
 62
 63#define NFSDBG_FACILITY		NFSDBG_VFS
 64
 65static struct kmem_cache *nfs_direct_cachep;
 66
 67struct nfs_direct_req {
 68	struct kref		kref;		/* release manager */
 69
 70	/* I/O parameters */
 71	struct nfs_open_context	*ctx;		/* file open context info */
 72	struct nfs_lock_context *l_ctx;		/* Lock context info */
 73	struct kiocb *		iocb;		/* controlling i/o request */
 74	struct inode *		inode;		/* target file of i/o */
 75
 76	/* completion state */
 77	atomic_t		io_count;	/* i/os we're waiting for */
 78	spinlock_t		lock;		/* protect completion state */
 79
 80	loff_t			io_start;	/* Start offset for I/O */
 81	ssize_t			count,		/* bytes actually processed */
 82				max_count,	/* max expected count */
 83				bytes_left,	/* bytes left to be sent */
 84				error;		/* any reported error */
 85	struct completion	completion;	/* wait for i/o completion */
 86
 87	/* commit state */
 88	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
 89	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
 90	struct work_struct	work;
 91	int			flags;
 92	/* for write */
 93#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
 94#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
 95	/* for read */
 96#define NFS_ODIRECT_SHOULD_DIRTY	(3)	/* dirty user-space page after read */
 97#define NFS_ODIRECT_DONE		INT_MAX	/* direct I/O is complete; stop rescheduling */
 98};
 99
100static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
101static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
102static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
103static void nfs_direct_write_schedule_work(struct work_struct *work);
104
105static inline void get_dreq(struct nfs_direct_req *dreq)
106{
107	atomic_inc(&dreq->io_count);
108}
109
110static inline int put_dreq(struct nfs_direct_req *dreq)
111{
112	return atomic_dec_and_test(&dreq->io_count);
113}
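/*
 * Sketch of the io_count protocol used throughout this file: the
 * scheduling function holds one reference for itself, each pgio header
 * takes one via nfs_direct_pgio_init(), and whoever drops io_count to
 * zero completes the request:
 *
 *	get_dreq(dreq);			// scheduler's own reference
 *	...dispatch RPCs, each taking its own get_dreq()...
 *	if (put_dreq(dreq))		// last reference gone
 *		nfs_direct_complete(dreq);
 */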
114
115static void
116nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
117			    const struct nfs_pgio_header *hdr,
118			    ssize_t dreq_len)
119{
120	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
121	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
122		return;
123	if (dreq->max_count >= dreq_len) {
124		dreq->max_count = dreq_len;
125		if (dreq->count > dreq_len)
126			dreq->count = dreq_len;
127
128		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
129			dreq->error = hdr->error;
130		else /* Clear outstanding error if this is EOF */
131			dreq->error = 0;
132	}
133}
134
135static void
136nfs_direct_count_bytes(struct nfs_direct_req *dreq,
137		       const struct nfs_pgio_header *hdr)
138{
139	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
140	ssize_t dreq_len = 0;
141
142	if (hdr_end > dreq->io_start)
143		dreq_len = hdr_end - dreq->io_start;
144
145	nfs_direct_handle_truncated(dreq, hdr, dreq_len);
146
147	if (dreq_len > dreq->max_count)
148		dreq_len = dreq->max_count;
149
150	if (dreq->count < dreq_len)
151		dreq->count = dreq_len;
152}
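/*
 * Worked example with hypothetical numbers: for a dreq with
 * io_start = 0 and max_count = 65536, a reply with hdr->io_start = 0,
 * hdr->good_bytes = 16384 and NFS_IOHDR_EOF set gives dreq_len = 16384;
 * nfs_direct_handle_truncated() clamps max_count (and count) to 16384
 * and clears any outstanding error, so the caller sees a short read at
 * EOF rather than a failure.
 */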
153
154/**
155 * nfs_direct_IO - NFS address space operation for direct I/O
156 * @iocb: target I/O control block
157 * @iter: I/O buffer
158 *
159 * The presence of this routine in the address space ops vector means
160 * the NFS client supports direct I/O. However, for most direct IO, we
161 * shunt off direct read and write requests before the VFS gets them,
162 * so this method is only ever called for swap.
163 */
164ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
165{
166	struct inode *inode = iocb->ki_filp->f_mapping->host;
167
168	/* we only support swap file calling nfs_direct_IO */
169	if (!IS_SWAPFILE(inode))
170		return 0;
171
172	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
173
174	if (iov_iter_rw(iter) == READ)
175		return nfs_file_direct_read(iocb, iter);
176	return nfs_file_direct_write(iocb, iter);
177}
178
179static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
180{
181	unsigned int i;
182	for (i = 0; i < npages; i++)
183		put_page(pages[i]);
184}
185
186void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
187			      struct nfs_direct_req *dreq)
188{
189	cinfo->inode = dreq->inode;
190	cinfo->mds = &dreq->mds_cinfo;
191	cinfo->ds = &dreq->ds_cinfo;
192	cinfo->dreq = dreq;
193	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
194}
195
196static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
197{
198	struct nfs_direct_req *dreq;
199
200	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
201	if (!dreq)
202		return NULL;
203
204	kref_init(&dreq->kref);
205	kref_get(&dreq->kref);
206	init_completion(&dreq->completion);
207	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
208	pnfs_init_ds_commit_info(&dreq->ds_cinfo);
209	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
210	spin_lock_init(&dreq->lock);
211
212	return dreq;
213}
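/*
 * Note the two references taken in nfs_direct_req_alloc(): kref_init()
 * starts the count at 1 and kref_get() raises it to 2.  One reference
 * belongs to the caller and is dropped via nfs_direct_req_release();
 * the other is dropped by nfs_direct_complete() once the I/O finishes.
 */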
214
215static void nfs_direct_req_free(struct kref *kref)
216{
217	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
218
219	pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
220	if (dreq->l_ctx != NULL)
221		nfs_put_lock_context(dreq->l_ctx);
222	if (dreq->ctx != NULL)
223		put_nfs_open_context(dreq->ctx);
224	kmem_cache_free(nfs_direct_cachep, dreq);
225}
226
227static void nfs_direct_req_release(struct nfs_direct_req *dreq)
228{
229	kref_put(&dreq->kref, nfs_direct_req_free);
230}
231
232ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
233{
234	return dreq->bytes_left;
235}
236EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
237
238/*
239 * Collects and returns the final error value/byte-count.
240 */
241static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
242{
243	ssize_t result = -EIOCBQUEUED;
244
245	/* Async requests don't wait here */
246	if (dreq->iocb)
247		goto out;
248
249	result = wait_for_completion_killable(&dreq->completion);
250
251	if (!result) {
252		result = dreq->count;
253		WARN_ON_ONCE(dreq->count < 0);
254	}
255	if (!result)
256		result = dreq->error;
257
258out:
259	return (ssize_t) result;
260}
261
262/*
263 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
264 * the iocb is still valid here if this is a synchronous request.
265 */
266static void nfs_direct_complete(struct nfs_direct_req *dreq)
267{
268	struct inode *inode = dreq->inode;
269
270	inode_dio_end(inode);
271
272	if (dreq->iocb) {
273		long res = (long) dreq->error;
274		if (dreq->count != 0) {
275			res = (long) dreq->count;
276			WARN_ON_ONCE(dreq->count < 0);
277		}
278		dreq->iocb->ki_complete(dreq->iocb, res, 0);
279	}
280
281	complete(&dreq->completion);
282
283	nfs_direct_req_release(dreq);
284}
285
286static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
287{
288	unsigned long bytes = 0;
289	struct nfs_direct_req *dreq = hdr->dreq;
290
291	spin_lock(&dreq->lock);
292	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
293		spin_unlock(&dreq->lock);
294		goto out_put;
295	}
296
297	nfs_direct_count_bytes(dreq, hdr);
298	spin_unlock(&dreq->lock);
299
300	while (!list_empty(&hdr->pages)) {
301		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
302		struct page *page = req->wb_page;
303
304		if (!PageCompound(page) && bytes < hdr->good_bytes &&
305		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
306			set_page_dirty(page);
307		bytes += req->wb_bytes;
308		nfs_list_remove_request(req);
309		nfs_release_request(req);
310	}
311out_put:
312	if (put_dreq(dreq))
313		nfs_direct_complete(dreq);
314	hdr->release(hdr);
315}
316
317static void nfs_read_sync_pgio_error(struct list_head *head, int error)
318{
319	struct nfs_page *req;
320
321	while (!list_empty(head)) {
322		req = nfs_list_entry(head->next);
323		nfs_list_remove_request(req);
324		nfs_release_request(req);
325	}
326}
327
328static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
329{
330	get_dreq(hdr->dreq);
331}
332
333static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
334	.error_cleanup = nfs_read_sync_pgio_error,
335	.init_hdr = nfs_direct_pgio_init,
336	.completion = nfs_direct_read_completion,
337};
338
339/*
340 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
341 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
342 * bail and stop sending more reads.  Read length accounting is
343 * handled automatically by nfs_direct_read_result().  Otherwise, if
344 * no requests have been sent, just return an error.
345 */
346
347static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
348					      struct iov_iter *iter,
349					      loff_t pos)
350{
351	struct nfs_pageio_descriptor desc;
352	struct inode *inode = dreq->inode;
353	ssize_t result = -EINVAL;
354	size_t requested_bytes = 0;
355	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);
356
357	nfs_pageio_init_read(&desc, dreq->inode, false,
358			     &nfs_direct_read_completion_ops);
359	get_dreq(dreq);
360	desc.pg_dreq = dreq;
361	inode_dio_begin(inode);
362
363	while (iov_iter_count(iter)) {
364		struct page **pagevec;
365		size_t bytes;
366		size_t pgbase;
367		unsigned npages, i;
368
369		result = iov_iter_get_pages_alloc(iter, &pagevec, 
370						  rsize, &pgbase);
371		if (result < 0)
372			break;
373	
374		bytes = result;
375		iov_iter_advance(iter, bytes);
376		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
377		for (i = 0; i < npages; i++) {
378			struct nfs_page *req;
379			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
380			/* XXX do we need to do the eof zeroing found in async_filler? */
381			req = nfs_create_request(dreq->ctx, pagevec[i],
382						 pgbase, req_len);
383			if (IS_ERR(req)) {
384				result = PTR_ERR(req);
385				break;
386			}
387			req->wb_index = pos >> PAGE_SHIFT;
388			req->wb_offset = pos & ~PAGE_MASK;
389			if (!nfs_pageio_add_request(&desc, req)) {
390				result = desc.pg_error;
391				nfs_release_request(req);
392				break;
393			}
394			pgbase = 0;
395			bytes -= req_len;
396			requested_bytes += req_len;
397			pos += req_len;
398			dreq->bytes_left -= req_len;
399		}
400		nfs_direct_release_pages(pagevec, npages);
401		kvfree(pagevec);
402		if (result < 0)
403			break;
404	}
405
406	nfs_pageio_complete(&desc);
407
408	/*
409	 * If no bytes were started, return the error, and let the
410	 * generic layer handle the completion.
411	 */
412	if (requested_bytes == 0) {
413		inode_dio_end(inode);
414		nfs_direct_req_release(dreq);
415		return result < 0 ? result : -EIO;
416	}
417
418	if (put_dreq(dreq))
419		nfs_direct_complete(dreq);
420	return requested_bytes;
421}
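/*
 * Page-accounting sketch for the loop above, with hypothetical values:
 * given PAGE_SIZE = 4096, a 10000-byte segment starting at pgbase = 100
 * spans npages = (10000 + 100 + 4095) / 4096 = 3 pages.  The first
 * nfs_page covers 4096 - 100 = 3996 bytes, the second a full 4096, the
 * third the remaining 1908; pgbase is reset to 0 after the first page.
 */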
422
423/**
424 * nfs_file_direct_read - file direct read operation for NFS files
425 * @iocb: target I/O control block
426 * @iter: vector of user buffers into which to read data
427 *
428 * We use this function for direct reads instead of calling
429 * generic_file_aio_read() in order to avoid gfar's check to see if
430 * the request starts before the end of the file.  For that check
431 * to work, we must generate a GETATTR before each direct read, and
432 * even then there is a window between the GETATTR and the subsequent
433 * READ where the file size could change.  Our preference is simply
434 * to do all reads the application wants, and the server will take
435 * care of managing the end of file boundary.
436 *
437 * This function also eliminates unnecessarily updating the file's
438 * atime locally, as the NFS server sets the file's atime, and this
439 * client must read the updated atime from the server back into its
440 * cache.
441 */
442ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
443{
444	struct file *file = iocb->ki_filp;
445	struct address_space *mapping = file->f_mapping;
446	struct inode *inode = mapping->host;
447	struct nfs_direct_req *dreq;
448	struct nfs_lock_context *l_ctx;
449	ssize_t result, requested;
450	size_t count = iov_iter_count(iter);
451	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
452
453	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
454		file, count, (long long) iocb->ki_pos);
455
456	result = 0;
457	if (!count)
458		goto out;
459
460	task_io_account_read(count);
461
462	result = -ENOMEM;
463	dreq = nfs_direct_req_alloc();
464	if (dreq == NULL)
465		goto out;
466
467	dreq->inode = inode;
468	dreq->bytes_left = dreq->max_count = count;
469	dreq->io_start = iocb->ki_pos;
470	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
471	l_ctx = nfs_get_lock_context(dreq->ctx);
472	if (IS_ERR(l_ctx)) {
473		result = PTR_ERR(l_ctx);
474		nfs_direct_req_release(dreq);
475		goto out_release;
476	}
477	dreq->l_ctx = l_ctx;
478	if (!is_sync_kiocb(iocb))
479		dreq->iocb = iocb;
480
481	if (iter_is_iovec(iter))
482		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
483
484	nfs_start_io_direct(inode);
485
486	NFS_I(inode)->read_io += count;
487	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
488
489	nfs_end_io_direct(inode);
490
491	if (requested > 0) {
492		result = nfs_direct_wait(dreq);
493		if (result > 0) {
494			requested -= result;
495			iocb->ki_pos += result;
496		}
497		iov_iter_revert(iter, requested);
498	} else {
499		result = requested;
500	}
501
502out_release:
503	nfs_direct_req_release(dreq);
504out:
505	return result;
506}
507
508static void
509nfs_direct_join_group(struct list_head *list, struct inode *inode)
510{
511	struct nfs_page *req, *next;
512
513	list_for_each_entry(req, list, wb_list) {
514		if (req->wb_head != req || req->wb_this_page == req)
515			continue;
516		for (next = req->wb_this_page;
517				next != req->wb_head;
518				next = next->wb_this_page) {
519			nfs_list_remove_request(next);
520			nfs_release_request(next);
521		}
522		nfs_join_page_group(req, inode);
523	}
524}
525
526static void
527nfs_direct_write_scan_commit_list(struct inode *inode,
528				  struct list_head *list,
529				  struct nfs_commit_info *cinfo)
530{
531	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
532	pnfs_recover_commit_reqs(list, cinfo);
533	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
534	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
535}
536
537static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
538{
539	struct nfs_pageio_descriptor desc;
540	struct nfs_page *req, *tmp;
541	LIST_HEAD(reqs);
542	struct nfs_commit_info cinfo;
543	LIST_HEAD(failed);
544
545	nfs_init_cinfo_from_dreq(&cinfo, dreq);
546	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
547
548	nfs_direct_join_group(&reqs, dreq->inode);
549
550	dreq->count = 0;
551	dreq->max_count = 0;
552	list_for_each_entry(req, &reqs, wb_list)
553		dreq->max_count += req->wb_bytes;
554	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
555	get_dreq(dreq);
556
557	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
558			      &nfs_direct_write_completion_ops);
559	desc.pg_dreq = dreq;
560
561	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
562		/* Bump the transmission count */
563		req->wb_nio++;
564		if (!nfs_pageio_add_request(&desc, req)) {
565			nfs_list_move_request(req, &failed);
566			spin_lock(&cinfo.inode->i_lock);
567			dreq->flags = 0;
568			if (desc.pg_error < 0)
569				dreq->error = desc.pg_error;
570			else
571				dreq->error = -EIO;
572			spin_unlock(&cinfo.inode->i_lock);
573		}
574		nfs_release_request(req);
575	}
576	nfs_pageio_complete(&desc);
577
578	while (!list_empty(&failed)) {
579		req = nfs_list_entry(failed.next);
580		nfs_list_remove_request(req);
581		nfs_unlock_and_release_request(req);
582	}
583
584	if (put_dreq(dreq))
585		nfs_direct_write_complete(dreq);
586}
587
588static void nfs_direct_commit_complete(struct nfs_commit_data *data)
589{
590	const struct nfs_writeverf *verf = data->res.verf;
591	struct nfs_direct_req *dreq = data->dreq;
592	struct nfs_commit_info cinfo;
593	struct nfs_page *req;
594	int status = data->task.tk_status;
595
596	if (status < 0) {
597		/* Errors in commit are fatal */
598		dreq->error = status;
599		dreq->max_count = 0;
600		dreq->count = 0;
601		dreq->flags = NFS_ODIRECT_DONE;
602	} else if (dreq->flags == NFS_ODIRECT_DONE)
603		status = dreq->error;
604
605	nfs_init_cinfo_from_dreq(&cinfo, dreq);
606
607	while (!list_empty(&data->pages)) {
608		req = nfs_list_entry(data->pages.next);
609		nfs_list_remove_request(req);
610		if (status >= 0 && !nfs_write_match_verf(verf, req)) {
611			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
612			/*
613			 * Despite the reboot, the write was successful,
614			 * so reset wb_nio.
615			 */
616			req->wb_nio = 0;
617			nfs_mark_request_commit(req, NULL, &cinfo, 0);
618		} else /* Error or match */
619			nfs_release_request(req);
620		nfs_unlock_and_release_request(req);
621	}
622
623	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
624		nfs_direct_write_complete(dreq);
625}
626
627static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
628		struct nfs_page *req)
629{
630	struct nfs_direct_req *dreq = cinfo->dreq;
631
632	spin_lock(&dreq->lock);
633	if (dreq->flags != NFS_ODIRECT_DONE)
634		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
635	spin_unlock(&dreq->lock);
636	nfs_mark_request_commit(req, NULL, cinfo, 0);
637}
638
639static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
640	.completion = nfs_direct_commit_complete,
641	.resched_write = nfs_direct_resched_write,
642};
643
644static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
645{
646	int res;
647	struct nfs_commit_info cinfo;
648	LIST_HEAD(mds_list);
649
650	nfs_init_cinfo_from_dreq(&cinfo, dreq);
651	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
652	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
653	if (res < 0) /* res == -ENOMEM */
654		nfs_direct_write_reschedule(dreq);
655}
656
657static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
658{
659	struct nfs_commit_info cinfo;
660	struct nfs_page *req;
661	LIST_HEAD(reqs);
662
663	nfs_init_cinfo_from_dreq(&cinfo, dreq);
664	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
665
666	while (!list_empty(&reqs)) {
667		req = nfs_list_entry(reqs.next);
668		nfs_list_remove_request(req);
669		nfs_release_request(req);
670		nfs_unlock_and_release_request(req);
671	}
672}
673
674static void nfs_direct_write_schedule_work(struct work_struct *work)
675{
676	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
677	int flags = dreq->flags;
678
679	dreq->flags = 0;
680	switch (flags) {
681		case NFS_ODIRECT_DO_COMMIT:
682			nfs_direct_commit_schedule(dreq);
683			break;
684		case NFS_ODIRECT_RESCHED_WRITES:
685			nfs_direct_write_reschedule(dreq);
686			break;
687		default:
688			nfs_direct_write_clear_reqs(dreq);
689			nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
690			nfs_direct_complete(dreq);
691	}
692}
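/*
 * The switch above is the direct-write completion state machine:
 * NFS_ODIRECT_DO_COMMIT sends a COMMIT for unstable replies,
 * NFS_ODIRECT_RESCHED_WRITES resends data whose verifier did not match,
 * and the default case treats the write as finished: outstanding
 * requests are dropped, the page cache range is zapped, and the
 * waiting (or async) caller is completed.
 */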
693
694static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
695{
696	queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
697}
698
699static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
700{
701	struct nfs_direct_req *dreq = hdr->dreq;
702	struct nfs_commit_info cinfo;
703	bool request_commit = false;
704	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
705
706	nfs_init_cinfo_from_dreq(&cinfo, dreq);
707
708	spin_lock(&dreq->lock);
709	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
710		spin_unlock(&dreq->lock);
711		goto out_put;
712	}
713
714	nfs_direct_count_bytes(dreq, hdr);
715	if (hdr->good_bytes != 0 && nfs_write_need_commit(hdr)) {
716		switch (dreq->flags) {
717		case 0:
718			dreq->flags = NFS_ODIRECT_DO_COMMIT;
719			request_commit = true;
720			break;
721		case NFS_ODIRECT_RESCHED_WRITES:
722		case NFS_ODIRECT_DO_COMMIT:
723			request_commit = true;
724		}
725	}
726	spin_unlock(&dreq->lock);
727
728	while (!list_empty(&hdr->pages)) {
729
730		req = nfs_list_entry(hdr->pages.next);
731		nfs_list_remove_request(req);
732		if (request_commit) {
733			kref_get(&req->wb_kref);
734			memcpy(&req->wb_verf, &hdr->verf.verifier,
735			       sizeof(req->wb_verf));
736			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
737				hdr->ds_commit_idx);
738		}
739		nfs_unlock_and_release_request(req);
740	}
741
742out_put:
743	if (put_dreq(dreq))
744		nfs_direct_write_complete(dreq);
745	hdr->release(hdr);
746}
747
748static void nfs_write_sync_pgio_error(struct list_head *head, int error)
749{
750	struct nfs_page *req;
751
752	while (!list_empty(head)) {
753		req = nfs_list_entry(head->next);
754		nfs_list_remove_request(req);
755		nfs_unlock_and_release_request(req);
756	}
757}
758
759static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
760{
761	struct nfs_direct_req *dreq = hdr->dreq;
762
763	spin_lock(&dreq->lock);
764	if (dreq->error == 0) {
765		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
766		/* fake unstable write to let common nfs resend pages */
767		hdr->verf.committed = NFS_UNSTABLE;
768		hdr->good_bytes = hdr->args.offset + hdr->args.count -
769			hdr->io_start;
770	}
771	spin_unlock(&dreq->lock);
772}
773
774static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
775	.error_cleanup = nfs_write_sync_pgio_error,
776	.init_hdr = nfs_direct_pgio_init,
777	.completion = nfs_direct_write_completion,
778	.reschedule_io = nfs_direct_write_reschedule_io,
779};
780
781
782/*
783 * NB: Return the value of the first error return code.  Subsequent
784 *     errors after the first one are ignored.
785 */
786/*
787 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
788 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
789 * bail and stop sending more writes.  Write length accounting is
790 * handled automatically by nfs_direct_write_result().  Otherwise, if
791 * no requests have been sent, just return an error.
792 */
793static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
794					       struct iov_iter *iter,
795					       loff_t pos)
796{
797	struct nfs_pageio_descriptor desc;
798	struct inode *inode = dreq->inode;
799	ssize_t result = 0;
800	size_t requested_bytes = 0;
801	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
802
803	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
804			      &nfs_direct_write_completion_ops);
805	desc.pg_dreq = dreq;
806	get_dreq(dreq);
807	inode_dio_begin(inode);
808
809	NFS_I(inode)->write_io += iov_iter_count(iter);
810	while (iov_iter_count(iter)) {
811		struct page **pagevec;
812		size_t bytes;
813		size_t pgbase;
814		unsigned npages, i;
815
816		result = iov_iter_get_pages_alloc(iter, &pagevec, 
817						  wsize, &pgbase);
818		if (result < 0)
819			break;
820
821		bytes = result;
822		iov_iter_advance(iter, bytes);
823		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
824		for (i = 0; i < npages; i++) {
825			struct nfs_page *req;
826			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
827
828			req = nfs_create_request(dreq->ctx, pagevec[i],
829						 pgbase, req_len);
830			if (IS_ERR(req)) {
831				result = PTR_ERR(req);
832				break;
833			}
834
835			if (desc.pg_error < 0) {
836				nfs_free_request(req);
837				result = desc.pg_error;
838				break;
839			}
840
841			nfs_lock_request(req);
842			req->wb_index = pos >> PAGE_SHIFT;
843			req->wb_offset = pos & ~PAGE_MASK;
844			if (!nfs_pageio_add_request(&desc, req)) {
845				result = desc.pg_error;
846				nfs_unlock_and_release_request(req);
847				break;
848			}
849			pgbase = 0;
850			bytes -= req_len;
851			requested_bytes += req_len;
852			pos += req_len;
853			dreq->bytes_left -= req_len;
854		}
855		nfs_direct_release_pages(pagevec, npages);
856		kvfree(pagevec);
857		if (result < 0)
858			break;
859	}
860	nfs_pageio_complete(&desc);
861
862	/*
863	 * If no bytes were started, return the error, and let the
864	 * generic layer handle the completion.
865	 */
866	if (requested_bytes == 0) {
867		inode_dio_end(inode);
868		nfs_direct_req_release(dreq);
869		return result < 0 ? result : -EIO;
870	}
871
872	if (put_dreq(dreq))
873		nfs_direct_write_complete(dreq);
874	return requested_bytes;
875}
876
877/**
878 * nfs_file_direct_write - file direct write operation for NFS files
879 * @iocb: target I/O control block
880 * @iter: vector of user buffers from which to write data
881 *
882 * We use this function for direct writes instead of calling
883 * generic_file_aio_write() in order to avoid taking the inode
884 * semaphore and updating the i_size.  The NFS server will set
885 * the new i_size and this client must read the updated size
886 * back into its cache.  We let the server do generic write
887 * parameter checking and report problems.
888 *
889 * We eliminate local atime updates, see direct read above.
890 *
891 * We avoid unnecessary page cache invalidations for normal cached
892 * readers of this file.
893 *
894 * Note that O_APPEND is not supported for NFS direct writes, as there
895 * is no atomic O_APPEND write facility in the NFS protocol.
896 */
897ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
898{
899	ssize_t result, requested;
900	size_t count;
901	struct file *file = iocb->ki_filp;
902	struct address_space *mapping = file->f_mapping;
903	struct inode *inode = mapping->host;
904	struct nfs_direct_req *dreq;
905	struct nfs_lock_context *l_ctx;
906	loff_t pos, end;
907
908	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
909		file, iov_iter_count(iter), (long long) iocb->ki_pos);
910
911	result = generic_write_checks(iocb, iter);
912	if (result <= 0)
913		return result;
914	count = result;
915	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
916
917	pos = iocb->ki_pos;
918	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
919
920	task_io_account_write(count);
921
922	result = -ENOMEM;
923	dreq = nfs_direct_req_alloc();
924	if (!dreq)
925		goto out;
926
927	dreq->inode = inode;
928	dreq->bytes_left = dreq->max_count = count;
929	dreq->io_start = pos;
930	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
931	l_ctx = nfs_get_lock_context(dreq->ctx);
932	if (IS_ERR(l_ctx)) {
933		result = PTR_ERR(l_ctx);
934		nfs_direct_req_release(dreq);
935		goto out_release;
936	}
937	dreq->l_ctx = l_ctx;
938	if (!is_sync_kiocb(iocb))
939		dreq->iocb = iocb;
940	pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);
941
942	nfs_start_io_direct(inode);
943
944	requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);
945
946	if (mapping->nrpages) {
947		invalidate_inode_pages2_range(mapping,
948					      pos >> PAGE_SHIFT, end);
949	}
950
951	nfs_end_io_direct(inode);
952
953	if (requested > 0) {
954		result = nfs_direct_wait(dreq);
955		if (result > 0) {
956			requested -= result;
957			iocb->ki_pos = pos + result;
958			/* XXX: should check the generic_write_sync retval */
959			generic_write_sync(iocb, result);
960		}
961		iov_iter_revert(iter, requested);
962	} else {
963		result = requested;
964	}
965out_release:
966	nfs_direct_req_release(dreq);
967out:
968	return result;
969}
970
971/**
972 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
973 *
974 */
975int __init nfs_init_directcache(void)
976{
977	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
978						sizeof(struct nfs_direct_req),
979						0, (SLAB_RECLAIM_ACCOUNT|
980							SLAB_MEM_SPREAD),
981						NULL);
982	if (nfs_direct_cachep == NULL)
983		return -ENOMEM;
984
985	return 0;
986}
987
988/**
989 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
990 *
991 */
992void nfs_destroy_directcache(void)
993{
994	kmem_cache_destroy(nfs_direct_cachep);
995}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/fs/nfs/direct.c
   4 *
   5 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
   6 *
   7 * High-performance uncached I/O for the Linux NFS client
   8 *
   9 * There are important applications whose performance or correctness
  10 * depends on uncached access to file data.  Database clusters
  11 * (multiple copies of the same instance running on separate hosts)
  12 * implement their own cache coherency protocol that subsumes file
  13 * system cache protocols.  Applications that process datasets
  14 * considerably larger than the client's memory do not always benefit
  15 * from a local cache.  A streaming video server, for instance, has no
  16 * need to cache the contents of a file.
  17 *
  18 * When an application requests uncached I/O, all read and write requests
  19 * are made directly to the server; data stored or fetched via these
  20 * requests is not cached in the Linux page cache.  The client does not
  21 * correct unaligned requests from applications.  All requested bytes are
  22 * held on permanent storage before a direct write system call returns to
  23 * an application.
  24 *
  25 * Solaris implements an uncached I/O facility called directio() that
  26 * is used for backups and sequential I/O to very large files.  Solaris
  27 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
  28 * an undocumented mount option.
  29 *
  30 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
  31 * help from Andrew Morton.
  32 *
  33 * 18 Dec 2001	Initial implementation for 2.4  --cel
  34 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
  35 * 08 Jun 2003	Port to 2.5 APIs  --cel
  36 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
  37 * 15 Sep 2004	Parallel async reads  --cel
  38 * 04 May 2005	support O_DIRECT with aio  --cel
  39 *
  40 */
  41
  42#include <linux/errno.h>
  43#include <linux/sched.h>
  44#include <linux/kernel.h>
  45#include <linux/file.h>
  46#include <linux/pagemap.h>
  47#include <linux/kref.h>
  48#include <linux/slab.h>
  49#include <linux/task_io_accounting_ops.h>
  50#include <linux/module.h>
  51
  52#include <linux/nfs_fs.h>
  53#include <linux/nfs_page.h>
  54#include <linux/sunrpc/clnt.h>
  55
  56#include <linux/uaccess.h>
  57#include <linux/atomic.h>
  58
  59#include "delegation.h"
  60#include "internal.h"
  61#include "iostat.h"
  62#include "pnfs.h"
  63#include "fscache.h"
  64#include "nfstrace.h"
  65
  66#define NFSDBG_FACILITY		NFSDBG_VFS
  67
  68static struct kmem_cache *nfs_direct_cachep;
  69
  70static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
  71static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
  72static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
  73static void nfs_direct_write_schedule_work(struct work_struct *work);
  74
  75static inline void get_dreq(struct nfs_direct_req *dreq)
  76{
  77	atomic_inc(&dreq->io_count);
  78}
  79
  80static inline int put_dreq(struct nfs_direct_req *dreq)
  81{
  82	return atomic_dec_and_test(&dreq->io_count);
  83}
  84
  85static void
  86nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
  87			    const struct nfs_pgio_header *hdr,
  88			    ssize_t dreq_len)
  89{
  90	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
  91	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
  92		return;
  93	if (dreq->max_count >= dreq_len) {
  94		dreq->max_count = dreq_len;
  95		if (dreq->count > dreq_len)
  96			dreq->count = dreq_len;
  97	}
  98
  99	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error)
 100		dreq->error = hdr->error;
 101}
 102
 103static void
 104nfs_direct_count_bytes(struct nfs_direct_req *dreq,
 105		       const struct nfs_pgio_header *hdr)
 106{
 107	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
 108	ssize_t dreq_len = 0;
 109
 110	if (hdr_end > dreq->io_start)
 111		dreq_len = hdr_end - dreq->io_start;
 112
 113	nfs_direct_handle_truncated(dreq, hdr, dreq_len);
 114
 115	if (dreq_len > dreq->max_count)
 116		dreq_len = dreq->max_count;
 117
 118	if (dreq->count < dreq_len)
 119		dreq->count = dreq_len;
 120}
 121
 122static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
 123					struct nfs_page *req)
 124{
 125	loff_t offs = req_offset(req);
 126	size_t req_start = (size_t)(offs - dreq->io_start);
 127
 128	if (req_start < dreq->max_count)
 129		dreq->max_count = req_start;
 130	if (req_start < dreq->count)
 131		dreq->count = req_start;
 132}
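/*
 * Example with hypothetical offsets: if dreq->io_start is 0 and a
 * request covering bytes 8192-12287 has to be dropped, req_start is
 * 8192, so count and max_count are clamped to 8192 and the application
 * sees a short write rather than a silent hole.
 */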
 133
 134static void nfs_direct_file_adjust_size_locked(struct inode *inode,
 135					       loff_t offset, size_t count)
 136{
 137	loff_t newsize = offset + (loff_t)count;
 138	loff_t oldsize = i_size_read(inode);
 139
 140	if (newsize > oldsize) {
 141		i_size_write(inode, newsize);
 142		NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
 143		trace_nfs_size_grow(inode, newsize);
 144		nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
 145	}
 146}
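/*
 * This helper only ever grows i_size: when a direct write extends the
 * file, the cached size is updated immediately (and
 * NFS_INO_INVALID_SIZE cleared) instead of waiting for the next
 * GETATTR from the server.
 */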
 147
 148/**
 149 * nfs_swap_rw - NFS address space operation for swap I/O
 150 * @iocb: target I/O control block
 151 * @iter: I/O buffer
 152 *
 153 * Perform IO to the swap-file.  This is much like direct IO.
 154 */
 155int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
 156{
 157	ssize_t ret;
 158
 159	if (iov_iter_rw(iter) == READ)
 160		ret = nfs_file_direct_read(iocb, iter, true);
 161	else
 162		ret = nfs_file_direct_write(iocb, iter, true);
 163	if (ret < 0)
 164		return ret;
 165	return 0;
 166}
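/*
 * Compared with nfs_direct_IO() in the v5.9 listing above, swap I/O now
 * has its own entry point: nfs_swap_rw() is called directly for
 * swap-file I/O, and nfs_file_direct_read()/nfs_file_direct_write()
 * take an explicit "swap" flag instead of inferring the swap case from
 * IS_SWAPFILE().
 */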
 167
 168static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 169{
 170	unsigned int i;
 171	for (i = 0; i < npages; i++)
 172		put_page(pages[i]);
 173}
 174
 175void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
 176			      struct nfs_direct_req *dreq)
 177{
 178	cinfo->inode = dreq->inode;
 179	cinfo->mds = &dreq->mds_cinfo;
 180	cinfo->ds = &dreq->ds_cinfo;
 181	cinfo->dreq = dreq;
 182	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
 183}
 184
 185static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 186{
 187	struct nfs_direct_req *dreq;
 188
 189	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
 190	if (!dreq)
 191		return NULL;
 192
 193	kref_init(&dreq->kref);
 194	kref_get(&dreq->kref);
 195	init_completion(&dreq->completion);
 196	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
 197	pnfs_init_ds_commit_info(&dreq->ds_cinfo);
 198	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
 199	spin_lock_init(&dreq->lock);
 200
 201	return dreq;
 202}
 203
 204static void nfs_direct_req_free(struct kref *kref)
 205{
 206	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
 207
 208	pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
 209	if (dreq->l_ctx != NULL)
 210		nfs_put_lock_context(dreq->l_ctx);
 211	if (dreq->ctx != NULL)
 212		put_nfs_open_context(dreq->ctx);
 213	kmem_cache_free(nfs_direct_cachep, dreq);
 214}
 215
 216static void nfs_direct_req_release(struct nfs_direct_req *dreq)
 217{
 218	kref_put(&dreq->kref, nfs_direct_req_free);
 219}
 220
 221ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset)
 222{
 223	loff_t start = offset - dreq->io_start;
 224	return dreq->max_count - start;
 225}
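/*
 * Unlike the v5.9 version above, which returned a stored bytes_left
 * counter, the remaining byte count is now computed from max_count and
 * the caller-supplied offset relative to io_start.
 */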
 226EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
 227
 228/*
 229 * Collects and returns the final error value/byte-count.
 230 */
 231static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
 232{
 233	ssize_t result = -EIOCBQUEUED;
 234
 235	/* Async requests don't wait here */
 236	if (dreq->iocb)
 237		goto out;
 238
 239	result = wait_for_completion_killable(&dreq->completion);
 240
 241	if (!result) {
 242		result = dreq->count;
 243		WARN_ON_ONCE(dreq->count < 0);
 244	}
 245	if (!result)
 246		result = dreq->error;
 247
 248out:
 249	return (ssize_t) result;
 250}
 251
 252/*
 253 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 254 * the iocb is still valid here if this is a synchronous request.
 255 */
 256static void nfs_direct_complete(struct nfs_direct_req *dreq)
 257{
 258	struct inode *inode = dreq->inode;
 259
 260	inode_dio_end(inode);
 261
 262	if (dreq->iocb) {
 263		long res = (long) dreq->error;
 264		if (dreq->count != 0) {
 265			res = (long) dreq->count;
 266			WARN_ON_ONCE(dreq->count < 0);
 267		}
 268		dreq->iocb->ki_complete(dreq->iocb, res);
 269	}
 270
 271	complete(&dreq->completion);
 272
 273	nfs_direct_req_release(dreq);
 274}
 275
 276static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 277{
 278	unsigned long bytes = 0;
 279	struct nfs_direct_req *dreq = hdr->dreq;
 280
 281	spin_lock(&dreq->lock);
 282	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
 283		spin_unlock(&dreq->lock);
 284		goto out_put;
 285	}
 286
 287	nfs_direct_count_bytes(dreq, hdr);
 288	spin_unlock(&dreq->lock);
 289
 290	nfs_update_delegated_atime(dreq->inode);
 291
 292	while (!list_empty(&hdr->pages)) {
 293		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 294		struct page *page = req->wb_page;
 295
 296		if (!PageCompound(page) && bytes < hdr->good_bytes &&
 297		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
 298			set_page_dirty(page);
 299		bytes += req->wb_bytes;
 300		nfs_list_remove_request(req);
 301		nfs_release_request(req);
 302	}
 303out_put:
 304	if (put_dreq(dreq))
 305		nfs_direct_complete(dreq);
 306	hdr->release(hdr);
 307}
 308
 309static void nfs_read_sync_pgio_error(struct list_head *head, int error)
 310{
 311	struct nfs_page *req;
 312
 313	while (!list_empty(head)) {
 314		req = nfs_list_entry(head->next);
 315		nfs_list_remove_request(req);
 316		nfs_release_request(req);
 317	}
 318}
 319
 320static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
 321{
 322	get_dreq(hdr->dreq);
 323}
 324
 325static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
 326	.error_cleanup = nfs_read_sync_pgio_error,
 327	.init_hdr = nfs_direct_pgio_init,
 328	.completion = nfs_direct_read_completion,
 329};
 330
 331/*
 332 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 333 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 334 * bail and stop sending more reads.  Read length accounting is
 335 * handled automatically by nfs_direct_read_result().  Otherwise, if
 336 * no requests have been sent, just return an error.
 337 */
 338
 339static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 340					      struct iov_iter *iter,
 341					      loff_t pos)
 342{
 343	struct nfs_pageio_descriptor desc;
 344	struct inode *inode = dreq->inode;
 345	ssize_t result = -EINVAL;
 346	size_t requested_bytes = 0;
 347	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);
 348
 349	nfs_pageio_init_read(&desc, dreq->inode, false,
 350			     &nfs_direct_read_completion_ops);
 351	get_dreq(dreq);
 352	desc.pg_dreq = dreq;
 353	inode_dio_begin(inode);
 354
 355	while (iov_iter_count(iter)) {
 356		struct page **pagevec;
 357		size_t bytes;
 358		size_t pgbase;
 359		unsigned npages, i;
 360
 361		result = iov_iter_get_pages_alloc2(iter, &pagevec,
 362						  rsize, &pgbase);
 363		if (result < 0)
 364			break;
 365	
 366		bytes = result;
 367		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
 368		for (i = 0; i < npages; i++) {
 369			struct nfs_page *req;
 370			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
 371			/* XXX do we need to do the eof zeroing found in async_filler? */
 372			req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
 373							pgbase, pos, req_len);
 374			if (IS_ERR(req)) {
 375				result = PTR_ERR(req);
 376				break;
 377			}
 378			if (!nfs_pageio_add_request(&desc, req)) {
 379				result = desc.pg_error;
 380				nfs_release_request(req);
 381				break;
 382			}
 383			pgbase = 0;
 384			bytes -= req_len;
 385			requested_bytes += req_len;
 386			pos += req_len;
 387		}
 388		nfs_direct_release_pages(pagevec, npages);
 389		kvfree(pagevec);
 390		if (result < 0)
 391			break;
 392	}
 393
 394	nfs_pageio_complete(&desc);
 395
 396	/*
 397	 * If no bytes were started, return the error, and let the
 398	 * generic layer handle the completion.
 399	 */
 400	if (requested_bytes == 0) {
 401		inode_dio_end(inode);
 402		nfs_direct_req_release(dreq);
 403		return result < 0 ? result : -EIO;
 404	}
 405
 406	if (put_dreq(dreq))
 407		nfs_direct_complete(dreq);
 408	return requested_bytes;
 409}
 410
 411/**
 412 * nfs_file_direct_read - file direct read operation for NFS files
 413 * @iocb: target I/O control block
 414 * @iter: vector of user buffers into which to read data
 415 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 416 *
 417 * We use this function for direct reads instead of calling
 418 * generic_file_aio_read() in order to avoid gfar's check to see if
 419 * the request starts before the end of the file.  For that check
 420 * to work, we must generate a GETATTR before each direct read, and
 421 * even then there is a window between the GETATTR and the subsequent
 422 * READ where the file size could change.  Our preference is simply
 423 * to do all reads the application wants, and the server will take
 424 * care of managing the end of file boundary.
 425 *
 426 * This function also eliminates unnecessarily updating the file's
 427 * atime locally, as the NFS server sets the file's atime, and this
 428 * client must read the updated atime from the server back into its
 429 * cache.
 430 */
 431ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
 432			     bool swap)
 433{
 434	struct file *file = iocb->ki_filp;
 435	struct address_space *mapping = file->f_mapping;
 436	struct inode *inode = mapping->host;
 437	struct nfs_direct_req *dreq;
 438	struct nfs_lock_context *l_ctx;
 439	ssize_t result, requested;
 440	size_t count = iov_iter_count(iter);
 441	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 442
 443	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
 444		file, count, (long long) iocb->ki_pos);
 445
 446	result = 0;
 447	if (!count)
 448		goto out;
 449
 450	task_io_account_read(count);
 451
 452	result = -ENOMEM;
 453	dreq = nfs_direct_req_alloc();
 454	if (dreq == NULL)
 455		goto out;
 456
 457	dreq->inode = inode;
 458	dreq->max_count = count;
 459	dreq->io_start = iocb->ki_pos;
 460	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
 461	l_ctx = nfs_get_lock_context(dreq->ctx);
 462	if (IS_ERR(l_ctx)) {
 463		result = PTR_ERR(l_ctx);
 464		nfs_direct_req_release(dreq);
 465		goto out_release;
 466	}
 467	dreq->l_ctx = l_ctx;
 468	if (!is_sync_kiocb(iocb))
 469		dreq->iocb = iocb;
 470
 471	if (user_backed_iter(iter))
 472		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
 473
 474	if (!swap) {
 475		result = nfs_start_io_direct(inode);
 476		if (result) {
 477			/* release the reference that would usually be
 478			 * consumed by nfs_direct_read_schedule_iovec()
 479			 */
 480			nfs_direct_req_release(dreq);
 481			goto out_release;
 482		}
 483	}
 484
 485	NFS_I(inode)->read_io += count;
 486	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
 487
 488	if (!swap)
 489		nfs_end_io_direct(inode);
 490
 491	if (requested > 0) {
 492		result = nfs_direct_wait(dreq);
 493		if (result > 0) {
 494			requested -= result;
 495			iocb->ki_pos += result;
 496		}
 497		iov_iter_revert(iter, requested);
 498	} else {
 499		result = requested;
 500	}
 501
 502out_release:
 503	nfs_direct_req_release(dreq);
 504out:
 505	return result;
 506}
 507
 508static void nfs_direct_add_page_head(struct list_head *list,
 509				     struct nfs_page *req)
 510{
 511	struct nfs_page *head = req->wb_head;
 512
 513	if (!list_empty(&head->wb_list) || !nfs_lock_request(head))
 514		return;
 515	if (!list_empty(&head->wb_list)) {
 516		nfs_unlock_request(head);
 517		return;
 518	}
 519	list_add(&head->wb_list, list);
 520	kref_get(&head->wb_kref);
 521	kref_get(&head->wb_kref);
 522}
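/*
 * The double kref_get() above appears deliberate: the head is released
 * more than once on the teardown paths that consume this list (one
 * reference for its place on the local list, one for the later
 * unlock-and-release), so it needs two extra references to survive
 * both.  This reading is inferred from the callers, not stated in the
 * source.
 */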
 523
 524static void nfs_direct_join_group(struct list_head *list,
 525				  struct nfs_commit_info *cinfo,
 526				  struct inode *inode)
 527{
 528	struct nfs_page *req, *subreq;
 529
 530	list_for_each_entry(req, list, wb_list) {
 531		if (req->wb_head != req) {
 532			nfs_direct_add_page_head(&req->wb_list, req);
 533			continue;
 534		}
 535		subreq = req->wb_this_page;
 536		if (subreq == req)
 537			continue;
 538		do {
 539			/*
 540			 * Remove subrequests from this list before freeing
 541			 * them in the call to nfs_join_page_group().
 542			 */
 543			if (!list_empty(&subreq->wb_list)) {
 544				nfs_list_remove_request(subreq);
 545				nfs_release_request(subreq);
 546			}
 547		} while ((subreq = subreq->wb_this_page) != req);
 548		nfs_join_page_group(req, cinfo, inode);
 549	}
 550}
 551
 552static void
 553nfs_direct_write_scan_commit_list(struct inode *inode,
 554				  struct list_head *list,
 555				  struct nfs_commit_info *cinfo)
 556{
 557	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
 558	pnfs_recover_commit_reqs(list, cinfo);
 559	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
 560	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
 561}
 562
 563static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 564{
 565	struct nfs_pageio_descriptor desc;
 566	struct nfs_page *req;
 567	LIST_HEAD(reqs);
 568	struct nfs_commit_info cinfo;
 569
 570	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 571	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 572
 573	nfs_direct_join_group(&reqs, &cinfo, dreq->inode);
 574
 575	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
 576	get_dreq(dreq);
 577
 578	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
 579			      &nfs_direct_write_completion_ops);
 580	desc.pg_dreq = dreq;
 581
 582	while (!list_empty(&reqs)) {
 583		req = nfs_list_entry(reqs.next);
 584		/* Bump the transmission count */
 585		req->wb_nio++;
 586		if (!nfs_pageio_add_request(&desc, req)) {
 587			spin_lock(&dreq->lock);
 588			if (dreq->error < 0) {
 589				desc.pg_error = dreq->error;
 590			} else if (desc.pg_error != -EAGAIN) {
 591				dreq->flags = 0;
 592				if (!desc.pg_error)
 593					desc.pg_error = -EIO;
 594				dreq->error = desc.pg_error;
 595			} else
 596				dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 597			spin_unlock(&dreq->lock);
 598			break;
 599		}
 600		nfs_release_request(req);
 601	}
 602	nfs_pageio_complete(&desc);
 603
 604	while (!list_empty(&reqs)) {
 605		req = nfs_list_entry(reqs.next);
 606		nfs_list_remove_request(req);
 607		nfs_unlock_and_release_request(req);
 608		if (desc.pg_error == -EAGAIN) {
 609			nfs_mark_request_commit(req, NULL, &cinfo, 0);
 610		} else {
 611			spin_lock(&dreq->lock);
 612			nfs_direct_truncate_request(dreq, req);
 613			spin_unlock(&dreq->lock);
 614			nfs_release_request(req);
 615		}
 616	}
 617
 618	if (put_dreq(dreq))
 619		nfs_direct_write_complete(dreq);
 620}
 621
 622static void nfs_direct_commit_complete(struct nfs_commit_data *data)
 623{
 624	const struct nfs_writeverf *verf = data->res.verf;
 625	struct nfs_direct_req *dreq = data->dreq;
 626	struct nfs_commit_info cinfo;
 627	struct nfs_page *req;
 628	int status = data->task.tk_status;
 629
 630	trace_nfs_direct_commit_complete(dreq);
 631
 632	spin_lock(&dreq->lock);
 633	if (status < 0) {
 634		/* Errors in commit are fatal */
 635		dreq->error = status;
 636		dreq->flags = NFS_ODIRECT_DONE;
 637	} else {
 638		status = dreq->error;
 639	}
 640	spin_unlock(&dreq->lock);
 641
 642	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 643
 644	while (!list_empty(&data->pages)) {
 645		req = nfs_list_entry(data->pages.next);
 646		nfs_list_remove_request(req);
 647		if (status < 0) {
 648			spin_lock(&dreq->lock);
 649			nfs_direct_truncate_request(dreq, req);
 650			spin_unlock(&dreq->lock);
 651			nfs_release_request(req);
 652		} else if (!nfs_write_match_verf(verf, req)) {
 653			spin_lock(&dreq->lock);
 654			if (dreq->flags == 0)
 655				dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 656			spin_unlock(&dreq->lock);
 657			/*
 658			 * Despite the reboot, the write was successful,
 659			 * so reset wb_nio.
 660			 */
 661			req->wb_nio = 0;
 662			nfs_mark_request_commit(req, NULL, &cinfo, 0);
 663		} else
 664			nfs_release_request(req);
 665		nfs_unlock_and_release_request(req);
 666	}
 667
 668	if (nfs_commit_end(cinfo.mds))
 669		nfs_direct_write_complete(dreq);
 670}
 671
 672static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
 673		struct nfs_page *req)
 674{
 675	struct nfs_direct_req *dreq = cinfo->dreq;
 676
 677	trace_nfs_direct_resched_write(dreq);
 678
 679	spin_lock(&dreq->lock);
 680	if (dreq->flags != NFS_ODIRECT_DONE)
 681		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 682	spin_unlock(&dreq->lock);
 683	nfs_mark_request_commit(req, NULL, cinfo, 0);
 684}
 685
 686static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
 687	.completion = nfs_direct_commit_complete,
 688	.resched_write = nfs_direct_resched_write,
 689};
 690
 691static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 692{
 693	int res;
 694	struct nfs_commit_info cinfo;
 695	LIST_HEAD(mds_list);
 696
 697	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 698	nfs_commit_begin(cinfo.mds);
 699	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
 700	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
 701	if (res < 0) { /* res == -ENOMEM */
 702		spin_lock(&dreq->lock);
 703		if (dreq->flags == 0)
 704			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 705		spin_unlock(&dreq->lock);
 706	}
 707	if (nfs_commit_end(cinfo.mds))
 708		nfs_direct_write_complete(dreq);
 709}
 710
 711static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
 712{
 713	struct nfs_commit_info cinfo;
 714	struct nfs_page *req;
 715	LIST_HEAD(reqs);
 716
 717	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 718	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 719
 720	while (!list_empty(&reqs)) {
 721		req = nfs_list_entry(reqs.next);
 722		nfs_list_remove_request(req);
 723		nfs_direct_truncate_request(dreq, req);
 724		nfs_release_request(req);
 725		nfs_unlock_and_release_request(req);
 726	}
 727}
 728
 729static void nfs_direct_write_schedule_work(struct work_struct *work)
 730{
 731	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
 732	int flags = dreq->flags;
 733
 734	dreq->flags = 0;
 735	switch (flags) {
 736		case NFS_ODIRECT_DO_COMMIT:
 737			nfs_direct_commit_schedule(dreq);
 738			break;
 739		case NFS_ODIRECT_RESCHED_WRITES:
 740			nfs_direct_write_reschedule(dreq);
 741			break;
 742		default:
 743			nfs_direct_write_clear_reqs(dreq);
 744			nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
 745			nfs_direct_complete(dreq);
 746	}
 747}
 748
 749static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
 750{
 751	trace_nfs_direct_write_complete(dreq);
 752	queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
 753}
 754
 755static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 756{
 757	struct nfs_direct_req *dreq = hdr->dreq;
 758	struct nfs_commit_info cinfo;
 759	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 760	struct inode *inode = dreq->inode;
 761	int flags = NFS_ODIRECT_DONE;
 762
 763	trace_nfs_direct_write_completion(dreq);
 764
 765	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 766
 767	spin_lock(&dreq->lock);
 768	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
 769		spin_unlock(&dreq->lock);
 770		goto out_put;
 771	}
 772
 773	nfs_direct_count_bytes(dreq, hdr);
 774	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) &&
 775	    !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
 776		if (!dreq->flags)
 777			dreq->flags = NFS_ODIRECT_DO_COMMIT;
 778		flags = dreq->flags;
 779	}
 780	spin_unlock(&dreq->lock);
 781
 782	spin_lock(&inode->i_lock);
 783	nfs_direct_file_adjust_size_locked(inode, dreq->io_start, dreq->count);
 784	nfs_update_delegated_mtime_locked(dreq->inode);
 785	spin_unlock(&inode->i_lock);
 786
 787	while (!list_empty(&hdr->pages)) {
 788
 789		req = nfs_list_entry(hdr->pages.next);
 790		nfs_list_remove_request(req);
 791		if (flags == NFS_ODIRECT_DO_COMMIT) {
 792			kref_get(&req->wb_kref);
 793			memcpy(&req->wb_verf, &hdr->verf.verifier,
 794			       sizeof(req->wb_verf));
 795			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
 796				hdr->ds_commit_idx);
 797		} else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
 798			kref_get(&req->wb_kref);
 799			nfs_mark_request_commit(req, NULL, &cinfo, 0);
 800		}
 801		nfs_unlock_and_release_request(req);
 802	}
 803
 804out_put:
 805	if (put_dreq(dreq))
 806		nfs_direct_write_complete(dreq);
 807	hdr->release(hdr);
 808}
 809
 810static void nfs_write_sync_pgio_error(struct list_head *head, int error)
 811{
 812	struct nfs_page *req;
 813
 814	while (!list_empty(head)) {
 815		req = nfs_list_entry(head->next);
 816		nfs_list_remove_request(req);
 817		nfs_unlock_and_release_request(req);
 818	}
 819}
 820
 821static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
 822{
 823	struct nfs_direct_req *dreq = hdr->dreq;
 824	struct nfs_page *req;
 825	struct nfs_commit_info cinfo;
 826
 827	trace_nfs_direct_write_reschedule_io(dreq);
 828
 829	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 830	spin_lock(&dreq->lock);
 831	if (dreq->error == 0)
 832		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 833	set_bit(NFS_IOHDR_REDO, &hdr->flags);
 834	spin_unlock(&dreq->lock);
 835	while (!list_empty(&hdr->pages)) {
 836		req = nfs_list_entry(hdr->pages.next);
 837		nfs_list_remove_request(req);
 838		nfs_unlock_request(req);
 839		nfs_mark_request_commit(req, NULL, &cinfo, 0);
 840	}
 841}
 842
 843static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
 844	.error_cleanup = nfs_write_sync_pgio_error,
 845	.init_hdr = nfs_direct_pgio_init,
 846	.completion = nfs_direct_write_completion,
 847	.reschedule_io = nfs_direct_write_reschedule_io,
 848};
 849
 850
 851/*
 852 * NB: Return the value of the first error return code.  Subsequent
 853 *     errors after the first one are ignored.
 854 *
 855 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 856 * operation.  If iov_iter_get_pages_alloc2() fails, or if an
 857 * nfs_page cannot be created, bail and stop sending more writes.
 858 * Write length accounting is handled automatically by
 859 * nfs_direct_write_completion().  Otherwise, if no requests have
 860 * been sent, just return an error.
 861 */
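/*
 * Page-accounting sketch (illustrative numbers only, not taken from a
 * real trace): a chunk of `result' bytes returned by
 * iov_iter_get_pages_alloc2(), starting `pgbase' bytes into its first
 * page, spans
 *
 *	npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
 *
 * pages.  For example, result = 5000 and pgbase = 3000 with
 * PAGE_SIZE = 4096 occupy bytes [3000, 8000) of the pinned pages and
 * hence (5000 + 3000 + 4095) / 4096 = 2 pages.
 */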
 862static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 863					       struct iov_iter *iter,
 864					       loff_t pos, int ioflags)
 865{
 866	struct nfs_pageio_descriptor desc;
 867	struct inode *inode = dreq->inode;
 868	struct nfs_commit_info cinfo;
 869	ssize_t result = 0;
 870	size_t requested_bytes = 0;
 871	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
 872	bool defer = false;
 873
 874	trace_nfs_direct_write_schedule_iovec(dreq);
 875
 876	nfs_pageio_init_write(&desc, inode, ioflags, false,
 877			      &nfs_direct_write_completion_ops);
 878	desc.pg_dreq = dreq;
 879	get_dreq(dreq);
 880	inode_dio_begin(inode);
 881
 882	NFS_I(inode)->write_io += iov_iter_count(iter);
 883	while (iov_iter_count(iter)) {
 884		struct page **pagevec;
 885		size_t bytes;
 886		size_t pgbase;
 887		unsigned npages, i;
 888
 889		result = iov_iter_get_pages_alloc2(iter, &pagevec,
 890						  wsize, &pgbase);
 891		if (result < 0)
 892			break;
 893
 894	bytes = result;
 895	npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
 896		for (i = 0; i < npages; i++) {
 897			struct nfs_page *req;
 898			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
 899
 900			req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
 901							pgbase, pos, req_len);
 902			if (IS_ERR(req)) {
 903				result = PTR_ERR(req);
 904				break;
 905			}
 906
 907			if (desc.pg_error < 0) {
 908				nfs_free_request(req);
 909				result = desc.pg_error;
 910				break;
 911			}
 912
 913			pgbase = 0;
 914			bytes -= req_len;
 915			requested_bytes += req_len;
 916			pos += req_len;
 917
 918			if (defer) {
 919				nfs_mark_request_commit(req, NULL, &cinfo, 0);
 920				continue;
 921			}
 922
 923			nfs_lock_request(req);
 924			if (nfs_pageio_add_request(&desc, req))
 925				continue;
 926
 927			/* Exit on hard errors */
 928			if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
 929				result = desc.pg_error;
 930				nfs_unlock_and_release_request(req);
 931				break;
 932			}
 933
 934			/* If the error is soft, defer remaining requests */
 935			nfs_init_cinfo_from_dreq(&cinfo, dreq);
 936			spin_lock(&dreq->lock);
 937			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 938			spin_unlock(&dreq->lock);
 939			nfs_unlock_request(req);
 940			nfs_mark_request_commit(req, NULL, &cinfo, 0);
 941			desc.pg_error = 0;
 942			defer = true;
 943		}
 944		nfs_direct_release_pages(pagevec, npages);
 945		kvfree(pagevec);
 946		if (result < 0)
 947			break;
 948	}
 949	nfs_pageio_complete(&desc);
 950
 951	/*
 952	 * If no bytes were started, return the error, and let the
 953	 * generic layer handle the completion.
 954	 */
 955	if (requested_bytes == 0) {
 956		inode_dio_end(inode);
 957		nfs_direct_req_release(dreq);
 958		return result < 0 ? result : -EIO;
 959	}
 960
 961	if (put_dreq(dreq))
 962		nfs_direct_write_complete(dreq);
 963	return requested_bytes;
 964}
 965
 966/**
 967 * nfs_file_direct_write - file direct write operation for NFS files
 968 * @iocb: target I/O control block
 969 * @iter: vector of user buffers from which to write data
 970 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 971 *
 972 * We use this function for direct writes instead of calling
 973 * generic_file_aio_write() in order to avoid taking the inode
 974 * semaphore and updating the i_size.  The NFS server will set
 975 * the new i_size and this client must read the updated size
 976 * back into its cache.  We let the server do generic write
 977 * parameter checking and report problems.
 978 *
 979 * We eliminate local atime updates, see direct read above.
 980 *
 981 * We avoid unnecessary page cache invalidations for normal cached
 982 * readers of this file.
 983 *
 984 * Note that O_APPEND is not supported for NFS direct writes, as there
 985 * is no atomic O_APPEND write facility in the NFS protocol.
 986 */
 987ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
 988			      bool swap)
 989{
 990	ssize_t result, requested;
 991	size_t count;
 992	struct file *file = iocb->ki_filp;
 993	struct address_space *mapping = file->f_mapping;
 994	struct inode *inode = mapping->host;
 995	struct nfs_direct_req *dreq;
 996	struct nfs_lock_context *l_ctx;
 997	loff_t pos, end;
 998
 999	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
1000		file, iov_iter_count(iter), (long long) iocb->ki_pos);
1001
1002	if (swap)
1003		/* bypass generic checks */
1004		result = iov_iter_count(iter);
1005	else
1006		result = generic_write_checks(iocb, iter);
1007	if (result <= 0)
1008		return result;
1009	count = result;
1010	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
1011
1012	pos = iocb->ki_pos;
1013	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
1014
1015	task_io_account_write(count);
1016
1017	result = -ENOMEM;
1018	dreq = nfs_direct_req_alloc();
1019	if (!dreq)
1020		goto out;
1021
1022	dreq->inode = inode;
1023	dreq->max_count = count;
1024	dreq->io_start = pos;
1025	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
1026	l_ctx = nfs_get_lock_context(dreq->ctx);
1027	if (IS_ERR(l_ctx)) {
1028		result = PTR_ERR(l_ctx);
1029		nfs_direct_req_release(dreq);
1030		goto out_release;
1031	}
1032	dreq->l_ctx = l_ctx;
1033	if (!is_sync_kiocb(iocb))
1034		dreq->iocb = iocb;
1035	pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);
1036
1037	if (swap) {
1038		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
1039							    FLUSH_STABLE);
1040	} else {
1041		result = nfs_start_io_direct(inode);
1042		if (result) {
1043			/* release the reference that would usually be
1044			 * consumed by nfs_direct_write_schedule_iovec()
1045			 */
1046			nfs_direct_req_release(dreq);
1047			goto out_release;
1048		}
1049
1050		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
1051							    FLUSH_COND_STABLE);
1052
1053		if (mapping->nrpages) {
1054			invalidate_inode_pages2_range(mapping,
1055						      pos >> PAGE_SHIFT, end);
1056		}
1057
1058		nfs_end_io_direct(inode);
1059	}
1060
1061	if (requested > 0) {
1062		result = nfs_direct_wait(dreq);
1063		if (result > 0) {
1064			requested -= result;
1065			iocb->ki_pos = pos + result;
1066			/* XXX: should check the generic_write_sync retval */
1067			generic_write_sync(iocb, result);
1068		}
1069		iov_iter_revert(iter, requested);
1070	} else {
1071		result = requested;
1072	}
1073	nfs_fscache_invalidate(inode, FSCACHE_INVAL_DIO_WRITE);
1074out_release:
1075	nfs_direct_req_release(dreq);
1076out:
1077	return result;
1078}
1079
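/*
 * Illustrative userspace sketch (not part of this file; the mount
 * path, alignment, and sizes are assumptions, and error handling is
 * elided) of the kind of caller the entry point above services.
 * O_DIRECT transfers should be block-aligned, and O_APPEND is not
 * supported on this path.
 *
 *	#define _GNU_SOURCE		// for O_DIRECT
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *		int fd = open("/mnt/nfs/datafile", O_WRONLY | O_DIRECT);
 *
 *		posix_memalign(&buf, 4096, 4096);	// aligned buffer
 *		memset(buf, 'x', 4096);
 *		pwrite(fd, buf, 4096, 0);	// serviced by nfs_file_direct_write()
 *		close(fd);
 *		free(buf);
 *		return 0;
 *	}
 */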
1080/**
1081 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
1082 *
1083 */
1084int __init nfs_init_directcache(void)
1085{
1086	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
1087						sizeof(struct nfs_direct_req),
1088						0, SLAB_RECLAIM_ACCOUNT,
1089						NULL);
1090	if (nfs_direct_cachep == NULL)
1091		return -ENOMEM;
1092
1093	return 0;
1094}
1095
1096/**
1097 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
1098 *
1099 */
1100void nfs_destroy_directcache(void)
1101{
1102	kmem_cache_destroy(nfs_direct_cachep);
1103}