Linux v5.14.15, fs/nfs/direct.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * linux/fs/nfs/direct.c
  4 *
  5 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
  6 *
  7 * High-performance uncached I/O for the Linux NFS client
  8 *
  9 * There are important applications whose performance or correctness
 10 * depends on uncached access to file data.  Database clusters
 11 * (multiple copies of the same instance running on separate hosts)
 12 * implement their own cache coherency protocol that subsumes file
 13 * system cache protocols.  Applications that process datasets
 14 * considerably larger than the client's memory do not always benefit
 15 * from a local cache.  A streaming video server, for instance, has no
 16 * need to cache the contents of a file.
 17 *
 18 * When an application requests uncached I/O, all read and write requests
 19 * are made directly to the server; data stored or fetched via these
 20 * requests is not cached in the Linux page cache.  The client does not
 21 * correct unaligned requests from applications.  All requested bytes are
 22 * held on permanent storage before a direct write system call returns to
 23 * an application.
 24 *
 25 * Solaris implements an uncached I/O facility called directio() that
 26 * is used for backups and sequential I/O to very large files.  Solaris
 27 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 28 * an undocumented mount option.
 29 *
 30 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 31 * help from Andrew Morton.
 32 *
 33 * 18 Dec 2001	Initial implementation for 2.4  --cel
 34 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 35 * 08 Jun 2003	Port to 2.5 APIs  --cel
 36 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 37 * 15 Sep 2004	Parallel async reads  --cel
 38 * 04 May 2005	support O_DIRECT with aio  --cel
 39 *
 40 */
 41
 42#include <linux/errno.h>
 43#include <linux/sched.h>
 44#include <linux/kernel.h>
 45#include <linux/file.h>
 46#include <linux/pagemap.h>
 47#include <linux/kref.h>
 48#include <linux/slab.h>
 49#include <linux/task_io_accounting_ops.h>
 50#include <linux/module.h>
 51
 52#include <linux/nfs_fs.h>
 53#include <linux/nfs_page.h>
 54#include <linux/sunrpc/clnt.h>
 55
 56#include <linux/uaccess.h>
 57#include <linux/atomic.h>
 58
 59#include "internal.h"
 60#include "iostat.h"
 61#include "pnfs.h"
 62
 63#define NFSDBG_FACILITY		NFSDBG_VFS
 64
 65static struct kmem_cache *nfs_direct_cachep;
 66
 67struct nfs_direct_req {
 68	struct kref		kref;		/* release manager */
 69
 70	/* I/O parameters */
 71	struct nfs_open_context	*ctx;		/* file open context info */
 72	struct nfs_lock_context *l_ctx;		/* Lock context info */
 73	struct kiocb *		iocb;		/* controlling i/o request */
 74	struct inode *		inode;		/* target file of i/o */
 75
 76	/* completion state */
 77	atomic_t		io_count;	/* i/os we're waiting for */
 78	spinlock_t		lock;		/* protect completion state */
 79
 80	loff_t			io_start;	/* Start offset for I/O */
 81	ssize_t			count,		/* bytes actually processed */
 82				max_count,	/* max expected count */
 83				bytes_left,	/* bytes left to be sent */
 84				error;		/* any reported error */
 85	struct completion	completion;	/* wait for i/o completion */
 86
 87	/* commit state */
 88	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
 89	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
 90	struct work_struct	work;
 91	int			flags;
 92	/* for write */
 93#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
 94#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
 95	/* for read */
 96#define NFS_ODIRECT_SHOULD_DIRTY	(3)	/* dirty user-space page after read */
 97#define NFS_ODIRECT_DONE		INT_MAX	/* write request has completed */
 98};
 99
100static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
101static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
102static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
103static void nfs_direct_write_schedule_work(struct work_struct *work);
104
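/*
 * Every outstanding pgio header holds a reference on the dreq via
 * get_dreq(); put_dreq() returns true once the final reference is
 * dropped, i.e. once all I/O scheduled against the request is done.
 */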
105static inline void get_dreq(struct nfs_direct_req *dreq)
106{
107	atomic_inc(&dreq->io_count);
108}
109
110static inline int put_dreq(struct nfs_direct_req *dreq)
111{
112	return atomic_dec_and_test(&dreq->io_count);
113}
114
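/*
 * An error or EOF reply may truncate the request: clamp max_count
 * (and count) to the bytes the server actually saw, and record
 * hdr->error unless the truncation was a clean EOF.
 */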
115static void
116nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
117			    const struct nfs_pgio_header *hdr,
118			    ssize_t dreq_len)
119{
120	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
121	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
122		return;
123	if (dreq->max_count >= dreq_len) {
124		dreq->max_count = dreq_len;
125		if (dreq->count > dreq_len)
126			dreq->count = dreq_len;
127
128		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
129			dreq->error = hdr->error;
130		else /* Clear outstanding error if this is EOF */
131			dreq->error = 0;
132	}
133}
134
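/*
 * Fold one completed header into the request-wide byte count: the
 * good bytes run from dreq->io_start to the end of this header's
 * good range, clamped to max_count.
 */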
135static void
136nfs_direct_count_bytes(struct nfs_direct_req *dreq,
137		       const struct nfs_pgio_header *hdr)
138{
139	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
140	ssize_t dreq_len = 0;
141
142	if (hdr_end > dreq->io_start)
143		dreq_len = hdr_end - dreq->io_start;
144
145	nfs_direct_handle_truncated(dreq, hdr, dreq_len);
146
147	if (dreq_len > dreq->max_count)
148		dreq_len = dreq->max_count;
149
150	if (dreq->count < dreq_len)
151		dreq->count = dreq_len;
152}
153
154/**
155 * nfs_direct_IO - NFS address space operation for direct I/O
156 * @iocb: target I/O control block
157 * @iter: I/O buffer
158 *
159 * The presence of this routine in the address space ops vector means
160 * the NFS client supports direct I/O. However, for most direct IO, we
161 * shunt off direct read and write requests before the VFS gets them,
162 * so this method is only ever called for swap.
163 */
164ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
165{
166	struct inode *inode = iocb->ki_filp->f_mapping->host;
167
168	/* we only support swap file calling nfs_direct_IO */
169	if (!IS_SWAPFILE(inode))
170		return 0;
171
172	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
173
174	if (iov_iter_rw(iter) == READ)
175		return nfs_file_direct_read(iocb, iter);
176	return nfs_file_direct_write(iocb, iter);
177}
178
179static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
180{
181	unsigned int i;
182	for (i = 0; i < npages; i++)
183		put_page(pages[i]);
184}
185
186void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
187			      struct nfs_direct_req *dreq)
188{
189	cinfo->inode = dreq->inode;
190	cinfo->mds = &dreq->mds_cinfo;
191	cinfo->ds = &dreq->ds_cinfo;
192	cinfo->dreq = dreq;
193	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
194}
195
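/*
 * Note the paired kref_init()/kref_get(): one reference belongs to
 * the caller, the other is dropped by nfs_direct_complete() when
 * the I/O finishes.
 */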
196static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
197{
198	struct nfs_direct_req *dreq;
199
200	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
201	if (!dreq)
202		return NULL;
203
204	kref_init(&dreq->kref);
205	kref_get(&dreq->kref);
206	init_completion(&dreq->completion);
207	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
208	pnfs_init_ds_commit_info(&dreq->ds_cinfo);
209	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
210	spin_lock_init(&dreq->lock);
211
212	return dreq;
213}
214
215static void nfs_direct_req_free(struct kref *kref)
216{
217	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
218
219	pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
220	if (dreq->l_ctx != NULL)
221		nfs_put_lock_context(dreq->l_ctx);
222	if (dreq->ctx != NULL)
223		put_nfs_open_context(dreq->ctx);
224	kmem_cache_free(nfs_direct_cachep, dreq);
225}
226
227static void nfs_direct_req_release(struct nfs_direct_req *dreq)
228{
229	kref_put(&dreq->kref, nfs_direct_req_free);
230}
231
232ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
233{
234	return dreq->bytes_left;
235}
236EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
237
238/*
239 * Collects and returns the final error value/byte-count.
240 */
241static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
242{
243	ssize_t result = -EIOCBQUEUED;
244
245	/* Async requests don't wait here */
246	if (dreq->iocb)
247		goto out;
248
249	result = wait_for_completion_killable(&dreq->completion);
250
251	if (!result) {
252		result = dreq->count;
253		WARN_ON_ONCE(dreq->count < 0);
254	}
255	if (!result)
256		result = dreq->error;
257
258out:
259	return (ssize_t) result;
260}
261
262/*
263 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
264 * the iocb is still valid here if this is a synchronous request.
265 */
266static void nfs_direct_complete(struct nfs_direct_req *dreq)
267{
268	struct inode *inode = dreq->inode;
269
270	inode_dio_end(inode);
271
272	if (dreq->iocb) {
273		long res = (long) dreq->error;
274		if (dreq->count != 0) {
275			res = (long) dreq->count;
276			WARN_ON_ONCE(dreq->count < 0);
277		}
278		dreq->iocb->ki_complete(dreq->iocb, res, 0);
279	}
280
281	complete(&dreq->completion);
282
283	nfs_direct_req_release(dreq);
284}
285
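/*
 * Per-header read completion: account the good bytes, mark the
 * destination pages dirty when they came from user space
 * (NFS_ODIRECT_SHOULD_DIRTY), and drop the page references.  The
 * final put_dreq() completes the whole request.
 */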
286static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
287{
288	unsigned long bytes = 0;
289	struct nfs_direct_req *dreq = hdr->dreq;
290
291	spin_lock(&dreq->lock);
292	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
293		spin_unlock(&dreq->lock);
294		goto out_put;
295	}
296
297	nfs_direct_count_bytes(dreq, hdr);
298	spin_unlock(&dreq->lock);
299
300	while (!list_empty(&hdr->pages)) {
301		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
302		struct page *page = req->wb_page;
303
304		if (!PageCompound(page) && bytes < hdr->good_bytes &&
305		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
306			set_page_dirty(page);
307		bytes += req->wb_bytes;
308		nfs_list_remove_request(req);
309		nfs_release_request(req);
310	}
311out_put:
312	if (put_dreq(dreq))
313		nfs_direct_complete(dreq);
314	hdr->release(hdr);
315}
316
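/* Error cleanup: release read requests that were never transmitted. */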
317static void nfs_read_sync_pgio_error(struct list_head *head, int error)
318{
319	struct nfs_page *req;
320
321	while (!list_empty(head)) {
322		req = nfs_list_entry(head->next);
323		nfs_list_remove_request(req);
324		nfs_release_request(req);
325	}
326}
327
328static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
329{
330	get_dreq(hdr->dreq);
331}
332
333static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
334	.error_cleanup = nfs_read_sync_pgio_error,
335	.init_hdr = nfs_direct_pgio_init,
336	.completion = nfs_direct_read_completion,
337};
338
339/*
340 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
341 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
342 * fails, bail and stop sending more reads.  Read length accounting is
343 * handled automatically by nfs_direct_read_completion().  Otherwise, if
344 * no requests have been sent, just return an error.
345 */
346
347static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
348					      struct iov_iter *iter,
349					      loff_t pos)
350{
351	struct nfs_pageio_descriptor desc;
352	struct inode *inode = dreq->inode;
353	ssize_t result = -EINVAL;
354	size_t requested_bytes = 0;
355	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);
356
357	nfs_pageio_init_read(&desc, dreq->inode, false,
358			     &nfs_direct_read_completion_ops);
359	get_dreq(dreq);
360	desc.pg_dreq = dreq;
361	inode_dio_begin(inode);
362
363	while (iov_iter_count(iter)) {
364		struct page **pagevec;
365		size_t bytes;
366		size_t pgbase;
367		unsigned npages, i;
368
369		result = iov_iter_get_pages_alloc(iter, &pagevec, 
370						  rsize, &pgbase);
371		if (result < 0)
372			break;
373	
374		bytes = result;
375		iov_iter_advance(iter, bytes);
376		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
377		for (i = 0; i < npages; i++) {
378			struct nfs_page *req;
379			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
380			/* XXX do we need to do the eof zeroing found in async_filler? */
381			req = nfs_create_request(dreq->ctx, pagevec[i],
382						 pgbase, req_len);
383			if (IS_ERR(req)) {
384				result = PTR_ERR(req);
385				break;
386			}
387			req->wb_index = pos >> PAGE_SHIFT;
388			req->wb_offset = pos & ~PAGE_MASK;
389			if (!nfs_pageio_add_request(&desc, req)) {
390				result = desc.pg_error;
391				nfs_release_request(req);
392				break;
393			}
394			pgbase = 0;
395			bytes -= req_len;
396			requested_bytes += req_len;
397			pos += req_len;
398			dreq->bytes_left -= req_len;
399		}
400		nfs_direct_release_pages(pagevec, npages);
401		kvfree(pagevec);
402		if (result < 0)
403			break;
404	}
405
406	nfs_pageio_complete(&desc);
407
408	/*
409	 * If no bytes were started, return the error, and let the
410	 * generic layer handle the completion.
411	 */
412	if (requested_bytes == 0) {
413		inode_dio_end(inode);
414		nfs_direct_req_release(dreq);
415		return result < 0 ? result : -EIO;
416	}
417
418	if (put_dreq(dreq))
419		nfs_direct_complete(dreq);
420	return requested_bytes;
421}
422
423/**
424 * nfs_file_direct_read - file direct read operation for NFS files
425 * @iocb: target I/O control block
426 * @iter: vector of user buffers into which to read data
427 *
428 * We use this function for direct reads instead of calling
429 * generic_file_aio_read() in order to avoid gfar's check to see if
430 * the request starts before the end of the file.  For that check
431 * to work, we must generate a GETATTR before each direct read, and
432 * even then there is a window between the GETATTR and the subsequent
433 * READ where the file size could change.  Our preference is simply
434 * to do all reads the application wants, and the server will take
435 * care of managing the end of file boundary.
436 *
437 * This function also eliminates unnecessarily updating the file's
438 * atime locally, as the NFS server sets the file's atime, and this
439 * client must read the updated atime from the server back into its
440 * cache.
441 */
442ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
443{
444	struct file *file = iocb->ki_filp;
445	struct address_space *mapping = file->f_mapping;
446	struct inode *inode = mapping->host;
447	struct nfs_direct_req *dreq;
448	struct nfs_lock_context *l_ctx;
449	ssize_t result, requested;
450	size_t count = iov_iter_count(iter);
451	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
452
453	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
454		file, count, (long long) iocb->ki_pos);
455
456	result = 0;
457	if (!count)
458		goto out;
459
460	task_io_account_read(count);
461
462	result = -ENOMEM;
463	dreq = nfs_direct_req_alloc();
464	if (dreq == NULL)
465		goto out;
466
467	dreq->inode = inode;
468	dreq->bytes_left = dreq->max_count = count;
469	dreq->io_start = iocb->ki_pos;
470	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
471	l_ctx = nfs_get_lock_context(dreq->ctx);
472	if (IS_ERR(l_ctx)) {
473		result = PTR_ERR(l_ctx);
474		nfs_direct_req_release(dreq);
475		goto out_release;
476	}
477	dreq->l_ctx = l_ctx;
478	if (!is_sync_kiocb(iocb))
479		dreq->iocb = iocb;
480
481	if (iter_is_iovec(iter))
482		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
483
484	nfs_start_io_direct(inode);
485
486	NFS_I(inode)->read_io += count;
487	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
488
489	nfs_end_io_direct(inode);
490
491	if (requested > 0) {
492		result = nfs_direct_wait(dreq);
493		if (result > 0) {
494			requested -= result;
495			iocb->ki_pos += result;
496		}
497		iov_iter_revert(iter, requested);
498	} else {
499		result = requested;
500	}
501
502out_release:
503	nfs_direct_req_release(dreq);
504out:
505	return result;
506}
507
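/*
 * Before rescheduling, collapse each sub-page request group back
 * into a single request spanning the whole page: release every
 * group member except the head, then merge their byte ranges with
 * nfs_join_page_group().
 */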
508static void
509nfs_direct_join_group(struct list_head *list, struct inode *inode)
510{
511	struct nfs_page *req, *next;
512
513	list_for_each_entry(req, list, wb_list) {
514		if (req->wb_head != req || req->wb_this_page == req)
515			continue;
516		for (next = req->wb_this_page;
517				next != req->wb_head;
518				next = next->wb_this_page) {
519			nfs_list_remove_request(next);
520			nfs_release_request(next);
521		}
522		nfs_join_page_group(req, inode);
523	}
524}
525
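/*
 * Move every request still waiting for a COMMIT (on the pNFS DS
 * buckets as well as the MDS list) onto @list, under the inode's
 * commit mutex.
 */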
526static void
527nfs_direct_write_scan_commit_list(struct inode *inode,
528				  struct list_head *list,
529				  struct nfs_commit_info *cinfo)
530{
531	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
532	pnfs_recover_commit_reqs(list, cinfo);
533	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
534	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
535}
536
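/*
 * Resend every write in the request, e.g. after a verifier mismatch
 * revealed a server reboot: reclaim the requests from the commit
 * lists, reset the byte counters, and rewrite the data FLUSH_STABLE.
 */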
537static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
538{
539	struct nfs_pageio_descriptor desc;
540	struct nfs_page *req, *tmp;
541	LIST_HEAD(reqs);
542	struct nfs_commit_info cinfo;
543	LIST_HEAD(failed);
544
545	nfs_init_cinfo_from_dreq(&cinfo, dreq);
546	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
547
548	nfs_direct_join_group(&reqs, dreq->inode);
549
550	dreq->count = 0;
551	dreq->max_count = 0;
552	list_for_each_entry(req, &reqs, wb_list)
553		dreq->max_count += req->wb_bytes;
554	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
555	get_dreq(dreq);
556
557	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
558			      &nfs_direct_write_completion_ops);
559	desc.pg_dreq = dreq;
560
561	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
562		/* Bump the transmission count */
563		req->wb_nio++;
564		if (!nfs_pageio_add_request(&desc, req)) {
565			nfs_list_move_request(req, &failed);
566			spin_lock(&cinfo.inode->i_lock);
567			dreq->flags = 0;
568			if (desc.pg_error < 0)
569				dreq->error = desc.pg_error;
570			else
571				dreq->error = -EIO;
572			spin_unlock(&cinfo.inode->i_lock);
573		}
574		nfs_release_request(req);
575	}
576	nfs_pageio_complete(&desc);
577
578	while (!list_empty(&failed)) {
579		req = nfs_list_entry(failed.next);
580		nfs_list_remove_request(req);
581		nfs_unlock_and_release_request(req);
582	}
583
584	if (put_dreq(dreq))
585		nfs_direct_write_complete(dreq);
586}
587
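/*
 * COMMIT completion: an error in commit is fatal to the whole
 * request, while a verifier mismatch marks the writes to be
 * rescheduled through the MDS.
 */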
588static void nfs_direct_commit_complete(struct nfs_commit_data *data)
589{
590	const struct nfs_writeverf *verf = data->res.verf;
591	struct nfs_direct_req *dreq = data->dreq;
592	struct nfs_commit_info cinfo;
593	struct nfs_page *req;
594	int status = data->task.tk_status;
595
596	if (status < 0) {
597		/* Errors in commit are fatal */
598		dreq->error = status;
599		dreq->max_count = 0;
600		dreq->count = 0;
601		dreq->flags = NFS_ODIRECT_DONE;
602	} else if (dreq->flags == NFS_ODIRECT_DONE)
603		status = dreq->error;
604
605	nfs_init_cinfo_from_dreq(&cinfo, dreq);
606
607	while (!list_empty(&data->pages)) {
608		req = nfs_list_entry(data->pages.next);
609		nfs_list_remove_request(req);
610		if (status >= 0 && !nfs_write_match_verf(verf, req)) {
611			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
612			/*
613			 * Despite the reboot, the write was successful,
614			 * so reset wb_nio.
615			 */
616			req->wb_nio = 0;
617			nfs_mark_request_commit(req, NULL, &cinfo, 0);
618		} else /* Error or match */
619			nfs_release_request(req);
620		nfs_unlock_and_release_request(req);
621	}
622
623	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
624		nfs_direct_write_complete(dreq);
625}
626
627static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
628		struct nfs_page *req)
629{
630	struct nfs_direct_req *dreq = cinfo->dreq;
631
632	spin_lock(&dreq->lock);
633	if (dreq->flags != NFS_ODIRECT_DONE)
634		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
635	spin_unlock(&dreq->lock);
636	nfs_mark_request_commit(req, NULL, cinfo, 0);
637}
638
639static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
640	.completion = nfs_direct_commit_complete,
641	.resched_write = nfs_direct_resched_write,
642};
643
644static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
645{
646	int res;
647	struct nfs_commit_info cinfo;
648	LIST_HEAD(mds_list);
649
650	nfs_init_cinfo_from_dreq(&cinfo, dreq);
651	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
652	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
653	if (res < 0) /* res == -ENOMEM */
654		nfs_direct_write_reschedule(dreq);
655}
656
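/*
 * Tear-down path: drop any requests still parked on the commit
 * lists without issuing a COMMIT.
 */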
657static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
658{
659	struct nfs_commit_info cinfo;
660	struct nfs_page *req;
661	LIST_HEAD(reqs);
662
663	nfs_init_cinfo_from_dreq(&cinfo, dreq);
664	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
665
666	while (!list_empty(&reqs)) {
667		req = nfs_list_entry(reqs.next);
668		nfs_list_remove_request(req);
669		nfs_release_request(req);
670		nfs_unlock_and_release_request(req);
671	}
672}
673
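/*
 * Deferred completion, run from the nfsiod workqueue: issue the
 * COMMIT, reschedule failed writes, or finish the request and
 * invalidate the now-stale page cache mapping.
 */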
674static void nfs_direct_write_schedule_work(struct work_struct *work)
675{
676	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
677	int flags = dreq->flags;
678
679	dreq->flags = 0;
680	switch (flags) {
681		case NFS_ODIRECT_DO_COMMIT:
682			nfs_direct_commit_schedule(dreq);
683			break;
684		case NFS_ODIRECT_RESCHED_WRITES:
685			nfs_direct_write_reschedule(dreq);
686			break;
687		default:
688			nfs_direct_write_clear_reqs(dreq);
689			nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
690			nfs_direct_complete(dreq);
691	}
692}
693
694static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
695{
696	queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
697}
698
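/*
 * Per-header write completion: account the good bytes, and queue
 * each page for a later COMMIT when the server's reply was an
 * unstable write.
 */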
699static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
700{
701	struct nfs_direct_req *dreq = hdr->dreq;
702	struct nfs_commit_info cinfo;
703	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
704	int flags = NFS_ODIRECT_DONE;
705
706	nfs_init_cinfo_from_dreq(&cinfo, dreq);
707
708	spin_lock(&dreq->lock);
709	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
710		spin_unlock(&dreq->lock);
711		goto out_put;
712	}
713
714	nfs_direct_count_bytes(dreq, hdr);
715	if (hdr->good_bytes != 0 && nfs_write_need_commit(hdr)) {
716		if (!dreq->flags)
717			dreq->flags = NFS_ODIRECT_DO_COMMIT;
718		flags = dreq->flags;
719	}
720	spin_unlock(&dreq->lock);
721
722	while (!list_empty(&hdr->pages)) {
723
724		req = nfs_list_entry(hdr->pages.next);
725		nfs_list_remove_request(req);
726		if (flags == NFS_ODIRECT_DO_COMMIT) {
727			kref_get(&req->wb_kref);
728			memcpy(&req->wb_verf, &hdr->verf.verifier,
729			       sizeof(req->wb_verf));
730			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
731				hdr->ds_commit_idx);
732		} else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
733			kref_get(&req->wb_kref);
734			nfs_mark_request_commit(req, NULL, &cinfo, 0);
735		}
736		nfs_unlock_and_release_request(req);
737	}
738
739out_put:
740	if (put_dreq(dreq))
741		nfs_direct_write_complete(dreq);
742	hdr->release(hdr);
743}
744
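/* Error cleanup: unlock and release write requests that were never sent. */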
745static void nfs_write_sync_pgio_error(struct list_head *head, int error)
746{
747	struct nfs_page *req;
748
749	while (!list_empty(head)) {
750		req = nfs_list_entry(head->next);
751		nfs_list_remove_request(req);
752		nfs_unlock_and_release_request(req);
753	}
754}
755
756static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
757{
758	struct nfs_direct_req *dreq = hdr->dreq;
759
760	spin_lock(&dreq->lock);
761	if (dreq->error == 0) {
762		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
763		/* fake unstable write to let common nfs resend pages */
764		hdr->verf.committed = NFS_UNSTABLE;
765		hdr->good_bytes = hdr->args.offset + hdr->args.count -
766			hdr->io_start;
767	}
768	spin_unlock(&dreq->lock);
769}
770
771static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
772	.error_cleanup = nfs_write_sync_pgio_error,
773	.init_hdr = nfs_direct_pgio_init,
774	.completion = nfs_direct_write_completion,
775	.reschedule_io = nfs_direct_write_reschedule_io,
776};
777
778
779/*
780 * NB: Return the value of the first error return code.  Subsequent
781 *     errors after the first one are ignored.
782 */
783/*
784 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
785 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
786 * fails, bail and stop sending more writes.  Write length accounting is
787 * handled automatically by nfs_direct_write_completion().  Otherwise, if
788 * no requests have been sent, just return an error.
789 */
790static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
791					       struct iov_iter *iter,
792					       loff_t pos)
793{
794	struct nfs_pageio_descriptor desc;
795	struct inode *inode = dreq->inode;
796	ssize_t result = 0;
797	size_t requested_bytes = 0;
798	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
799
800	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
801			      &nfs_direct_write_completion_ops);
802	desc.pg_dreq = dreq;
803	get_dreq(dreq);
804	inode_dio_begin(inode);
805
806	NFS_I(inode)->write_io += iov_iter_count(iter);
807	while (iov_iter_count(iter)) {
808		struct page **pagevec;
809		size_t bytes;
810		size_t pgbase;
811		unsigned npages, i;
812
813		result = iov_iter_get_pages_alloc(iter, &pagevec, 
814						  wsize, &pgbase);
815		if (result < 0)
816			break;
817
818		bytes = result;
819		iov_iter_advance(iter, bytes);
820		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
821		for (i = 0; i < npages; i++) {
822			struct nfs_page *req;
823			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
824
825			req = nfs_create_request(dreq->ctx, pagevec[i],
826						 pgbase, req_len);
827			if (IS_ERR(req)) {
828				result = PTR_ERR(req);
829				break;
830			}
831
832			if (desc.pg_error < 0) {
833				nfs_free_request(req);
834				result = desc.pg_error;
835				break;
836			}
837
838			nfs_lock_request(req);
839			req->wb_index = pos >> PAGE_SHIFT;
840			req->wb_offset = pos & ~PAGE_MASK;
841			if (!nfs_pageio_add_request(&desc, req)) {
842				result = desc.pg_error;
843				nfs_unlock_and_release_request(req);
844				break;
845			}
846			pgbase = 0;
847			bytes -= req_len;
848			requested_bytes += req_len;
849			pos += req_len;
850			dreq->bytes_left -= req_len;
851		}
852		nfs_direct_release_pages(pagevec, npages);
853		kvfree(pagevec);
854		if (result < 0)
855			break;
856	}
857	nfs_pageio_complete(&desc);
858
859	/*
860	 * If no bytes were started, return the error, and let the
861	 * generic layer handle the completion.
862	 */
863	if (requested_bytes == 0) {
864		inode_dio_end(inode);
865		nfs_direct_req_release(dreq);
866		return result < 0 ? result : -EIO;
867	}
868
869	if (put_dreq(dreq))
870		nfs_direct_write_complete(dreq);
871	return requested_bytes;
872}
873
874/**
875 * nfs_file_direct_write - file direct write operation for NFS files
876 * @iocb: target I/O control block
877 * @iter: vector of user buffers from which to write data
878 *
879 * We use this function for direct writes instead of calling
880 * generic_file_aio_write() in order to avoid taking the inode
881 * semaphore and updating the i_size.  The NFS server will set
882 * the new i_size and this client must read the updated size
883 * back into its cache.  We let the server do generic write
884 * parameter checking and report problems.
885 *
886 * We eliminate local atime updates, see direct read above.
887 *
888 * We avoid unnecessary page cache invalidations for normal cached
889 * readers of this file.
890 *
891 * Note that O_APPEND is not supported for NFS direct writes, as there
892 * is no atomic O_APPEND write facility in the NFS protocol.
893 */
894ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
895{
896	ssize_t result, requested;
897	size_t count;
898	struct file *file = iocb->ki_filp;
899	struct address_space *mapping = file->f_mapping;
900	struct inode *inode = mapping->host;
901	struct nfs_direct_req *dreq;
902	struct nfs_lock_context *l_ctx;
903	loff_t pos, end;
904
905	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
906		file, iov_iter_count(iter), (long long) iocb->ki_pos);
907
908	result = generic_write_checks(iocb, iter);
909	if (result <= 0)
910		return result;
911	count = result;
912	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
913
914	pos = iocb->ki_pos;
915	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
916
917	task_io_account_write(count);
918
919	result = -ENOMEM;
920	dreq = nfs_direct_req_alloc();
921	if (!dreq)
922		goto out;
923
924	dreq->inode = inode;
925	dreq->bytes_left = dreq->max_count = count;
926	dreq->io_start = pos;
927	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
928	l_ctx = nfs_get_lock_context(dreq->ctx);
929	if (IS_ERR(l_ctx)) {
930		result = PTR_ERR(l_ctx);
931		nfs_direct_req_release(dreq);
932		goto out_release;
933	}
934	dreq->l_ctx = l_ctx;
935	if (!is_sync_kiocb(iocb))
936		dreq->iocb = iocb;
937	pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);
938
939	nfs_start_io_direct(inode);
940
941	requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);
942
943	if (mapping->nrpages) {
944		invalidate_inode_pages2_range(mapping,
945					      pos >> PAGE_SHIFT, end);
946	}
947
948	nfs_end_io_direct(inode);
949
950	if (requested > 0) {
951		result = nfs_direct_wait(dreq);
952		if (result > 0) {
953			requested -= result;
954			iocb->ki_pos = pos + result;
955			/* XXX: should check the generic_write_sync retval */
956			generic_write_sync(iocb, result);
957		}
958		iov_iter_revert(iter, requested);
959	} else {
960		result = requested;
961	}
962out_release:
963	nfs_direct_req_release(dreq);
964out:
965	return result;
966}
967
968/**
969 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
970 *
971 */
972int __init nfs_init_directcache(void)
973{
974	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
975						sizeof(struct nfs_direct_req),
976						0, (SLAB_RECLAIM_ACCOUNT|
977							SLAB_MEM_SPREAD),
978						NULL);
979	if (nfs_direct_cachep == NULL)
980		return -ENOMEM;
981
982	return 0;
983}
984
985/**
986 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
987 *
988 */
989void nfs_destroy_directcache(void)
990{
991	kmem_cache_destroy(nfs_direct_cachep);
992}
v4.17
 
   1/*
   2 * linux/fs/nfs/direct.c
   3 *
   4 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
   5 *
   6 * High-performance uncached I/O for the Linux NFS client
   7 *
   8 * There are important applications whose performance or correctness
   9 * depends on uncached access to file data.  Database clusters
  10 * (multiple copies of the same instance running on separate hosts)
  11 * implement their own cache coherency protocol that subsumes file
  12 * system cache protocols.  Applications that process datasets
  13 * considerably larger than the client's memory do not always benefit
  14 * from a local cache.  A streaming video server, for instance, has no
  15 * need to cache the contents of a file.
  16 *
  17 * When an application requests uncached I/O, all read and write requests
  18 * are made directly to the server; data stored or fetched via these
  19 * requests is not cached in the Linux page cache.  The client does not
  20 * correct unaligned requests from applications.  All requested bytes are
  21 * held on permanent storage before a direct write system call returns to
  22 * an application.
  23 *
  24 * Solaris implements an uncached I/O facility called directio() that
  25 * is used for backups and sequential I/O to very large files.  Solaris
  26 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
  27 * an undocumented mount option.
  28 *
  29 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
  30 * help from Andrew Morton.
  31 *
  32 * 18 Dec 2001	Initial implementation for 2.4  --cel
  33 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
  34 * 08 Jun 2003	Port to 2.5 APIs  --cel
  35 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
  36 * 15 Sep 2004	Parallel async reads  --cel
  37 * 04 May 2005	support O_DIRECT with aio  --cel
  38 *
  39 */
  40
  41#include <linux/errno.h>
  42#include <linux/sched.h>
  43#include <linux/kernel.h>
  44#include <linux/file.h>
  45#include <linux/pagemap.h>
  46#include <linux/kref.h>
  47#include <linux/slab.h>
  48#include <linux/task_io_accounting_ops.h>
  49#include <linux/module.h>
  50
  51#include <linux/nfs_fs.h>
  52#include <linux/nfs_page.h>
  53#include <linux/sunrpc/clnt.h>
  54
  55#include <linux/uaccess.h>
  56#include <linux/atomic.h>
  57
  58#include "internal.h"
  59#include "iostat.h"
  60#include "pnfs.h"
  61
  62#define NFSDBG_FACILITY		NFSDBG_VFS
  63
  64static struct kmem_cache *nfs_direct_cachep;
  65
  66/*
  67 * This represents a set of asynchronous requests that we're waiting on
  68 */
  69struct nfs_direct_mirror {
  70	ssize_t count;
  71};
  72
  73struct nfs_direct_req {
  74	struct kref		kref;		/* release manager */
  75
  76	/* I/O parameters */
  77	struct nfs_open_context	*ctx;		/* file open context info */
  78	struct nfs_lock_context *l_ctx;		/* Lock context info */
  79	struct kiocb *		iocb;		/* controlling i/o request */
  80	struct inode *		inode;		/* target file of i/o */
  81
  82	/* completion state */
  83	atomic_t		io_count;	/* i/os we're waiting for */
  84	spinlock_t		lock;		/* protect completion state */
  85
  86	struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
  87	int			mirror_count;
  88
  89	loff_t			io_start;	/* Start offset for I/O */
  90	ssize_t			count,		/* bytes actually processed */
  91				max_count,	/* max expected count */
  92				bytes_left,	/* bytes left to be sent */
  93				error;		/* any reported error */
  94	struct completion	completion;	/* wait for i/o completion */
  95
  96	/* commit state */
  97	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
  98	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
  99	struct work_struct	work;
 100	int			flags;
 
 101#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
 102#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
 103	struct nfs_writeverf	verf;		/* unstable write verifier */
 
 
 104};
 105
 106static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
 107static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
 108static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
 109static void nfs_direct_write_schedule_work(struct work_struct *work);
 110
 111static inline void get_dreq(struct nfs_direct_req *dreq)
 112{
 113	atomic_inc(&dreq->io_count);
 114}
 115
 116static inline int put_dreq(struct nfs_direct_req *dreq)
 117{
 118	return atomic_dec_and_test(&dreq->io_count);
 119}
 120
 121static void
 122nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
 123{
 124	int i;
 125	ssize_t count;
 126
 127	WARN_ON_ONCE(dreq->count >= dreq->max_count);
 128
 129	if (dreq->mirror_count == 1) {
 130		dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
 131		dreq->count += hdr->good_bytes;
 132	} else {
 133		/* mirrored writes */
 134		count = dreq->mirrors[hdr->pgio_mirror_idx].count;
 135		if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
 136			count = hdr->io_start + hdr->good_bytes - dreq->io_start;
 137			dreq->mirrors[hdr->pgio_mirror_idx].count = count;
 138		}
 139		/* update the dreq->count by finding the minimum agreed count from all
 140		 * mirrors */
 141		count = dreq->mirrors[0].count;
 142
 143		for (i = 1; i < dreq->mirror_count; i++)
 144			count = min(count, dreq->mirrors[i].count);
 145
 146		dreq->count = count;
 147	}
 148}
 149
 150/*
 151 * nfs_direct_select_verf - select the right verifier
 152 * @dreq - direct request possibly spanning multiple servers
 153 * @ds_clp - nfs_client of data server or NULL if MDS / non-pnfs
 154 * @commit_idx - commit bucket index for the DS
 155 *
 156 * returns the correct verifier to use given the role of the server
 157 */
 158static struct nfs_writeverf *
 159nfs_direct_select_verf(struct nfs_direct_req *dreq,
 160		       struct nfs_client *ds_clp,
 161		       int commit_idx)
 162{
 163	struct nfs_writeverf *verfp = &dreq->verf;
 164
 165#ifdef CONFIG_NFS_V4_1
 166	/*
 167	 * pNFS is in use, use the DS verf except commit_through_mds is set
 168	 * for layout segment where nbuckets is zero.
 169	 */
 170	if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
 171		if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
 172			verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
 173		else
 174			WARN_ON_ONCE(1);
 175	}
 176#endif
 177	return verfp;
 178}
 179
 180
 181/*
 182 * nfs_direct_set_hdr_verf - set the write/commit verifier
 183 * @dreq - direct request possibly spanning multiple servers
 184 * @hdr - pageio header to validate against previously seen verfs
 185 *
 186 * Set the server's (MDS or DS) "seen" verifier
 187 */
 188static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
 189				    struct nfs_pgio_header *hdr)
 190{
 191	struct nfs_writeverf *verfp;
 
 192
 193	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
 194	WARN_ON_ONCE(verfp->committed >= 0);
 195	memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
 196	WARN_ON_ONCE(verfp->committed < 0);
 197}
 198
 199static int nfs_direct_cmp_verf(const struct nfs_writeverf *v1,
 200		const struct nfs_writeverf *v2)
 201{
 202	return nfs_write_verifier_cmp(&v1->verifier, &v2->verifier);
 203}
 204
 205/*
 206 * nfs_direct_cmp_hdr_verf - compare verifier for pgio header
 207 * @dreq - direct request possibly spanning multiple servers
 208 * @hdr - pageio header to validate against previously seen verf
 209 *
 210 * set the server's "seen" verf if not initialized.
 211 * returns result of comparison between @hdr->verf and the "seen"
 212 * verf of the server used by @hdr (DS or MDS)
 213 */
 214static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
 215					  struct nfs_pgio_header *hdr)
 216{
 217	struct nfs_writeverf *verfp;
 218
 219	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
 220	if (verfp->committed < 0) {
 221		nfs_direct_set_hdr_verf(dreq, hdr);
 222		return 0;
 223	}
 224	return nfs_direct_cmp_verf(verfp, &hdr->verf);
 225}
 226
 227/*
 228 * nfs_direct_cmp_commit_data_verf - compare verifier for commit data
 229 * @dreq - direct request possibly spanning multiple servers
 230 * @data - commit data to validate against previously seen verf
 231 *
 232 * returns result of comparison between @data->verf and the verf of
 233 * the server used by @data (DS or MDS)
 234 */
 235static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
 236					   struct nfs_commit_data *data)
 237{
 238	struct nfs_writeverf *verfp;
 239
 240	verfp = nfs_direct_select_verf(dreq, data->ds_clp,
 241					 data->ds_commit_index);
 242
 243	/* verifier not set so always fail */
 244	if (verfp->committed < 0)
 245		return 1;
 246
 247	return nfs_direct_cmp_verf(verfp, &data->verf);
 248}
 249
 250/**
 251 * nfs_direct_IO - NFS address space operation for direct I/O
 252 * @iocb: target I/O control block
 253 * @iter: I/O buffer
 254 *
 255 * The presence of this routine in the address space ops vector means
 256 * the NFS client supports direct I/O. However, for most direct IO, we
 257 * shunt off direct read and write requests before the VFS gets them,
 258 * so this method is only ever called for swap.
 259 */
 260ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 261{
 262	struct inode *inode = iocb->ki_filp->f_mapping->host;
 263
 264	/* we only support swap file calling nfs_direct_IO */
 265	if (!IS_SWAPFILE(inode))
 266		return 0;
 267
 268	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
 269
 270	if (iov_iter_rw(iter) == READ)
 271		return nfs_file_direct_read(iocb, iter);
 272	return nfs_file_direct_write(iocb, iter);
 273}
 274
 275static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 276{
 277	unsigned int i;
 278	for (i = 0; i < npages; i++)
 279		put_page(pages[i]);
 280}
 281
 282void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
 283			      struct nfs_direct_req *dreq)
 284{
 285	cinfo->inode = dreq->inode;
 286	cinfo->mds = &dreq->mds_cinfo;
 287	cinfo->ds = &dreq->ds_cinfo;
 288	cinfo->dreq = dreq;
 289	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
 290}
 291
 292static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
 293					     struct nfs_pageio_descriptor *pgio,
 294					     struct nfs_page *req)
 295{
 296	int mirror_count = 1;
 297
 298	if (pgio->pg_ops->pg_get_mirror_count)
 299		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
 300
 301	dreq->mirror_count = mirror_count;
 302}
 303
 304static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 305{
 306	struct nfs_direct_req *dreq;
 307
 308	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
 309	if (!dreq)
 310		return NULL;
 311
 312	kref_init(&dreq->kref);
 313	kref_get(&dreq->kref);
 314	init_completion(&dreq->completion);
 315	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
 316	dreq->verf.committed = NFS_INVALID_STABLE_HOW;	/* not set yet */
 317	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
 318	dreq->mirror_count = 1;
 319	spin_lock_init(&dreq->lock);
 320
 321	return dreq;
 322}
 323
 324static void nfs_direct_req_free(struct kref *kref)
 325{
 326	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
 327
 328	nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
 329	if (dreq->l_ctx != NULL)
 330		nfs_put_lock_context(dreq->l_ctx);
 331	if (dreq->ctx != NULL)
 332		put_nfs_open_context(dreq->ctx);
 333	kmem_cache_free(nfs_direct_cachep, dreq);
 334}
 335
 336static void nfs_direct_req_release(struct nfs_direct_req *dreq)
 337{
 338	kref_put(&dreq->kref, nfs_direct_req_free);
 339}
 340
 341ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
 342{
 343	return dreq->bytes_left;
 344}
 345EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
 346
 347/*
 348 * Collects and returns the final error value/byte-count.
 349 */
 350static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
 351{
 352	ssize_t result = -EIOCBQUEUED;
 353
 354	/* Async requests don't wait here */
 355	if (dreq->iocb)
 356		goto out;
 357
 358	result = wait_for_completion_killable(&dreq->completion);
 359
 360	if (!result) {
 361		result = dreq->count;
 362		WARN_ON_ONCE(dreq->count < 0);
 363	}
 364	if (!result)
 365		result = dreq->error;
 366
 367out:
 368	return (ssize_t) result;
 369}
 370
 371/*
 372 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 373 * the iocb is still valid here if this is a synchronous request.
 374 */
 375static void nfs_direct_complete(struct nfs_direct_req *dreq)
 376{
 377	struct inode *inode = dreq->inode;
 378
 379	inode_dio_end(inode);
 380
 381	if (dreq->iocb) {
 382		long res = (long) dreq->error;
 383		if (dreq->count != 0) {
 384			res = (long) dreq->count;
 385			WARN_ON_ONCE(dreq->count < 0);
 386		}
 387		dreq->iocb->ki_complete(dreq->iocb, res, 0);
 388	}
 389
 390	complete(&dreq->completion);
 391
 392	nfs_direct_req_release(dreq);
 393}
 394
 395static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 396{
 397	unsigned long bytes = 0;
 398	struct nfs_direct_req *dreq = hdr->dreq;
 399
 400	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
 
 
 401		goto out_put;
 
 402
 403	spin_lock(&dreq->lock);
 404	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
 405		dreq->error = hdr->error;
 406	else
 407		nfs_direct_good_bytes(dreq, hdr);
 408
 409	spin_unlock(&dreq->lock);
 410
 411	while (!list_empty(&hdr->pages)) {
 412		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 413		struct page *page = req->wb_page;
 414
 415		if (!PageCompound(page) && bytes < hdr->good_bytes)
 
 416			set_page_dirty(page);
 417		bytes += req->wb_bytes;
 418		nfs_list_remove_request(req);
 419		nfs_release_request(req);
 420	}
 421out_put:
 422	if (put_dreq(dreq))
 423		nfs_direct_complete(dreq);
 424	hdr->release(hdr);
 425}
 426
 427static void nfs_read_sync_pgio_error(struct list_head *head)
 428{
 429	struct nfs_page *req;
 430
 431	while (!list_empty(head)) {
 432		req = nfs_list_entry(head->next);
 433		nfs_list_remove_request(req);
 434		nfs_release_request(req);
 435	}
 436}
 437
 438static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
 439{
 440	get_dreq(hdr->dreq);
 441}
 442
 443static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
 444	.error_cleanup = nfs_read_sync_pgio_error,
 445	.init_hdr = nfs_direct_pgio_init,
 446	.completion = nfs_direct_read_completion,
 447};
 448
 449/*
 450 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 451 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 452 * bail and stop sending more reads.  Read length accounting is
 453 * handled automatically by nfs_direct_read_result().  Otherwise, if
 454 * no requests have been sent, just return an error.
 455 */
 456
 457static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 458					      struct iov_iter *iter,
 459					      loff_t pos)
 460{
 461	struct nfs_pageio_descriptor desc;
 462	struct inode *inode = dreq->inode;
 463	ssize_t result = -EINVAL;
 464	size_t requested_bytes = 0;
 465	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);
 466
 467	nfs_pageio_init_read(&desc, dreq->inode, false,
 468			     &nfs_direct_read_completion_ops);
 469	get_dreq(dreq);
 470	desc.pg_dreq = dreq;
 471	inode_dio_begin(inode);
 472
 473	while (iov_iter_count(iter)) {
 474		struct page **pagevec;
 475		size_t bytes;
 476		size_t pgbase;
 477		unsigned npages, i;
 478
 479		result = iov_iter_get_pages_alloc(iter, &pagevec, 
 480						  rsize, &pgbase);
 481		if (result < 0)
 482			break;
 483	
 484		bytes = result;
 485		iov_iter_advance(iter, bytes);
 486		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
 487		for (i = 0; i < npages; i++) {
 488			struct nfs_page *req;
 489			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
 490			/* XXX do we need to do the eof zeroing found in async_filler? */
 491			req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
 492						 pgbase, req_len);
 493			if (IS_ERR(req)) {
 494				result = PTR_ERR(req);
 495				break;
 496			}
 497			req->wb_index = pos >> PAGE_SHIFT;
 498			req->wb_offset = pos & ~PAGE_MASK;
 499			if (!nfs_pageio_add_request(&desc, req)) {
 500				result = desc.pg_error;
 501				nfs_release_request(req);
 502				break;
 503			}
 504			pgbase = 0;
 505			bytes -= req_len;
 506			requested_bytes += req_len;
 507			pos += req_len;
 508			dreq->bytes_left -= req_len;
 509		}
 510		nfs_direct_release_pages(pagevec, npages);
 511		kvfree(pagevec);
 512		if (result < 0)
 513			break;
 514	}
 515
 516	nfs_pageio_complete(&desc);
 517
 518	/*
 519	 * If no bytes were started, return the error, and let the
 520	 * generic layer handle the completion.
 521	 */
 522	if (requested_bytes == 0) {
 523		inode_dio_end(inode);
 524		nfs_direct_req_release(dreq);
 525		return result < 0 ? result : -EIO;
 526	}
 527
 528	if (put_dreq(dreq))
 529		nfs_direct_complete(dreq);
 530	return requested_bytes;
 531}
 532
 533/**
 534 * nfs_file_direct_read - file direct read operation for NFS files
 535 * @iocb: target I/O control block
 536 * @iter: vector of user buffers into which to read data
 537 *
 538 * We use this function for direct reads instead of calling
 539 * generic_file_aio_read() in order to avoid gfar's check to see if
 540 * the request starts before the end of the file.  For that check
 541 * to work, we must generate a GETATTR before each direct read, and
 542 * even then there is a window between the GETATTR and the subsequent
 543 * READ where the file size could change.  Our preference is simply
 544 * to do all reads the application wants, and the server will take
 545 * care of managing the end of file boundary.
 546 *
 547 * This function also eliminates unnecessarily updating the file's
 548 * atime locally, as the NFS server sets the file's atime, and this
 549 * client must read the updated atime from the server back into its
 550 * cache.
 551 */
 552ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
 553{
 554	struct file *file = iocb->ki_filp;
 555	struct address_space *mapping = file->f_mapping;
 556	struct inode *inode = mapping->host;
 557	struct nfs_direct_req *dreq;
 558	struct nfs_lock_context *l_ctx;
 559	ssize_t result = -EINVAL, requested;
 560	size_t count = iov_iter_count(iter);
 561	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 562
 563	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
 564		file, count, (long long) iocb->ki_pos);
 565
 566	result = 0;
 567	if (!count)
 568		goto out;
 569
 570	task_io_account_read(count);
 571
 572	result = -ENOMEM;
 573	dreq = nfs_direct_req_alloc();
 574	if (dreq == NULL)
 575		goto out;
 576
 577	dreq->inode = inode;
 578	dreq->bytes_left = dreq->max_count = count;
 579	dreq->io_start = iocb->ki_pos;
 580	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
 581	l_ctx = nfs_get_lock_context(dreq->ctx);
 582	if (IS_ERR(l_ctx)) {
 583		result = PTR_ERR(l_ctx);
 
 584		goto out_release;
 585	}
 586	dreq->l_ctx = l_ctx;
 587	if (!is_sync_kiocb(iocb))
 588		dreq->iocb = iocb;
 589
 
 
 
 590	nfs_start_io_direct(inode);
 591
 592	NFS_I(inode)->read_io += count;
 593	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
 594
 595	nfs_end_io_direct(inode);
 596
 597	if (requested > 0) {
 598		result = nfs_direct_wait(dreq);
 599		if (result > 0) {
 600			requested -= result;
 601			iocb->ki_pos += result;
 602		}
 603		iov_iter_revert(iter, requested);
 604	} else {
 605		result = requested;
 606	}
 607
 608out_release:
 609	nfs_direct_req_release(dreq);
 610out:
 611	return result;
 612}
 613
 614static void
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 615nfs_direct_write_scan_commit_list(struct inode *inode,
 616				  struct list_head *list,
 617				  struct nfs_commit_info *cinfo)
 618{
 619	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
 620#ifdef CONFIG_NFS_V4_1
 621	if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
 622		NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
 623#endif
 624	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
 625	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
 626}
 627
 628static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 629{
 630	struct nfs_pageio_descriptor desc;
 631	struct nfs_page *req, *tmp;
 632	LIST_HEAD(reqs);
 633	struct nfs_commit_info cinfo;
 634	LIST_HEAD(failed);
 635	int i;
 636
 637	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 638	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 639
 
 
 640	dreq->count = 0;
 641	dreq->verf.committed = NFS_INVALID_STABLE_HOW;
 
 
 642	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
 643	for (i = 0; i < dreq->mirror_count; i++)
 644		dreq->mirrors[i].count = 0;
 645	get_dreq(dreq);
 646
 647	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
 648			      &nfs_direct_write_completion_ops);
 649	desc.pg_dreq = dreq;
 650
 651	req = nfs_list_entry(reqs.next);
 652	nfs_direct_setup_mirroring(dreq, &desc, req);
 653	if (desc.pg_error < 0) {
 654		list_splice_init(&reqs, &failed);
 655		goto out_failed;
 656	}
 657
 658	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
 
 
 659		if (!nfs_pageio_add_request(&desc, req)) {
 660			nfs_list_remove_request(req);
 661			nfs_list_add_request(req, &failed);
 662			spin_lock(&cinfo.inode->i_lock);
 663			dreq->flags = 0;
 664			if (desc.pg_error < 0)
 665				dreq->error = desc.pg_error;
 666			else
 667				dreq->error = -EIO;
 668			spin_unlock(&cinfo.inode->i_lock);
 669		}
 670		nfs_release_request(req);
 671	}
 672	nfs_pageio_complete(&desc);
 673
 674out_failed:
 675	while (!list_empty(&failed)) {
 676		req = nfs_list_entry(failed.next);
 677		nfs_list_remove_request(req);
 678		nfs_unlock_and_release_request(req);
 679	}
 680
 681	if (put_dreq(dreq))
 682		nfs_direct_write_complete(dreq);
 683}
 684
 685static void nfs_direct_commit_complete(struct nfs_commit_data *data)
 686{
 
 687	struct nfs_direct_req *dreq = data->dreq;
 688	struct nfs_commit_info cinfo;
 689	struct nfs_page *req;
 690	int status = data->task.tk_status;
 691
 
 
 
 
 
 
 
 
 
 692	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 693	if (status < 0 || nfs_direct_cmp_commit_data_verf(dreq, data))
 694		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 695
 696	while (!list_empty(&data->pages)) {
 697		req = nfs_list_entry(data->pages.next);
 698		nfs_list_remove_request(req);
 699		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
 700			/* Note the rewrite will go through mds */
 
 
 
 
 
 701			nfs_mark_request_commit(req, NULL, &cinfo, 0);
 702		} else
 703			nfs_release_request(req);
 704		nfs_unlock_and_release_request(req);
 705	}
 706
 707	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
 708		nfs_direct_write_complete(dreq);
 709}
 710
 711static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
 712		struct nfs_page *req)
 713{
 714	struct nfs_direct_req *dreq = cinfo->dreq;
 715
 716	spin_lock(&dreq->lock);
 717	dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 
 718	spin_unlock(&dreq->lock);
 719	nfs_mark_request_commit(req, NULL, cinfo, 0);
 720}
 721
 722static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
 723	.completion = nfs_direct_commit_complete,
 724	.resched_write = nfs_direct_resched_write,
 725};
 726
 727static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 728{
 729	int res;
 730	struct nfs_commit_info cinfo;
 731	LIST_HEAD(mds_list);
 732
 733	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 734	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
 735	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
 736	if (res < 0) /* res == -ENOMEM */
 737		nfs_direct_write_reschedule(dreq);
 738}
 739
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 740static void nfs_direct_write_schedule_work(struct work_struct *work)
 741{
 742	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
 743	int flags = dreq->flags;
 744
 745	dreq->flags = 0;
 746	switch (flags) {
 747		case NFS_ODIRECT_DO_COMMIT:
 748			nfs_direct_commit_schedule(dreq);
 749			break;
 750		case NFS_ODIRECT_RESCHED_WRITES:
 751			nfs_direct_write_reschedule(dreq);
 752			break;
 753		default:
 
 754			nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
 755			nfs_direct_complete(dreq);
 756	}
 757}
 758
 759static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
 760{
 761	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
 762}
 763
 764static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 765{
 766	struct nfs_direct_req *dreq = hdr->dreq;
 767	struct nfs_commit_info cinfo;
 768	bool request_commit = false;
 769	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 770
 771	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
 772		goto out_put;
 773
 774	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 775
 776	spin_lock(&dreq->lock);
 
 
 
 
 777
 778	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
 779		dreq->error = hdr->error;
 780	if (dreq->error == 0) {
 781		nfs_direct_good_bytes(dreq, hdr);
 782		if (nfs_write_need_commit(hdr)) {
 783			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
 784				request_commit = true;
 785			else if (dreq->flags == 0) {
 786				nfs_direct_set_hdr_verf(dreq, hdr);
 787				request_commit = true;
 788				dreq->flags = NFS_ODIRECT_DO_COMMIT;
 789			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
 790				request_commit = true;
 791				if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr))
 792					dreq->flags =
 793						NFS_ODIRECT_RESCHED_WRITES;
 794			}
 795		}
 796	}
 797	spin_unlock(&dreq->lock);
 798
 799	while (!list_empty(&hdr->pages)) {
 800
 801		req = nfs_list_entry(hdr->pages.next);
 802		nfs_list_remove_request(req);
 803		if (request_commit) {
 804			kref_get(&req->wb_kref);
 
 
 805			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
 806				hdr->ds_commit_idx);
 
 
 
 807		}
 808		nfs_unlock_and_release_request(req);
 809	}
 810
 811out_put:
 812	if (put_dreq(dreq))
 813		nfs_direct_write_complete(dreq);
 814	hdr->release(hdr);
 815}
 816
 817static void nfs_write_sync_pgio_error(struct list_head *head)
 818{
 819	struct nfs_page *req;
 820
 821	while (!list_empty(head)) {
 822		req = nfs_list_entry(head->next);
 823		nfs_list_remove_request(req);
 824		nfs_unlock_and_release_request(req);
 825	}
 826}

static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (dreq->error == 0) {
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
		/* fake unstable write to let common nfs resend pages */
		hdr->verf.committed = NFS_UNSTABLE;
		hdr->good_bytes = hdr->args.count;
	}
	spin_unlock(&dreq->lock);
}
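
/*
 * The "fake unstable write" above is a bookkeeping trick rather than a
 * protocol event: by claiming every byte was written UNSTABLE, the
 * generic completion path treats the pages as needing a commit, and
 * since dreq->flags is already NFS_ODIRECT_RESCHED_WRITES they end up
 * being resent in full instead of committed.
 */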

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
	.reschedule_io = nfs_direct_write_reschedule_io,
};

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more writes.  Write length accounting
 * is handled by the completion path (nfs_direct_write_completion()).
 * Otherwise, if no requests have been sent, just return an error.
 *
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       struct iov_iter *iter,
					       loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	inode_dio_begin(inode);

	NFS_I(inode)->write_io += iov_iter_count(iter);
	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  wsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}

			nfs_direct_setup_mirroring(dreq, &desc, req);
			if (desc.pg_error < 0) {
				nfs_free_request(req);
				result = desc.pg_error;
				break;
			}

			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	return requested_bytes;
}
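
/*
 * The page-span arithmetic above is worth a worked example
 * (illustrative numbers only): with PAGE_SIZE 4096, a pinned run of
 * 8192 bytes starting at pgbase 512 covers
 *
 *	npages = (8192 + 512 + 4095) / 4096 = 3
 *
 * pages: 3584 bytes in the first page (PAGE_SIZE - pgbase), 4096 in
 * the second, and the remaining 512 in the third.  pgbase is zeroed
 * after the first nfs_page so subsequent pages start at offset 0.
 */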

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t result = -EINVAL, requested;
	size_t count;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	loff_t pos, end;

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		file, iov_iter_count(iter), (long long) iocb->ki_pos);

	result = generic_write_checks(iocb, iter);
	if (result <= 0)
		return result;
	count = result;
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	pos = iocb->ki_pos;
	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

	task_io_account_write(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_start_io_direct(inode);

	requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);

	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_SHIFT, end);
	}

	nfs_end_io_direct(inode);

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos = pos + result;
			/* XXX: should check the generic_write_sync retval */
			generic_write_sync(iocb, result);
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}
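
/*
 * For reference, a minimal userspace sketch of what drives this path
 * (hypothetical file name; error handling trimmed).  O_DIRECT buffers
 * generally must be suitably aligned, hence posix_memalign():
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *		int fd = open("/mnt/nfs/data", O_WRONLY | O_CREAT | O_DIRECT, 0644);
 *
 *		posix_memalign(&buf, 4096, 4096);
 *		memset(buf, 0, 4096);
 *		pwrite(fd, buf, 4096, 0);	/* reaches nfs_file_direct_write() */
 *		close(fd);
 *		free(buf);
 *		return 0;
 *	}
 *
 * By the time pwrite() returns, the client has ensured the bytes are
 * on stable storage at the server, via stable WRITEs or a COMMIT.
 */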

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}
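
/*
 * A short gloss on the slab cache: every direct request allocates one
 * struct nfs_direct_req from nfs_direct_cachep (see
 * nfs_direct_req_alloc() earlier in this file), so a dedicated,
 * reclaim-accounted cache keeps those allocations cheap and visible in
 * /proc/slabinfo.  A minimal sketch of the alloc/free pairing:
 *
 *	struct nfs_direct_req *dreq;
 *
 *	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(nfs_direct_cachep, dreq);
 */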

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}