v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/fs/nfs/direct.c
   4 *
   5 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
   6 *
   7 * High-performance uncached I/O for the Linux NFS client
   8 *
   9 * There are important applications whose performance or correctness
  10 * depends on uncached access to file data.  Database clusters
  11 * (multiple copies of the same instance running on separate hosts)
  12 * implement their own cache coherency protocol that subsumes file
  13 * system cache protocols.  Applications that process datasets
  14 * considerably larger than the client's memory do not always benefit
  15 * from a local cache.  A streaming video server, for instance, has no
  16 * need to cache the contents of a file.
  17 *
  18 * When an application requests uncached I/O, all read and write requests
  19 * are made directly to the server; data stored or fetched via these
  20 * requests is not cached in the Linux page cache.  The client does not
  21 * correct unaligned requests from applications.  All requested bytes are
  22 * held on permanent storage before a direct write system call returns to
  23 * an application.
  24 *
  25 * Solaris implements an uncached I/O facility called directio() that
  26 * is used for backups and sequential I/O to very large files.  Solaris
  27 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
  28 * an undocumented mount option.
  29 *
  30 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
  31 * help from Andrew Morton.
  32 *
  33 * 18 Dec 2001	Initial implementation for 2.4  --cel
  34 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
  35 * 08 Jun 2003	Port to 2.5 APIs  --cel
  36 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
  37 * 15 Sep 2004	Parallel async reads  --cel
  38 * 04 May 2005	support O_DIRECT with aio  --cel
  39 *
  40 */
  41
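/*
 * Illustrative user-space sketch (not part of the original file): how an
 * application typically asks for the uncached I/O path described above.
 * The mount path is hypothetical, and O_DIRECT generally wants the
 * buffer, offset, and length aligned (4 KiB here).  Guarded by #if 0 so
 * it cannot be mistaken for kernel code.
 */
#if 0
#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static int example_direct_read(void)
{
	void *buf;
	ssize_t n;
	int fd = open("/mnt/nfs/data.bin", O_RDONLY | O_DIRECT);

	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, 4096, 4096)) {	/* aligned buffer */
		close(fd);
		return -1;
	}
	n = pread(fd, buf, 4096, 0);	/* bypasses the page cache */
	free(buf);
	close(fd);
	return n < 0 ? -1 : 0;
}
#endif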
  42#include <linux/errno.h>
  43#include <linux/sched.h>
  44#include <linux/kernel.h>
  45#include <linux/file.h>
  46#include <linux/pagemap.h>
  47#include <linux/kref.h>
  48#include <linux/slab.h>
  49#include <linux/task_io_accounting_ops.h>
  50#include <linux/module.h>
  51
  52#include <linux/nfs_fs.h>
  53#include <linux/nfs_page.h>
  54#include <linux/sunrpc/clnt.h>
  55
  56#include <linux/uaccess.h>
  57#include <linux/atomic.h>
  58
  59#include "internal.h"
  60#include "iostat.h"
  61#include "pnfs.h"
  62
  63#define NFSDBG_FACILITY		NFSDBG_VFS
  64
  65static struct kmem_cache *nfs_direct_cachep;
  66
  67struct nfs_direct_req {
  68	struct kref		kref;		/* release manager */
  69
  70	/* I/O parameters */
  71	struct nfs_open_context	*ctx;		/* file open context info */
  72	struct nfs_lock_context *l_ctx;		/* Lock context info */
  73	struct kiocb *		iocb;		/* controlling i/o request */
  74	struct inode *		inode;		/* target file of i/o */
  75
  76	/* completion state */
  77	atomic_t		io_count;	/* i/os we're waiting for */
  78	spinlock_t		lock;		/* protect completion state */
  79
  80	loff_t			io_start;	/* Start offset for I/O */
  81	ssize_t			count,		/* bytes actually processed */
  82				max_count,	/* max expected count */
  83				bytes_left,	/* bytes left to be sent */
  84				error;		/* any reported error */
  85	struct completion	completion;	/* wait for i/o completion */
  86
  87	/* commit state */
  88	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
  89	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
  90	struct work_struct	work;
  91	int			flags;
  92	/* for write */
  93#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
  94#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
  95	/* for read */
  96#define NFS_ODIRECT_SHOULD_DIRTY	(3)	/* dirty user-space page after read */
  97	struct nfs_writeverf	verf;		/* unstable write verifier */
  98};
  99
 100static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
 101static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
 102static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
 103static void nfs_direct_write_schedule_work(struct work_struct *work);
 104
 105static inline void get_dreq(struct nfs_direct_req *dreq)
 106{
 107	atomic_inc(&dreq->io_count);
 108}
 109
 110static inline int put_dreq(struct nfs_direct_req *dreq)
 111{
 112	return atomic_dec_and_test(&dreq->io_count);
 113}
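/*
 * A minimal user-space analogue of the io_count idiom above, assuming
 * nothing NFS-specific (names hypothetical): one reference per in-flight
 * I/O plus one for the submitter; whoever drops the count to zero runs
 * the completion.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

struct example_req { atomic_int io_count; };

static void example_get(struct example_req *r)
{
	atomic_fetch_add(&r->io_count, 1);
}

static bool example_put(struct example_req *r)
{
	/* Returns true for the caller that dropped the last reference. */
	return atomic_fetch_sub(&r->io_count, 1) == 1;
}
#endif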
 114
 115static void
 116nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
 117			    const struct nfs_pgio_header *hdr,
 118			    ssize_t dreq_len)
 119{
 120	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
 121	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
 122		return;
 123	if (dreq->max_count >= dreq_len) {
 124		dreq->max_count = dreq_len;
 125		if (dreq->count > dreq_len)
 126			dreq->count = dreq_len;
 127
 128		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
 129			dreq->error = hdr->error;
 130		else /* Clear outstanding error if this is EOF */
 131			dreq->error = 0;
 132	}
 133}
 134
 135static void
 136nfs_direct_count_bytes(struct nfs_direct_req *dreq,
 137		       const struct nfs_pgio_header *hdr)
 138{
 139	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
 140	ssize_t dreq_len = 0;
 141
 142	if (hdr_end > dreq->io_start)
 143		dreq_len = hdr_end - dreq->io_start;
 144
 145	nfs_direct_handle_truncated(dreq, hdr, dreq_len);
 146
 147	if (dreq_len > dreq->max_count)
 148		dreq_len = dreq->max_count;
 149
 150	if (dreq->count < dreq_len)
 151		dreq->count = dreq_len;
 152}
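/*
 * Worked example for the accounting above (illustrative numbers, 4 KiB
 * pages): with dreq->io_start = 0, a reply with hdr->io_start = 4096 and
 * hdr->good_bytes = 4096 yields hdr_end = 8192 and dreq_len = 8192;
 * dreq->count then only ever grows, clamped to dreq->max_count.
 */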
 153
 154/*
 155 * nfs_direct_select_verf - select the right verifier
 156 * @dreq - direct request possibly spanning multiple servers
 157 * @ds_clp - nfs_client of data server or NULL if MDS / non-pnfs
 158 * @commit_idx - commit bucket index for the DS
 159 *
 160 * returns the correct verifier to use given the role of the server
 161 */
 162static struct nfs_writeverf *
 163nfs_direct_select_verf(struct nfs_direct_req *dreq,
 164		       struct nfs_client *ds_clp,
 165		       int commit_idx)
 166{
 167	struct nfs_writeverf *verfp = &dreq->verf;
 168
 169#ifdef CONFIG_NFS_V4_1
 170	/*
 171	 * If pNFS is in use, use the DS verf, unless commit_through_mds
 172	 * was set for the layout segment, in which case nbuckets is zero.
 173	 */
 174	if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
 175		if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
 176			verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
 177		else
 178			WARN_ON_ONCE(1);
 179	}
 180#endif
 181	return verfp;
 182}
 183
 184
 185/*
 186 * nfs_direct_set_hdr_verf - set the write/commit verifier
 187 * @dreq - direct request possibly spanning multiple servers
 188 * @hdr - pageio header to validate against previously seen verfs
 189 *
 190 * Set the server's (MDS or DS) "seen" verifier
 191 */
 192static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
 193				    struct nfs_pgio_header *hdr)
 194{
 195	struct nfs_writeverf *verfp;
 196
 197	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
 198	WARN_ON_ONCE(verfp->committed >= 0);
 199	memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
 200	WARN_ON_ONCE(verfp->committed < 0);
 201}
 202
 203static int nfs_direct_cmp_verf(const struct nfs_writeverf *v1,
 204		const struct nfs_writeverf *v2)
 205{
 206	return nfs_write_verifier_cmp(&v1->verifier, &v2->verifier);
 207}
 208
 209/*
 210 * nfs_direct_cmp_hdr_verf - compare verifier for pgio header
 211 * @dreq - direct request possibly spanning multiple servers
 212 * @hdr - pageio header to validate against previously seen verf
 213 *
 214 * set the server's "seen" verf if not initialized.
 215 * returns result of comparison between @hdr->verf and the "seen"
 216 * verf of the server used by @hdr (DS or MDS)
 217 */
 218static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
 219					  struct nfs_pgio_header *hdr)
 220{
 221	struct nfs_writeverf *verfp;
 222
 223	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
 224	if (verfp->committed < 0) {
 225		nfs_direct_set_hdr_verf(dreq, hdr);
 226		return 0;
 227	}
 228	return nfs_direct_cmp_verf(verfp, &hdr->verf);
 229}
 230
 231/*
 232 * nfs_direct_cmp_commit_data_verf - compare verifier for commit data
 233 * @dreq - direct request possibly spanning multiple servers
 234 * @data - commit data to validate against previously seen verf
 235 *
 236 * returns result of comparison between @data->verf and the verf of
 237 * the server used by @data (DS or MDS)
 238 */
 239static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
 240					   struct nfs_commit_data *data)
 241{
 242	struct nfs_writeverf *verfp;
 243
 244	verfp = nfs_direct_select_verf(dreq, data->ds_clp,
 245					 data->ds_commit_index);
 246
 247	/* verifier not set so always fail */
 248	if (verfp->committed < 0)
 249		return 1;
 250
 251	return nfs_direct_cmp_verf(verfp, &data->verf);
 252}
 253
 254/**
 255 * nfs_direct_IO - NFS address space operation for direct I/O
 256 * @iocb: target I/O control block
 257 * @iter: I/O buffer
 258 *
 259 * The presence of this routine in the address space ops vector means
 260 * the NFS client supports direct I/O. However, for most direct IO, we
 261 * shunt off direct read and write requests before the VFS gets them,
 262 * so this method is only ever called for swap.
 263 */
 264ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 265{
 266	struct inode *inode = iocb->ki_filp->f_mapping->host;
 267
 268	/* we only support swap files calling nfs_direct_IO */
 269	if (!IS_SWAPFILE(inode))
 270		return 0;
 271
 272	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
 273
 274	if (iov_iter_rw(iter) == READ)
 275		return nfs_file_direct_read(iocb, iter);
 276	return nfs_file_direct_write(iocb, iter);
 277}
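/*
 * This function is reached through the address_space operations: the NFS
 * client points .direct_IO at nfs_direct_IO in its aops (see
 * fs/nfs/file.c), which is what advertises direct-I/O support to the VFS
 * even though ordinary O_DIRECT requests are intercepted earlier.
 */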
 278
 279static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 280{
 281	unsigned int i;
 282	for (i = 0; i < npages; i++)
 283		put_page(pages[i]);
 284}
 285
 286void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
 287			      struct nfs_direct_req *dreq)
 288{
 289	cinfo->inode = dreq->inode;
 290	cinfo->mds = &dreq->mds_cinfo;
 291	cinfo->ds = &dreq->ds_cinfo;
 292	cinfo->dreq = dreq;
 293	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
 294}
 295
 296static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 297{
 298	struct nfs_direct_req *dreq;
 299
 300	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
 301	if (!dreq)
 302		return NULL;
 303
 304	kref_init(&dreq->kref);
 305	kref_get(&dreq->kref);
 306	init_completion(&dreq->completion);
 307	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
 308	dreq->verf.committed = NFS_INVALID_STABLE_HOW;	/* not set yet */
 309	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
 310	spin_lock_init(&dreq->lock);
 311
 312	return dreq;
 313}
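/*
 * Note the two references taken above: kref_init() starts the count at
 * one and kref_get() raises it to two -- one reference is dropped by the
 * I/O completion path, the other by the submitter's final
 * nfs_direct_req_release().
 */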
 314
 315static void nfs_direct_req_free(struct kref *kref)
 316{
 317	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
 318
 319	nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
 320	if (dreq->l_ctx != NULL)
 321		nfs_put_lock_context(dreq->l_ctx);
 322	if (dreq->ctx != NULL)
 323		put_nfs_open_context(dreq->ctx);
 324	kmem_cache_free(nfs_direct_cachep, dreq);
 325}
 326
 327static void nfs_direct_req_release(struct nfs_direct_req *dreq)
 328{
 329	kref_put(&dreq->kref, nfs_direct_req_free);
 330}
 331
 332ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
 333{
 334	return dreq->bytes_left;
 335}
 336EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
 337
 338/*
 339 * Collects and returns the final error value/byte-count.
 340 */
 341static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
 342{
 343	ssize_t result = -EIOCBQUEUED;
 344
 345	/* Async requests don't wait here */
 346	if (dreq->iocb)
 347		goto out;
 348
 349	result = wait_for_completion_killable(&dreq->completion);
 350
 351	if (!result) {
 352		result = dreq->count;
 353		WARN_ON_ONCE(dreq->count < 0);
 354	}
 355	if (!result)
 356		result = dreq->error;
 357
 358out:
 359	return (ssize_t) result;
 360}
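/*
 * Asynchronous requests return -EIOCBQUEUED here without waiting; their
 * final byte count or error is delivered later through
 * dreq->iocb->ki_complete() in nfs_direct_complete() below.
 */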
 361
 362/*
 363 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 364 * the iocb is still valid here if this is a synchronous request.
 365 */
 366static void nfs_direct_complete(struct nfs_direct_req *dreq)
 367{
 368	struct inode *inode = dreq->inode;
 369
 370	inode_dio_end(inode);
 371
 372	if (dreq->iocb) {
 373		long res = (long) dreq->error;
 374		if (dreq->count != 0) {
 375			res = (long) dreq->count;
 376			WARN_ON_ONCE(dreq->count < 0);
 377		}
 378		dreq->iocb->ki_complete(dreq->iocb, res, 0);
 379	}
 380
 381	complete(&dreq->completion);
 382
 383	nfs_direct_req_release(dreq);
 384}
 385
 386static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 387{
 388	unsigned long bytes = 0;
 389	struct nfs_direct_req *dreq = hdr->dreq;
 390
 391	spin_lock(&dreq->lock);
 392	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
 393		spin_unlock(&dreq->lock);
 394		goto out_put;
 395	}
 396
 397	nfs_direct_count_bytes(dreq, hdr);
 398	spin_unlock(&dreq->lock);
 399
 400	while (!list_empty(&hdr->pages)) {
 401		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 402		struct page *page = req->wb_page;
 403
 404		if (!PageCompound(page) && bytes < hdr->good_bytes &&
 405		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
 406			set_page_dirty(page);
 407		bytes += req->wb_bytes;
 408		nfs_list_remove_request(req);
 409		nfs_release_request(req);
 410	}
 411out_put:
 412	if (put_dreq(dreq))
 413		nfs_direct_complete(dreq);
 414	hdr->release(hdr);
 415}
 416
 417static void nfs_read_sync_pgio_error(struct list_head *head, int error)
 418{
 419	struct nfs_page *req;
 420
 421	while (!list_empty(head)) {
 422		req = nfs_list_entry(head->next);
 423		nfs_list_remove_request(req);
 424		nfs_release_request(req);
 425	}
 426}
 427
 428static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
 429{
 430	get_dreq(hdr->dreq);
 431}
 432
 433static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
 434	.error_cleanup = nfs_read_sync_pgio_error,
 435	.init_hdr = nfs_direct_pgio_init,
 436	.completion = nfs_direct_read_completion,
 437};
 438
 439/*
 440 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 441 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 442 * bail and stop sending more reads.  Read length accounting is
 443 * handled automatically by nfs_direct_read_result().  Otherwise, if
 444 * no requests have been sent, just return an error.
 445 */
 446
 447static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 448					      struct iov_iter *iter,
 449					      loff_t pos)
 450{
 451	struct nfs_pageio_descriptor desc;
 452	struct inode *inode = dreq->inode;
 453	ssize_t result = -EINVAL;
 454	size_t requested_bytes = 0;
 455	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);
 456
 457	nfs_pageio_init_read(&desc, dreq->inode, false,
 458			     &nfs_direct_read_completion_ops);
 459	get_dreq(dreq);
 460	desc.pg_dreq = dreq;
 461	inode_dio_begin(inode);
 462
 463	while (iov_iter_count(iter)) {
 464		struct page **pagevec;
 465		size_t bytes;
 466		size_t pgbase;
 467		unsigned npages, i;
 468
 469		result = iov_iter_get_pages_alloc(iter, &pagevec, 
 470						  rsize, &pgbase);
 471		if (result < 0)
 472			break;
 473	
 474		bytes = result;
 475		iov_iter_advance(iter, bytes);
 476		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
 477		for (i = 0; i < npages; i++) {
 478			struct nfs_page *req;
 479			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
 480			/* XXX do we need to do the eof zeroing found in async_filler? */
 481			req = nfs_create_request(dreq->ctx, pagevec[i],
 482						 pgbase, req_len);
 483			if (IS_ERR(req)) {
 484				result = PTR_ERR(req);
 485				break;
 486			}
 487			req->wb_index = pos >> PAGE_SHIFT;
 488			req->wb_offset = pos & ~PAGE_MASK;
 489			if (!nfs_pageio_add_request(&desc, req)) {
 490				result = desc.pg_error;
 491				nfs_release_request(req);
 492				break;
 493			}
 494			pgbase = 0;
 495			bytes -= req_len;
 496			requested_bytes += req_len;
 497			pos += req_len;
 498			dreq->bytes_left -= req_len;
 499		}
 500		nfs_direct_release_pages(pagevec, npages);
 501		kvfree(pagevec);
 502		if (result < 0)
 503			break;
 504	}
 505
 506	nfs_pageio_complete(&desc);
 507
 508	/*
 509	 * If no bytes were started, return the error, and let the
 510	 * generic layer handle the completion.
 511	 */
 512	if (requested_bytes == 0) {
 513		inode_dio_end(inode);
 514		nfs_direct_req_release(dreq);
 515		return result < 0 ? result : -EIO;
 516	}
 517
 518	if (put_dreq(dreq))
 519		nfs_direct_complete(dreq);
 520	return requested_bytes;
 521}
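/*
 * Page math in the loop above, with illustrative numbers on a 4 KiB-page
 * machine: a 10000-byte iov_iter_get_pages_alloc() result starting at
 * pgbase = 512 spans npages = (10000 + 512 + 4095) / 4096 = 3 pages; the
 * first nfs_page covers 4096 - 512 = 3584 bytes, and later pages start
 * at pgbase = 0.
 */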
 522
 523/**
 524 * nfs_file_direct_read - file direct read operation for NFS files
 525 * @iocb: target I/O control block
 526 * @iter: vector of user buffers into which to read data
 527 *
 528 * We use this function for direct reads instead of calling
 529 * generic_file_aio_read() in order to avoid gfar's check to see if
 530 * the request starts before the end of the file.  For that check
 531 * to work, we must generate a GETATTR before each direct read, and
 532 * even then there is a window between the GETATTR and the subsequent
 533 * READ where the file size could change.  Our preference is simply
 534 * to do all reads the application wants, and the server will take
 535 * care of managing the end of file boundary.
 536 *
 537 * This function also eliminates unnecessarily updating the file's
 538 * atime locally, as the NFS server sets the file's atime, and this
 539 * client must read the updated atime from the server back into its
 540 * cache.
 541 */
 542ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
 543{
 544	struct file *file = iocb->ki_filp;
 545	struct address_space *mapping = file->f_mapping;
 546	struct inode *inode = mapping->host;
 547	struct nfs_direct_req *dreq;
 548	struct nfs_lock_context *l_ctx;
 549	ssize_t result = -EINVAL, requested;
 550	size_t count = iov_iter_count(iter);
 551	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 552
 553	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
 554		file, count, (long long) iocb->ki_pos);
 555
 556	result = 0;
 557	if (!count)
 558		goto out;
 559
 560	task_io_account_read(count);
 561
 562	result = -ENOMEM;
 563	dreq = nfs_direct_req_alloc();
 564	if (dreq == NULL)
 565		goto out;
 566
 567	dreq->inode = inode;
 568	dreq->bytes_left = dreq->max_count = count;
 569	dreq->io_start = iocb->ki_pos;
 570	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
 571	l_ctx = nfs_get_lock_context(dreq->ctx);
 572	if (IS_ERR(l_ctx)) {
 573		result = PTR_ERR(l_ctx);
 574		goto out_release;
 575	}
 576	dreq->l_ctx = l_ctx;
 577	if (!is_sync_kiocb(iocb))
 578		dreq->iocb = iocb;
 579
 580	if (iter_is_iovec(iter))
 581		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
 582
 583	nfs_start_io_direct(inode);
 584
 585	NFS_I(inode)->read_io += count;
 586	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
 587
 588	nfs_end_io_direct(inode);
 589
 590	if (requested > 0) {
 591		result = nfs_direct_wait(dreq);
 592		if (result > 0) {
 593			requested -= result;
 594			iocb->ki_pos += result;
 595		}
 596		iov_iter_revert(iter, requested);
 597	} else {
 598		result = requested;
 599	}
 600
 601out_release:
 602	nfs_direct_req_release(dreq);
 603out:
 604	return result;
 605}
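/*
 * The iov_iter_revert() above rewinds the iterator over bytes that were
 * scheduled but not reported as completed, so after a short direct read
 * @iter sits exactly past the bytes accounted for in the return value.
 */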
 606
 607static void
 608nfs_direct_write_scan_commit_list(struct inode *inode,
 609				  struct list_head *list,
 610				  struct nfs_commit_info *cinfo)
 611{
 612	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
 613#ifdef CONFIG_NFS_V4_1
 614	if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
 615		NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
 616#endif
 617	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
 618	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
 619}
 620
 621static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 622{
 623	struct nfs_pageio_descriptor desc;
 624	struct nfs_page *req, *tmp;
 625	LIST_HEAD(reqs);
 626	struct nfs_commit_info cinfo;
 627	LIST_HEAD(failed);
 628
 629	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 630	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 631
 632	dreq->count = 0;
 633	dreq->max_count = 0;
 634	list_for_each_entry(req, &reqs, wb_list)
 635		dreq->max_count += req->wb_bytes;
 636	dreq->verf.committed = NFS_INVALID_STABLE_HOW;
 637	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
 638	get_dreq(dreq);
 639
 640	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
 641			      &nfs_direct_write_completion_ops);
 642	desc.pg_dreq = dreq;
 643
 644	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
 645		/* Bump the transmission count */
 646		req->wb_nio++;
 647		if (!nfs_pageio_add_request(&desc, req)) {
 648			nfs_list_move_request(req, &failed);
 649			spin_lock(&cinfo.inode->i_lock);
 650			dreq->flags = 0;
 651			if (desc.pg_error < 0)
 652				dreq->error = desc.pg_error;
 653			else
 654				dreq->error = -EIO;
 655			spin_unlock(&cinfo.inode->i_lock);
 656		}
 657		nfs_release_request(req);
 658	}
 659	nfs_pageio_complete(&desc);
 660
 661	while (!list_empty(&failed)) {
 662		req = nfs_list_entry(failed.next);
 663		nfs_list_remove_request(req);
 664		nfs_unlock_and_release_request(req);
 665	}
 666
 667	if (put_dreq(dreq))
 668		nfs_direct_write_complete(dreq);
 669}
 670
 671static void nfs_direct_commit_complete(struct nfs_commit_data *data)
 672{
 673	struct nfs_direct_req *dreq = data->dreq;
 674	struct nfs_commit_info cinfo;
 675	struct nfs_page *req;
 676	int status = data->task.tk_status;
 677
 678	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 679	if (status < 0 || nfs_direct_cmp_commit_data_verf(dreq, data))
 680		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 681
 682	while (!list_empty(&data->pages)) {
 683		req = nfs_list_entry(data->pages.next);
 684		nfs_list_remove_request(req);
 685		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
 686			/*
 687			 * Despite the reboot, the write was successful,
 688			 * so reset wb_nio.
 689			 */
 690			req->wb_nio = 0;
 691			/* Note the rewrite will go through mds */
 692			nfs_mark_request_commit(req, NULL, &cinfo, 0);
 693		} else
 694			nfs_release_request(req);
 695		nfs_unlock_and_release_request(req);
 696	}
 697
 698	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
 699		nfs_direct_write_complete(dreq);
 700}
 701
 702static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
 703		struct nfs_page *req)
 704{
 705	struct nfs_direct_req *dreq = cinfo->dreq;
 706
 707	spin_lock(&dreq->lock);
 708	dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 709	spin_unlock(&dreq->lock);
 710	nfs_mark_request_commit(req, NULL, cinfo, 0);
 711}
 712
 713static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
 714	.completion = nfs_direct_commit_complete,
 715	.resched_write = nfs_direct_resched_write,
 716};
 717
 718static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 719{
 720	int res;
 721	struct nfs_commit_info cinfo;
 722	LIST_HEAD(mds_list);
 723
 724	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 725	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
 726	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
 727	if (res < 0) /* res == -ENOMEM */
 728		nfs_direct_write_reschedule(dreq);
 729}
 730
 731static void nfs_direct_write_schedule_work(struct work_struct *work)
 732{
 733	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
 734	int flags = dreq->flags;
 735
 736	dreq->flags = 0;
 737	switch (flags) {
 738		case NFS_ODIRECT_DO_COMMIT:
 739			nfs_direct_commit_schedule(dreq);
 740			break;
 741		case NFS_ODIRECT_RESCHED_WRITES:
 742			nfs_direct_write_reschedule(dreq);
 743			break;
 744		default:
 745			nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
 746			nfs_direct_complete(dreq);
 747	}
 748}
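/*
 * Write completion state machine, as dispatched above:
 *
 *   NFS_ODIRECT_DO_COMMIT      -> send COMMIT for data written unstably
 *   NFS_ODIRECT_RESCHED_WRITES -> resend the writes (commit failure or
 *                                 verifier mismatch, i.e. a possible
 *                                 server reboot)
 *   otherwise                  -> invalidate the mapping and complete
 */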
 749
 750static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
 751{
 752	queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
 753}
 754
 755static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 756{
 757	struct nfs_direct_req *dreq = hdr->dreq;
 758	struct nfs_commit_info cinfo;
 759	bool request_commit = false;
 760	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 761
 762	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 763
 764	spin_lock(&dreq->lock);
 765	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
 766		spin_unlock(&dreq->lock);
 767		goto out_put;
 768	}
 769
 770	nfs_direct_count_bytes(dreq, hdr);
 771	if (hdr->good_bytes != 0) {
 772		if (nfs_write_need_commit(hdr)) {
 773			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
 774				request_commit = true;
 775			else if (dreq->flags == 0) {
 776				nfs_direct_set_hdr_verf(dreq, hdr);
 777				request_commit = true;
 778				dreq->flags = NFS_ODIRECT_DO_COMMIT;
 779			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
 780				request_commit = true;
 781				if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr))
 782					dreq->flags =
 783						NFS_ODIRECT_RESCHED_WRITES;
 784			}
 785		}
 786	}
 787	spin_unlock(&dreq->lock);
 788
 789	while (!list_empty(&hdr->pages)) {
 790
 791		req = nfs_list_entry(hdr->pages.next);
 792		nfs_list_remove_request(req);
 793		if (request_commit) {
 794			kref_get(&req->wb_kref);
 795			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
 796				hdr->ds_commit_idx);
 797		}
 798		nfs_unlock_and_release_request(req);
 799	}
 800
 801out_put:
 802	if (put_dreq(dreq))
 803		nfs_direct_write_complete(dreq);
 804	hdr->release(hdr);
 805}
 806
 807static void nfs_write_sync_pgio_error(struct list_head *head, int error)
 808{
 809	struct nfs_page *req;
 810
 811	while (!list_empty(head)) {
 812		req = nfs_list_entry(head->next);
 813		nfs_list_remove_request(req);
 814		nfs_unlock_and_release_request(req);
 815	}
 816}
 817
 818static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
 819{
 820	struct nfs_direct_req *dreq = hdr->dreq;
 821
 822	spin_lock(&dreq->lock);
 823	if (dreq->error == 0) {
 824		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 825		/* fake unstable write to let common nfs resend pages */
 826		hdr->verf.committed = NFS_UNSTABLE;
 827		hdr->good_bytes = hdr->args.count;
 828	}
 829	spin_unlock(&dreq->lock);
 830}
 831
 832static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
 833	.error_cleanup = nfs_write_sync_pgio_error,
 834	.init_hdr = nfs_direct_pgio_init,
 835	.completion = nfs_direct_write_completion,
 836	.reschedule_io = nfs_direct_write_reschedule_io,
 837};
 838
 839
 840/*
 841 * NB: Return the value of the first error return code.  Subsequent
 842 *     errors after the first one are ignored.
 843 */
 844/*
 845 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 846 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 847 * bail and stop sending more writes.  Write length accounting is
 848 * handled automatically by nfs_direct_write_result().  Otherwise, if
 849 * no requests have been sent, just return an error.
 850 */
 851static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 852					       struct iov_iter *iter,
 853					       loff_t pos)
 854{
 855	struct nfs_pageio_descriptor desc;
 856	struct inode *inode = dreq->inode;
 857	ssize_t result = 0;
 858	size_t requested_bytes = 0;
 859	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
 860
 861	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
 862			      &nfs_direct_write_completion_ops);
 863	desc.pg_dreq = dreq;
 864	get_dreq(dreq);
 865	inode_dio_begin(inode);
 866
 867	NFS_I(inode)->write_io += iov_iter_count(iter);
 868	while (iov_iter_count(iter)) {
 869		struct page **pagevec;
 870		size_t bytes;
 871		size_t pgbase;
 872		unsigned npages, i;
 873
 874		result = iov_iter_get_pages_alloc(iter, &pagevec, 
 875						  wsize, &pgbase);
 876		if (result < 0)
 877			break;
 878
 879		bytes = result;
 880		iov_iter_advance(iter, bytes);
 881		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
 882		for (i = 0; i < npages; i++) {
 883			struct nfs_page *req;
 884			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
 885
 886			req = nfs_create_request(dreq->ctx, pagevec[i],
 887						 pgbase, req_len);
 888			if (IS_ERR(req)) {
 889				result = PTR_ERR(req);
 890				break;
 891			}
 892
 893			if (desc.pg_error < 0) {
 894				nfs_free_request(req);
 895				result = desc.pg_error;
 896				break;
 897			}
 898
 899			nfs_lock_request(req);
 900			req->wb_index = pos >> PAGE_SHIFT;
 901			req->wb_offset = pos & ~PAGE_MASK;
 902			if (!nfs_pageio_add_request(&desc, req)) {
 903				result = desc.pg_error;
 904				nfs_unlock_and_release_request(req);
 905				break;
 906			}
 907			pgbase = 0;
 908			bytes -= req_len;
 909			requested_bytes += req_len;
 910			pos += req_len;
 911			dreq->bytes_left -= req_len;
 912		}
 913		nfs_direct_release_pages(pagevec, npages);
 914		kvfree(pagevec);
 915		if (result < 0)
 916			break;
 917	}
 918	nfs_pageio_complete(&desc);
 919
 920	/*
 921	 * If no bytes were started, return the error, and let the
 922	 * generic layer handle the completion.
 923	 */
 924	if (requested_bytes == 0) {
 925		inode_dio_end(inode);
 926		nfs_direct_req_release(dreq);
 927		return result < 0 ? result : -EIO;
 928	}
 929
 930	if (put_dreq(dreq))
 931		nfs_direct_write_complete(dreq);
 932	return requested_bytes;
 933}
 934
 935/**
 936 * nfs_file_direct_write - file direct write operation for NFS files
 937 * @iocb: target I/O control block
 938 * @iter: vector of user buffers from which to write data
 939 *
 940 * We use this function for direct writes instead of calling
 941 * generic_file_aio_write() in order to avoid taking the inode
 942 * semaphore and updating the i_size.  The NFS server will set
 943 * the new i_size and this client must read the updated size
 944 * back into its cache.  We let the server do generic write
 945 * parameter checking and report problems.
 946 *
 947 * We eliminate local atime updates, see direct read above.
 948 *
 949 * We avoid unnecessary page cache invalidations for normal cached
 950 * readers of this file.
 951 *
 952 * Note that O_APPEND is not supported for NFS direct writes, as there
 953 * is no atomic O_APPEND write facility in the NFS protocol.
 954 */
 955ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 956{
 957	ssize_t result = -EINVAL, requested;
 958	size_t count;
 959	struct file *file = iocb->ki_filp;
 960	struct address_space *mapping = file->f_mapping;
 961	struct inode *inode = mapping->host;
 962	struct nfs_direct_req *dreq;
 963	struct nfs_lock_context *l_ctx;
 964	loff_t pos, end;
 965
 966	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
 967		file, iov_iter_count(iter), (long long) iocb->ki_pos);
 968
 969	result = generic_write_checks(iocb, iter);
 970	if (result <= 0)
 971		return result;
 972	count = result;
 973	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
 974
 975	pos = iocb->ki_pos;
 976	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
 977
 978	task_io_account_write(count);
 979
 980	result = -ENOMEM;
 981	dreq = nfs_direct_req_alloc();
 982	if (!dreq)
 983		goto out;
 984
 985	dreq->inode = inode;
 986	dreq->bytes_left = dreq->max_count = count;
 987	dreq->io_start = pos;
 988	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
 989	l_ctx = nfs_get_lock_context(dreq->ctx);
 990	if (IS_ERR(l_ctx)) {
 991		result = PTR_ERR(l_ctx);
 992		goto out_release;
 993	}
 994	dreq->l_ctx = l_ctx;
 995	if (!is_sync_kiocb(iocb))
 996		dreq->iocb = iocb;
 997
 998	nfs_start_io_direct(inode);
 999
1000	requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);
1001
1002	if (mapping->nrpages) {
1003		invalidate_inode_pages2_range(mapping,
1004					      pos >> PAGE_SHIFT, end);
1005	}
1006
1007	nfs_end_io_direct(inode);
1008
1009	if (requested > 0) {
1010		result = nfs_direct_wait(dreq);
1011		if (result > 0) {
1012			requested -= result;
1013			iocb->ki_pos = pos + result;
1014			/* XXX: should check the generic_write_sync retval */
1015			generic_write_sync(iocb, result);
1016		}
1017		iov_iter_revert(iter, requested);
1018	} else {
1019		result = requested;
1020	}
1021out_release:
1022	nfs_direct_req_release(dreq);
1023out:
1024	return result;
1025}
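/*
 * Illustrative user-space counterpart (not part of the original file): a
 * direct write relying on the header-comment guarantee that all bytes
 * are on stable storage before the syscall returns.  Path and sizes are
 * hypothetical; the usual O_DIRECT alignment rules apply.
 */
#if 0
#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int example_direct_write(void)
{
	void *buf;
	ssize_t n;
	int fd = open("/mnt/nfs/out.bin", O_WRONLY | O_CREAT | O_DIRECT, 0644);

	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return -1;
	}
	memset(buf, 0xab, 4096);
	n = pwrite(fd, buf, 4096, 0);	/* stable on the server on return */
	free(buf);
	close(fd);
	return n == 4096 ? 0 : -1;
}
#endif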
1026
1027/**
1028 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
1029 *
1030 */
1031int __init nfs_init_directcache(void)
1032{
1033	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
1034						sizeof(struct nfs_direct_req),
1035						0, (SLAB_RECLAIM_ACCOUNT|
1036							SLAB_MEM_SPREAD),
1037						NULL);
1038	if (nfs_direct_cachep == NULL)
1039		return -ENOMEM;
1040
1041	return 0;
1042}
1043
1044/**
1045 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
1046 *
1047 */
1048void nfs_destroy_directcache(void)
1049{
1050	kmem_cache_destroy(nfs_direct_cachep);
1051}
v3.5.6
  1/*
  2 * linux/fs/nfs/direct.c
  3 *
  4 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
  5 *
  6 * High-performance uncached I/O for the Linux NFS client
  7 *
  8 * There are important applications whose performance or correctness
  9 * depends on uncached access to file data.  Database clusters
 10 * (multiple copies of the same instance running on separate hosts)
 11 * implement their own cache coherency protocol that subsumes file
 12 * system cache protocols.  Applications that process datasets
 13 * considerably larger than the client's memory do not always benefit
 14 * from a local cache.  A streaming video server, for instance, has no
 15 * need to cache the contents of a file.
 16 *
 17 * When an application requests uncached I/O, all read and write requests
 18 * are made directly to the server; data stored or fetched via these
 19 * requests is not cached in the Linux page cache.  The client does not
 20 * correct unaligned requests from applications.  All requested bytes are
 21 * held on permanent storage before a direct write system call returns to
 22 * an application.
 23 *
 24 * Solaris implements an uncached I/O facility called directio() that
 25 * is used for backups and sequential I/O to very large files.  Solaris
 26 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 27 * an undocumented mount option.
 28 *
 29 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 30 * help from Andrew Morton.
 31 *
 32 * 18 Dec 2001	Initial implementation for 2.4  --cel
 33 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 34 * 08 Jun 2003	Port to 2.5 APIs  --cel
 35 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 36 * 15 Sep 2004	Parallel async reads  --cel
 37 * 04 May 2005	support O_DIRECT with aio  --cel
 38 *
 39 */
 40
 41#include <linux/errno.h>
 42#include <linux/sched.h>
 43#include <linux/kernel.h>
 44#include <linux/file.h>
 45#include <linux/pagemap.h>
 46#include <linux/kref.h>
 47#include <linux/slab.h>
 48#include <linux/task_io_accounting_ops.h>
 49
 50#include <linux/nfs_fs.h>
 51#include <linux/nfs_page.h>
 52#include <linux/sunrpc/clnt.h>
 53
 54#include <asm/uaccess.h>
 55#include <linux/atomic.h>
 56
 57#include "internal.h"
 58#include "iostat.h"
 59#include "pnfs.h"
 60
 61#define NFSDBG_FACILITY		NFSDBG_VFS
 62
 63static struct kmem_cache *nfs_direct_cachep;
 64
 65/*
 66 * This represents a set of asynchronous requests that we're waiting on
 67 */
 68struct nfs_direct_req {
 69	struct kref		kref;		/* release manager */
 70
 71	/* I/O parameters */
 72	struct nfs_open_context	*ctx;		/* file open context info */
 73	struct nfs_lock_context *l_ctx;		/* Lock context info */
 74	struct kiocb *		iocb;		/* controlling i/o request */
 75	struct inode *		inode;		/* target file of i/o */
 76
 77	/* completion state */
 78	atomic_t		io_count;	/* i/os we're waiting for */
 79	spinlock_t		lock;		/* protect completion state */
 80	ssize_t			count,		/* bytes actually processed */
 81				error;		/* any reported error */
 82	struct completion	completion;	/* wait for i/o completion */
 83
 84	/* commit state */
 85	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
 86	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
 87	struct work_struct	work;
 88	int			flags;
 89#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
 90#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
 91	struct nfs_writeverf	verf;		/* unstable write verifier */
 92};
 93
 94static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
 95static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
 96static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
 97static void nfs_direct_write_schedule_work(struct work_struct *work);
 98
 99static inline void get_dreq(struct nfs_direct_req *dreq)
100{
101	atomic_inc(&dreq->io_count);
102}
103
104static inline int put_dreq(struct nfs_direct_req *dreq)
105{
106	return atomic_dec_and_test(&dreq->io_count);
107}
108
109/**
110 * nfs_direct_IO - NFS address space operation for direct I/O
111 * @rw: direction (read or write)
112 * @iocb: target I/O control block
113 * @iov: array of vectors that define I/O buffer
114 * @pos: offset in file to begin the operation
115 * @nr_segs: size of iovec array
116 *
117 * The presence of this routine in the address space ops vector means
118 * the NFS client supports direct I/O.  However, we shunt off direct
119 * read and write requests before the VFS gets them, so this method
120 * should never be called.
121 */
122ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
123{
124	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
125			iocb->ki_filp->f_path.dentry->d_name.name,
126			(long long) pos, nr_segs);
127
128	return -EINVAL;
129}
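/*
 * Contrast with v5.4 above: in this 3.5-era code the method simply
 * returns -EINVAL because reads and writes are always shunted off before
 * the VFS can call it; routing swap I/O through this hook came later.
 */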
130
131static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
132{
133	unsigned int i;
134	for (i = 0; i < npages; i++)
135		page_cache_release(pages[i]);
136}
137
138void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
139			      struct nfs_direct_req *dreq)
140{
141	cinfo->lock = &dreq->lock;
142	cinfo->mds = &dreq->mds_cinfo;
143	cinfo->ds = &dreq->ds_cinfo;
144	cinfo->dreq = dreq;
145	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
146}
147
148static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
149{
150	struct nfs_direct_req *dreq;
151
152	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
153	if (!dreq)
154		return NULL;
155
156	kref_init(&dreq->kref);
157	kref_get(&dreq->kref);
158	init_completion(&dreq->completion);
159	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
160	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
161	spin_lock_init(&dreq->lock);
162
163	return dreq;
164}
165
166static void nfs_direct_req_free(struct kref *kref)
167{
168	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
169
170	if (dreq->l_ctx != NULL)
171		nfs_put_lock_context(dreq->l_ctx);
172	if (dreq->ctx != NULL)
173		put_nfs_open_context(dreq->ctx);
174	kmem_cache_free(nfs_direct_cachep, dreq);
175}
176
177static void nfs_direct_req_release(struct nfs_direct_req *dreq)
178{
179	kref_put(&dreq->kref, nfs_direct_req_free);
180}
181
182/*
183 * Collects and returns the final error value/byte-count.
184 */
185static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
186{
187	ssize_t result = -EIOCBQUEUED;
188
189	/* Async requests don't wait here */
190	if (dreq->iocb)
191		goto out;
192
193	result = wait_for_completion_killable(&dreq->completion);
194
195	if (!result)
196		result = dreq->error;
197	if (!result)
198		result = dreq->count;
199
200out:
201	return (ssize_t) result;
202}
203
204/*
205 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
206 * the iocb is still valid here if this is a synchronous request.
207 */
208static void nfs_direct_complete(struct nfs_direct_req *dreq)
209{
210	if (dreq->iocb) {
211		long res = (long) dreq->error;
212		if (!res)
213			res = (long) dreq->count;
214		aio_complete(dreq->iocb, res, 0);
215	}
216	complete_all(&dreq->completion);
217
218	nfs_direct_req_release(dreq);
219}
220
221static void nfs_direct_readpage_release(struct nfs_page *req)
222{
223	dprintk("NFS: direct read done (%s/%lld %d@%lld)\n",
224		req->wb_context->dentry->d_inode->i_sb->s_id,
225		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
226		req->wb_bytes,
227		(long long)req_offset(req));
228	nfs_release_request(req);
229}
230
231static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
232{
233	unsigned long bytes = 0;
234	struct nfs_direct_req *dreq = hdr->dreq;
235
236	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
237		goto out_put;
238
239	spin_lock(&dreq->lock);
240	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
241		dreq->error = hdr->error;
242	else
243		dreq->count += hdr->good_bytes;
244	spin_unlock(&dreq->lock);
245
246	while (!list_empty(&hdr->pages)) {
247		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
248		struct page *page = req->wb_page;
249
250		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
251			if (bytes > hdr->good_bytes)
252				zero_user(page, 0, PAGE_SIZE);
253			else if (hdr->good_bytes - bytes < PAGE_SIZE)
254				zero_user_segment(page,
255					hdr->good_bytes & ~PAGE_MASK,
256					PAGE_SIZE);
257		}
258		if (!PageCompound(page)) {
259			if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
260				if (bytes < hdr->good_bytes)
261					set_page_dirty(page);
262			} else
263				set_page_dirty(page);
264		}
265		bytes += req->wb_bytes;
266		nfs_list_remove_request(req);
267		nfs_direct_readpage_release(req);
268	}
269out_put:
270	if (put_dreq(dreq))
271		nfs_direct_complete(dreq);
272	hdr->release(hdr);
273}
274
275static void nfs_read_sync_pgio_error(struct list_head *head)
276{
277	struct nfs_page *req;
278
279	while (!list_empty(head)) {
280		req = nfs_list_entry(head->next);
281		nfs_list_remove_request(req);
282		nfs_release_request(req);
283	}
284}
285
286static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
287{
288	get_dreq(hdr->dreq);
289}
290
291static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
292	.error_cleanup = nfs_read_sync_pgio_error,
293	.init_hdr = nfs_direct_pgio_init,
294	.completion = nfs_direct_read_completion,
295};
296
297/*
298 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
299 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
300 * bail and stop sending more reads.  Read length accounting is
301 * handled automatically by nfs_direct_read_result().  Otherwise, if
302 * no requests have been sent, just return an error.
303 */
304static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
305						const struct iovec *iov,
306						loff_t pos)
307{
308	struct nfs_direct_req *dreq = desc->pg_dreq;
309	struct nfs_open_context *ctx = dreq->ctx;
310	struct inode *inode = ctx->dentry->d_inode;
311	unsigned long user_addr = (unsigned long)iov->iov_base;
312	size_t count = iov->iov_len;
313	size_t rsize = NFS_SERVER(inode)->rsize;
314	unsigned int pgbase;
315	int result;
316	ssize_t started = 0;
317	struct page **pagevec = NULL;
318	unsigned int npages;
319
320	do {
321		size_t bytes;
322		int i;
323
324		pgbase = user_addr & ~PAGE_MASK;
325		bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);
326
327		result = -ENOMEM;
328		npages = nfs_page_array_len(pgbase, bytes);
329		if (!pagevec)
330			pagevec = kmalloc(npages * sizeof(struct page *),
331					  GFP_KERNEL);
332		if (!pagevec)
333			break;
334		down_read(&current->mm->mmap_sem);
335		result = get_user_pages(current, current->mm, user_addr,
336					npages, 1, 0, pagevec, NULL);
337		up_read(&current->mm->mmap_sem);
338		if (result < 0)
339			break;
340		if ((unsigned)result < npages) {
341			bytes = result * PAGE_SIZE;
342			if (bytes <= pgbase) {
343				nfs_direct_release_pages(pagevec, result);
344				break;
345			}
346			bytes -= pgbase;
347			npages = result;
348		}
349
350		for (i = 0; i < npages; i++) {
351			struct nfs_page *req;
352			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
353			/* XXX do we need to do the eof zeroing found in async_filler? */
354			req = nfs_create_request(dreq->ctx, dreq->inode,
355						 pagevec[i],
356						 pgbase, req_len);
357			if (IS_ERR(req)) {
358				result = PTR_ERR(req);
359				break;
360			}
361			req->wb_index = pos >> PAGE_SHIFT;
362			req->wb_offset = pos & ~PAGE_MASK;
363			if (!nfs_pageio_add_request(desc, req)) {
364				result = desc->pg_error;
365				nfs_release_request(req);
366				break;
367			}
368			pgbase = 0;
369			bytes -= req_len;
370			started += req_len;
371			user_addr += req_len;
372			pos += req_len;
373			count -= req_len;
374		}
375		/* The nfs_page now hold references to these pages */
376		nfs_direct_release_pages(pagevec, npages);
377	} while (count != 0 && result >= 0);
378
379	kfree(pagevec);
380
381	if (started)
382		return started;
383	return result < 0 ? (ssize_t) result : -EFAULT;
384}
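/*
 * Note the difference from v5.4 above: here each iovec segment is pinned
 * manually with get_user_pages() under mmap_sem, while the newer code
 * lets iov_iter_get_pages_alloc() do the pinning and the page
 * arithmetic.
 */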
385
386static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
387					      const struct iovec *iov,
388					      unsigned long nr_segs,
389					      loff_t pos)
390{
391	struct nfs_pageio_descriptor desc;
392	ssize_t result = -EINVAL;
393	size_t requested_bytes = 0;
394	unsigned long seg;
395
396	nfs_pageio_init_read(&desc, dreq->inode,
397			     &nfs_direct_read_completion_ops);
398	get_dreq(dreq);
399	desc.pg_dreq = dreq;
400
401	for (seg = 0; seg < nr_segs; seg++) {
402		const struct iovec *vec = &iov[seg];
403		result = nfs_direct_read_schedule_segment(&desc, vec, pos);
404		if (result < 0)
405			break;
406		requested_bytes += result;
407		if ((size_t)result < vec->iov_len)
408			break;
409		pos += vec->iov_len;
410	}
411
412	nfs_pageio_complete(&desc);
413
414	/*
415	 * If no bytes were started, return the error, and let the
416	 * generic layer handle the completion.
417	 */
418	if (requested_bytes == 0) {
419		nfs_direct_req_release(dreq);
420		return result < 0 ? result : -EIO;
421	}
422
423	if (put_dreq(dreq))
424		nfs_direct_complete(dreq);
425	return 0;
426}
427
428static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
429			       unsigned long nr_segs, loff_t pos)
430{
431	ssize_t result = -ENOMEM;
432	struct inode *inode = iocb->ki_filp->f_mapping->host;
433	struct nfs_direct_req *dreq;
434
435	dreq = nfs_direct_req_alloc();
436	if (dreq == NULL)
437		goto out;
438
439	dreq->inode = inode;
440	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
441	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
442	if (dreq->l_ctx == NULL)
443		goto out_release;
444	if (!is_sync_kiocb(iocb))
445		dreq->iocb = iocb;
446
447	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
448	if (!result)
449		result = nfs_direct_wait(dreq);
450	NFS_I(inode)->read_io += result;
451out_release:
452	nfs_direct_req_release(dreq);
453out:
454	return result;
455}
456
457static void nfs_inode_dio_write_done(struct inode *inode)
458{
459	nfs_zap_mapping(inode, inode->i_mapping);
460	inode_dio_done(inode);
461}
462
463#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
464static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
465{
466	struct nfs_pageio_descriptor desc;
467	struct nfs_page *req, *tmp;
468	LIST_HEAD(reqs);
469	struct nfs_commit_info cinfo;
470	LIST_HEAD(failed);
471
472	nfs_init_cinfo_from_dreq(&cinfo, dreq);
473	pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
474	spin_lock(cinfo.lock);
475	nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
476	spin_unlock(cinfo.lock);
477
478	dreq->count = 0;
479	get_dreq(dreq);
480
481	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE,
482			      &nfs_direct_write_completion_ops);
483	desc.pg_dreq = dreq;
484
485	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
486		if (!nfs_pageio_add_request(&desc, req)) {
487			nfs_list_remove_request(req);
488			nfs_list_add_request(req, &failed);
489			spin_lock(cinfo.lock);
490			dreq->flags = 0;
491			dreq->error = -EIO;
492			spin_unlock(cinfo.lock);
493		}
494		nfs_release_request(req);
495	}
496	nfs_pageio_complete(&desc);
497
498	while (!list_empty(&failed)) {
499		req = nfs_list_entry(failed.next);
500		nfs_list_remove_request(req);
501		nfs_unlock_and_release_request(req);
502	}
503
504	if (put_dreq(dreq))
505		nfs_direct_write_complete(dreq, dreq->inode);
506}
507
508static void nfs_direct_commit_complete(struct nfs_commit_data *data)
509{
510	struct nfs_direct_req *dreq = data->dreq;
511	struct nfs_commit_info cinfo;
512	struct nfs_page *req;
513	int status = data->task.tk_status;
514
515	nfs_init_cinfo_from_dreq(&cinfo, dreq);
516	if (status < 0) {
517		dprintk("NFS: %5u commit failed with error %d.\n",
518			data->task.tk_pid, status);
519		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
520	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
521		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
522		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
523	}
524
525	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
526	while (!list_empty(&data->pages)) {
527		req = nfs_list_entry(data->pages.next);
528		nfs_list_remove_request(req);
529		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
530			/* Note the rewrite will go through mds */
531			nfs_mark_request_commit(req, NULL, &cinfo);
532		} else
533			nfs_release_request(req);
534		nfs_unlock_and_release_request(req);
535	}
536
537	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
538		nfs_direct_write_complete(dreq, data->inode);
539}
540
541static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
542{
543	/* There is no lock to clear */
544}
545
546static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
547	.completion = nfs_direct_commit_complete,
548	.error_cleanup = nfs_direct_error_cleanup,
549};
550
551static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
552{
553	int res;
554	struct nfs_commit_info cinfo;
555	LIST_HEAD(mds_list);
556
557	nfs_init_cinfo_from_dreq(&cinfo, dreq);
558	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
559	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
560	if (res < 0) /* res == -ENOMEM */
561		nfs_direct_write_reschedule(dreq);
562}
563
564static void nfs_direct_write_schedule_work(struct work_struct *work)
565{
566	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
567	int flags = dreq->flags;
568
569	dreq->flags = 0;
570	switch (flags) {
571		case NFS_ODIRECT_DO_COMMIT:
572			nfs_direct_commit_schedule(dreq);
573			break;
574		case NFS_ODIRECT_RESCHED_WRITES:
575			nfs_direct_write_reschedule(dreq);
576			break;
577		default:
578			nfs_inode_dio_write_done(dreq->inode);
579			nfs_direct_complete(dreq);
580	}
581}
582
583static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
584{
585	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
586}
587
588#else
589static void nfs_direct_write_schedule_work(struct work_struct *work)
590{
591}
592
593static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
594{
595	nfs_inode_dio_write_done(inode);
596	nfs_direct_complete(dreq);
597}
598#endif
599
600/*
601 * NB: Return the value of the first error return code.  Subsequent
602 *     errors after the first one are ignored.
603 */
604/*
605 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
606 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
607 * bail and stop sending more writes.  Write length accounting is
608 * handled automatically by nfs_direct_write_result().  Otherwise, if
609 * no requests have been sent, just return an error.
610 */
611static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
612						 const struct iovec *iov,
613						 loff_t pos)
614{
615	struct nfs_direct_req *dreq = desc->pg_dreq;
616	struct nfs_open_context *ctx = dreq->ctx;
617	struct inode *inode = ctx->dentry->d_inode;
618	unsigned long user_addr = (unsigned long)iov->iov_base;
619	size_t count = iov->iov_len;
620	size_t wsize = NFS_SERVER(inode)->wsize;
621	unsigned int pgbase;
622	int result;
623	ssize_t started = 0;
624	struct page **pagevec = NULL;
625	unsigned int npages;
626
627	do {
628		size_t bytes;
629		int i;
630
631		pgbase = user_addr & ~PAGE_MASK;
632		bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);
633
634		result = -ENOMEM;
635		npages = nfs_page_array_len(pgbase, bytes);
636		if (!pagevec)
637			pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
638		if (!pagevec)
639			break;
640
641		down_read(&current->mm->mmap_sem);
642		result = get_user_pages(current, current->mm, user_addr,
643					npages, 0, 0, pagevec, NULL);
644		up_read(&current->mm->mmap_sem);
645		if (result < 0)
646			break;
647
648		if ((unsigned)result < npages) {
649			bytes = result * PAGE_SIZE;
650			if (bytes <= pgbase) {
651				nfs_direct_release_pages(pagevec, result);
652				break;
653			}
654			bytes -= pgbase;
655			npages = result;
656		}
657
658		for (i = 0; i < npages; i++) {
659			struct nfs_page *req;
660			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
661
662			req = nfs_create_request(dreq->ctx, dreq->inode,
663						 pagevec[i],
664						 pgbase, req_len);
665			if (IS_ERR(req)) {
666				result = PTR_ERR(req);
667				break;
668			}
669			nfs_lock_request(req);
670			req->wb_index = pos >> PAGE_SHIFT;
671			req->wb_offset = pos & ~PAGE_MASK;
672			if (!nfs_pageio_add_request(desc, req)) {
673				result = desc->pg_error;
674				nfs_unlock_and_release_request(req);
675				break;
676			}
677			pgbase = 0;
678			bytes -= req_len;
679			started += req_len;
680			user_addr += req_len;
681			pos += req_len;
682			count -= req_len;
683		}
684		/* The nfs_page now hold references to these pages */
685		nfs_direct_release_pages(pagevec, npages);
686	} while (count != 0 && result >= 0);
687
688	kfree(pagevec);
689
690	if (started)
691		return started;
692	return result < 0 ? (ssize_t) result : -EFAULT;
693}
694
695static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
696{
697	struct nfs_direct_req *dreq = hdr->dreq;
698	struct nfs_commit_info cinfo;
699	int bit = -1;
700	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
701
702	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
703		goto out_put;
704
705	nfs_init_cinfo_from_dreq(&cinfo, dreq);
706
707	spin_lock(&dreq->lock);
708
709	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
710		dreq->flags = 0;
711		dreq->error = hdr->error;
712	}
713	if (dreq->error != 0)
714		bit = NFS_IOHDR_ERROR;
715	else {
716		dreq->count += hdr->good_bytes;
717		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
718			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
719			bit = NFS_IOHDR_NEED_RESCHED;
720		} else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
721			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
722				bit = NFS_IOHDR_NEED_RESCHED;
723			else if (dreq->flags == 0) {
724				memcpy(&dreq->verf, hdr->verf,
725				       sizeof(dreq->verf));
726				bit = NFS_IOHDR_NEED_COMMIT;
727				dreq->flags = NFS_ODIRECT_DO_COMMIT;
728			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
729				if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) {
730					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
731					bit = NFS_IOHDR_NEED_RESCHED;
732				} else
733					bit = NFS_IOHDR_NEED_COMMIT;
734			}
735		}
736	}
737	spin_unlock(&dreq->lock);
738
739	while (!list_empty(&hdr->pages)) {
 
740		req = nfs_list_entry(hdr->pages.next);
741		nfs_list_remove_request(req);
742		switch (bit) {
743		case NFS_IOHDR_NEED_RESCHED:
744		case NFS_IOHDR_NEED_COMMIT:
745			kref_get(&req->wb_kref);
746			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
 
747		}
748		nfs_unlock_and_release_request(req);
749	}
750
751out_put:
752	if (put_dreq(dreq))
753		nfs_direct_write_complete(dreq, hdr->inode);
754	hdr->release(hdr);
755}
756
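/*
 * The verifier handling above follows the NFSv3/v4 UNSTABLE write
 * protocol: the first header that needs a commit records the server's
 * write verifier in the dreq; if a later header carries a different
 * verifier, the server has likely rebooted and lost its write cache,
 * so the writes are rescheduled rather than committed.
 */
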
static void nfs_write_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

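/*
 * Callbacks handed to the generic pageio layer by
 * nfs_direct_write_schedule_iovec() below: .init_hdr takes a
 * reference on the dreq for each I/O header, .completion runs once
 * the WRITEs behind a header finish, and .error_cleanup releases any
 * requests still queued after a scheduling failure.
 */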
static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
};

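/*
 * nfs_direct_write_schedule_iovec() holds an extra dreq reference and
 * elevates i_dio_count across scheduling, so the request cannot
 * complete, and the inode cannot finish a truncate, while segments
 * are still being queued; the final put_dreq() completes the request
 * only if all scheduled I/O has already finished.
 */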
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       const struct iovec *iov,
					       unsigned long nr_segs,
					       loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	unsigned long seg;

	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	atomic_inc(&inode->i_dio_count);

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_write_schedule_segment(&desc, vec, pos);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}
	nfs_pageio_complete(&desc);
	NFS_I(dreq->inode)->write_io += desc.pg_bytes_written;

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_done(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
	return 0;
}

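/*
 * For a synchronous kiocb, the caller of nfs_direct_write() sleeps in
 * nfs_direct_wait() until the whole direct request completes; for
 * aio, dreq->iocb is recorded so the completion path can finish the
 * iocb, and nfs_direct_wait() returns without blocking.
 */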
static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos,
				size_t count)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
		goto out_release;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid its check that the
 * request starts before the end of the file.  For that check to
 * work, we would have to generate a GETATTR before each direct read,
 * and even then there would be a window between the GETATTR and the
 * subsequent READ where the file size could change.  Our preference
 * is simply to do all reads the application wants, and let the
 * server take care of managing the end of file boundary.
 *
 * This function also eliminates unnecessary updates of the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_read(count);

	retval = nfs_direct_read(iocb, iov, nr_segs, pos);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

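/*
 * A minimal userspace sketch of how nfs_file_direct_read() above is
 * reached, with a hypothetical path and error handling omitted.  A
 * page-aligned buffer is used for portability, although this client
 * does not itself reject unaligned requests:
 *
 *	char *buf;
 *	int fd = open("/mnt/nfs/data", O_RDONLY | O_DIRECT);
 *	posix_memalign((void **)&buf, 4096, 4096);
 *	ssize_t n = pread(fd, buf, 4096, 0);
 *
 * Each such pread() bypasses the page cache and arrives here as a
 * single-segment iovec.
 */
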
/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates; see the direct read comments above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_write(count);

	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
	if (retval > 0) {
		struct inode *inode = mapping->host;

		iocb->ki_pos = pos + retval;
		spin_lock(&inode->i_lock);
		if (i_size_read(inode) < iocb->ki_pos)
			i_size_write(inode, iocb->ki_pos);
		spin_unlock(&inode->i_lock);
	}
out:
	return retval;
}

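/*
 * A matching userspace sketch for nfs_file_direct_write() above,
 * again with hypothetical names and error handling omitted:
 *
 *	char *buf;
 *	int fd = open("/mnt/nfs/data", O_WRONLY | O_DIRECT);
 *	posix_memalign((void **)&buf, 4096, 4096);
 *	memset(buf, 0, 4096);
 *	ssize_t n = pwrite(fd, buf, 4096, 0);
 *
 * The explicit offset matters: as noted in the comment above,
 * O_APPEND is not supported for NFS direct writes.
 */
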
/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}