v3.1
   1/*
   2 * linux/fs/nfs/direct.c
   3 *
   4 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
   5 *
   6 * High-performance uncached I/O for the Linux NFS client
   7 *
   8 * There are important applications whose performance or correctness
   9 * depends on uncached access to file data.  Database clusters
  10 * (multiple copies of the same instance running on separate hosts)
  11 * implement their own cache coherency protocol that subsumes file
  12 * system cache protocols.  Applications that process datasets
  13 * considerably larger than the client's memory do not always benefit
  14 * from a local cache.  A streaming video server, for instance, has no
  15 * need to cache the contents of a file.
  16 *
  17 * When an application requests uncached I/O, all read and write requests
  18 * are made directly to the server; data stored or fetched via these
  19 * requests is not cached in the Linux page cache.  The client does not
  20 * correct unaligned requests from applications.  All requested bytes are
  21 * held on permanent storage before a direct write system call returns to
  22 * an application.
  23 *
  24 * Solaris implements an uncached I/O facility called directio() that
  25 * is used for backups and sequential I/O to very large files.  Solaris
  26 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
  27 * an undocumented mount option.
  28 *
  29 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
  30 * help from Andrew Morton.
  31 *
  32 * 18 Dec 2001	Initial implementation for 2.4  --cel
  33 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
  34 * 08 Jun 2003	Port to 2.5 APIs  --cel
  35 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
  36 * 15 Sep 2004	Parallel async reads  --cel
  37 * 04 May 2005	support O_DIRECT with aio  --cel
  38 *
  39 */
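/*
 * For illustration only (not part of this file): a minimal userspace
 * sketch of the uncached access described above.  The path, buffer
 * size and 4096-byte alignment are assumptions; O_DIRECT needs
 * _GNU_SOURCE on Linux, and the client will not correct unaligned
 * requests.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/nfs/dbfile", O_RDWR | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);	// aligned I/O buffer
 *	pread(fd, buf, 4096, 0);		// NFS READ straight to the server
 */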
  40
  41#include <linux/errno.h>
  42#include <linux/sched.h>
  43#include <linux/kernel.h>
  44#include <linux/file.h>
  45#include <linux/pagemap.h>
  46#include <linux/kref.h>
  47#include <linux/slab.h>
  48#include <linux/task_io_accounting_ops.h>
  49
  50#include <linux/nfs_fs.h>
  51#include <linux/nfs_page.h>
  52#include <linux/sunrpc/clnt.h>
  53
  54#include <asm/system.h>
  55#include <asm/uaccess.h>
  56#include <linux/atomic.h>
  57
  58#include "internal.h"
  59#include "iostat.h"
  60
  61#define NFSDBG_FACILITY		NFSDBG_VFS
  62
  63static struct kmem_cache *nfs_direct_cachep;
  64
  65/*
  66 * This represents a set of asynchronous requests that we're waiting on
  67 */
  68struct nfs_direct_req {
  69	struct kref		kref;		/* release manager */
  70
  71	/* I/O parameters */
  72	struct nfs_open_context	*ctx;		/* file open context info */
  73	struct nfs_lock_context *l_ctx;		/* Lock context info */
  74	struct kiocb *		iocb;		/* controlling i/o request */
  75	struct inode *		inode;		/* target file of i/o */
  76
  77	/* completion state */
  78	atomic_t		io_count;	/* i/os we're waiting for */
  79	spinlock_t		lock;		/* protect completion state */
  80	ssize_t			count,		/* bytes actually processed */
  81				error;		/* any reported error */
  82	struct completion	completion;	/* wait for i/o completion */
  83
  84	/* commit state */
  85	struct list_head	rewrite_list;	/* saved nfs_write_data structs */
  86	struct nfs_write_data *	commit_data;	/* special write_data for commits */
  87	int			flags;
  88#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
  89#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
  90	struct nfs_writeverf	verf;		/* unstable write verifier */
  91};
  92
  93static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
  94static const struct rpc_call_ops nfs_write_direct_ops;
  95
  96static inline void get_dreq(struct nfs_direct_req *dreq)
  97{
  98	atomic_inc(&dreq->io_count);
  99}
 100
 101static inline int put_dreq(struct nfs_direct_req *dreq)
 102{
 103	return atomic_dec_and_test(&dreq->io_count);
 104}
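/*
 * The io_count pattern used throughout this file, sketched (the real
 * callers appear below): the scheduling path holds one reference while
 * it dispatches, each in-flight RPC holds one more, and whichever
 * put_dreq() drops the count to zero completes the request.
 *
 *	get_dreq(dreq);			// scheduler's reference
 *	for (each rsize/wsize chunk) {
 *		get_dreq(dreq);		// this RPC's reference
 *		// ... dispatch; rpc_release calls put_dreq() ...
 *	}
 *	if (put_dreq(dreq))		// drop the scheduler's reference
 *		nfs_direct_complete(dreq);	// writes use nfs_direct_write_complete()
 */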
 105
 106/**
 107 * nfs_direct_IO - NFS address space operation for direct I/O
 108 * @rw: direction (read or write)
 109 * @iocb: target I/O control block
 110 * @iov: array of vectors that define I/O buffer
 111 * @pos: offset in file to begin the operation
 112 * @nr_segs: size of iovec array
 113 *
 114 * The presence of this routine in the address space ops vector means
 115 * the NFS client supports direct I/O.  However, we shunt off direct
 116 * read and write requests before the VFS gets them, so this method
 117 * should never be called.
 118 */
 119ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
 120{
 121	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
 122			iocb->ki_filp->f_path.dentry->d_name.name,
 123			(long long) pos, nr_segs);
 124
 125	return -EINVAL;
 126}
 127
 128static void nfs_direct_dirty_pages(struct page **pages, unsigned int pgbase, size_t count)
 129{
 130	unsigned int npages;
 131	unsigned int i;
 132
 133	if (count == 0)
 134		return;
 135	pages += (pgbase >> PAGE_SHIFT);
 136	npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 137	for (i = 0; i < npages; i++) {
 138		struct page *page = pages[i];
 139		if (!PageCompound(page))
 140			set_page_dirty(page);
 141	}
 142}
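/*
 * Worked example of the arithmetic above, assuming 4096-byte pages:
 * pgbase = 4608 and count = 8192 skip one whole page (pgbase >>
 * PAGE_SHIFT == 1) and give npages = (8192 + 512 + 4095) >> 12 = 3,
 * the three pages actually touched by the I/O.
 */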
 143
 144static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 145{
 146	unsigned int i;
 147	for (i = 0; i < npages; i++)
 148		page_cache_release(pages[i]);
 149}
 150
 151static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 152{
 153	struct nfs_direct_req *dreq;
 154
 155	dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
 156	if (!dreq)
 157		return NULL;
 158
 159	kref_init(&dreq->kref);
 160	kref_get(&dreq->kref);
 161	init_completion(&dreq->completion);
 162	INIT_LIST_HEAD(&dreq->rewrite_list);
 163	dreq->iocb = NULL;
 164	dreq->ctx = NULL;
 165	dreq->l_ctx = NULL;
 166	spin_lock_init(&dreq->lock);
 167	atomic_set(&dreq->io_count, 0);
 168	dreq->count = 0;
 169	dreq->error = 0;
 170	dreq->flags = 0;
 171
 172	return dreq;
 173}
 174
 175static void nfs_direct_req_free(struct kref *kref)
 176{
 177	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
 178
 179	if (dreq->l_ctx != NULL)
 180		nfs_put_lock_context(dreq->l_ctx);
 181	if (dreq->ctx != NULL)
 182		put_nfs_open_context(dreq->ctx);
 183	kmem_cache_free(nfs_direct_cachep, dreq);
 184}
 185
 186static void nfs_direct_req_release(struct nfs_direct_req *dreq)
 187{
 188	kref_put(&dreq->kref, nfs_direct_req_free);
 189}
 190
 191/*
 192 * Collects and returns the final error value/byte-count.
 193 */
 194static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
 195{
 196	ssize_t result = -EIOCBQUEUED;
 197
 198	/* Async requests don't wait here */
 199	if (dreq->iocb)
 200		goto out;
 201
 202	result = wait_for_completion_killable(&dreq->completion);
 203
 204	if (!result)
 205		result = dreq->error;
 206	if (!result)
 207		result = dreq->count;
 208
 209out:
 210	return (ssize_t) result;
 211}
 212
 213/*
 214 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 215 * the iocb is still valid here if this is a synchronous request.
 216 */
 217static void nfs_direct_complete(struct nfs_direct_req *dreq)
 218{
 219	if (dreq->iocb) {
 220		long res = (long) dreq->error;
 221		if (!res)
 222			res = (long) dreq->count;
 223		aio_complete(dreq->iocb, res, 0);
 224	}
 225	complete_all(&dreq->completion);
 226
 227	nfs_direct_req_release(dreq);
 228}
 229
 230/*
 231 * We must hold a reference to all the pages in this direct read request
 232 * until the RPCs complete.  This could be long *after* we are woken up in
 233 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 234 */
 235static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 236{
 237	struct nfs_read_data *data = calldata;
 238
 239	nfs_readpage_result(task, data);
 240}
 241
 242static void nfs_direct_read_release(void *calldata)
 243{
 244
 245	struct nfs_read_data *data = calldata;
 246	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
 247	int status = data->task.tk_status;
 248
 249	spin_lock(&dreq->lock);
 250	if (unlikely(status < 0)) {
 251		dreq->error = status;
 252		spin_unlock(&dreq->lock);
 253	} else {
 254		dreq->count += data->res.count;
 255		spin_unlock(&dreq->lock);
 256		nfs_direct_dirty_pages(data->pagevec,
 257				data->args.pgbase,
 258				data->res.count);
 259	}
 260	nfs_direct_release_pages(data->pagevec, data->npages);
 261
 262	if (put_dreq(dreq))
 263		nfs_direct_complete(dreq);
 264	nfs_readdata_free(data);
 265}
 266
 267static const struct rpc_call_ops nfs_read_direct_ops = {
 268#if defined(CONFIG_NFS_V4_1)
 269	.rpc_call_prepare = nfs_read_prepare,
 270#endif /* CONFIG_NFS_V4_1 */
 271	.rpc_call_done = nfs_direct_read_result,
 272	.rpc_release = nfs_direct_read_release,
 273};
 274
 275/*
 276 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 277 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 278 * bail and stop sending more reads.  Read length accounting is
 279 * handled automatically by nfs_direct_read_result().  Otherwise, if
 280 * no requests have been sent, just return an error.
 281 */
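/*
 * Illustrative numbers: with rsize = 32768, a single 100000-byte iovec
 * is dispatched as four READs of 32768, 32768, 32768 and 1696 bytes,
 * and "started" accumulates to 100000 unless an allocation or
 * page-pinning failure ends the loop early.
 */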
 282static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 283						const struct iovec *iov,
 284						loff_t pos)
 285{
 286	struct nfs_open_context *ctx = dreq->ctx;
 287	struct inode *inode = ctx->dentry->d_inode;
 288	unsigned long user_addr = (unsigned long)iov->iov_base;
 289	size_t count = iov->iov_len;
 290	size_t rsize = NFS_SERVER(inode)->rsize;
 291	struct rpc_task *task;
 292	struct rpc_message msg = {
 293		.rpc_cred = ctx->cred,
 294	};
 295	struct rpc_task_setup task_setup_data = {
 296		.rpc_client = NFS_CLIENT(inode),
 297		.rpc_message = &msg,
 298		.callback_ops = &nfs_read_direct_ops,
 299		.workqueue = nfsiod_workqueue,
 300		.flags = RPC_TASK_ASYNC,
 301	};
 302	unsigned int pgbase;
 303	int result;
 304	ssize_t started = 0;
 305
 306	do {
 307		struct nfs_read_data *data;
 308		size_t bytes;
 309
 310		pgbase = user_addr & ~PAGE_MASK;
 311		bytes = min(rsize,count);
 312
 313		result = -ENOMEM;
 314		data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes));
 315		if (unlikely(!data))
 316			break;
 317
 318		down_read(&current->mm->mmap_sem);
 319		result = get_user_pages(current, current->mm, user_addr,
 320					data->npages, 1, 0, data->pagevec, NULL);
 321		up_read(&current->mm->mmap_sem);
 322		if (result < 0) {
 323			nfs_readdata_free(data);
 324			break;
 325		}
 326		if ((unsigned)result < data->npages) {
 327			bytes = result * PAGE_SIZE;
 328			if (bytes <= pgbase) {
 329				nfs_direct_release_pages(data->pagevec, result);
 330				nfs_readdata_free(data);
 331				break;
 332			}
 333			bytes -= pgbase;
 334			data->npages = result;
 335		}
 336
 337		get_dreq(dreq);
 338
 339		data->req = (struct nfs_page *) dreq;
 340		data->inode = inode;
 341		data->cred = msg.rpc_cred;
 342		data->args.fh = NFS_FH(inode);
 343		data->args.context = ctx;
 344		data->args.lock_context = dreq->l_ctx;
 345		data->args.offset = pos;
 346		data->args.pgbase = pgbase;
 347		data->args.pages = data->pagevec;
 348		data->args.count = bytes;
 349		data->res.fattr = &data->fattr;
 350		data->res.eof = 0;
 351		data->res.count = bytes;
 352		nfs_fattr_init(&data->fattr);
 353		msg.rpc_argp = &data->args;
 354		msg.rpc_resp = &data->res;
 355
 356		task_setup_data.task = &data->task;
 357		task_setup_data.callback_data = data;
 358		NFS_PROTO(inode)->read_setup(data, &msg);
 359
 360		task = rpc_run_task(&task_setup_data);
 361		if (IS_ERR(task))
 362			break;
 363		rpc_put_task(task);
 364
 365		dprintk("NFS: %5u initiated direct read call "
 366			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
 367				data->task.tk_pid,
 368				inode->i_sb->s_id,
 369				(long long)NFS_FILEID(inode),
 370				bytes,
 371				(unsigned long long)data->args.offset);
 372
 373		started += bytes;
 374		user_addr += bytes;
 375		pos += bytes;
 376		/* FIXME: Remove this unnecessary math from final patch */
 377		pgbase += bytes;
 378		pgbase &= ~PAGE_MASK;
 379		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
 380
 381		count -= bytes;
 382	} while (count != 0);
 383
 384	if (started)
 385		return started;
 386	return result < 0 ? (ssize_t) result : -EFAULT;
 387}
 388
 389static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 390					      const struct iovec *iov,
 391					      unsigned long nr_segs,
 392					      loff_t pos)
 393{
 394	ssize_t result = -EINVAL;
 395	size_t requested_bytes = 0;
 396	unsigned long seg;
 397
 398	get_dreq(dreq);
 399
 400	for (seg = 0; seg < nr_segs; seg++) {
 401		const struct iovec *vec = &iov[seg];
 402		result = nfs_direct_read_schedule_segment(dreq, vec, pos);
 403		if (result < 0)
 404			break;
 405		requested_bytes += result;
 406		if ((size_t)result < vec->iov_len)
 407			break;
 408		pos += vec->iov_len;
 409	}
 410
 411	/*
 412	 * If no bytes were started, return the error, and let the
 413	 * generic layer handle the completion.
 414	 */
 415	if (requested_bytes == 0) {
 416		nfs_direct_req_release(dreq);
 417		return result < 0 ? result : -EIO;
 418	}
 419
 420	if (put_dreq(dreq))
 421		nfs_direct_complete(dreq);
 422	return 0;
 423}
 424
 425static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
 426			       unsigned long nr_segs, loff_t pos)
 427{
 428	ssize_t result = -ENOMEM;
 429	struct inode *inode = iocb->ki_filp->f_mapping->host;
 430	struct nfs_direct_req *dreq;
 431
 432	dreq = nfs_direct_req_alloc();
 433	if (dreq == NULL)
 434		goto out;
 435
 436	dreq->inode = inode;
 437	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
 438	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
 439	if (dreq->l_ctx == NULL)
 440		goto out_release;
 441	if (!is_sync_kiocb(iocb))
 442		dreq->iocb = iocb;
 443
 444	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
 445	if (!result)
 446		result = nfs_direct_wait(dreq);
 447out_release:
 448	nfs_direct_req_release(dreq);
 449out:
 450	return result;
 451}
 452
 453static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 454{
 455	while (!list_empty(&dreq->rewrite_list)) {
 456		struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
 457		list_del(&data->pages);
 458		nfs_direct_release_pages(data->pagevec, data->npages);
 459		nfs_writedata_free(data);
 460	}
 461}
 462
 463#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 464static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 465{
 466	struct inode *inode = dreq->inode;
 467	struct list_head *p;
 468	struct nfs_write_data *data;
 469	struct rpc_task *task;
 470	struct rpc_message msg = {
 471		.rpc_cred = dreq->ctx->cred,
 472	};
 473	struct rpc_task_setup task_setup_data = {
 474		.rpc_client = NFS_CLIENT(inode),
 475		.rpc_message = &msg,
 476		.callback_ops = &nfs_write_direct_ops,
 477		.workqueue = nfsiod_workqueue,
 478		.flags = RPC_TASK_ASYNC,
 479	};
 480
 481	dreq->count = 0;
 482	get_dreq(dreq);
 483
 484	list_for_each(p, &dreq->rewrite_list) {
 485		data = list_entry(p, struct nfs_write_data, pages);
 486
 487		get_dreq(dreq);
 488
 489		/* Use stable writes */
 490		data->args.stable = NFS_FILE_SYNC;
 491
 492		/*
 493		 * Reset data->res.
 494		 */
 495		nfs_fattr_init(&data->fattr);
 496		data->res.count = data->args.count;
 497		memset(&data->verf, 0, sizeof(data->verf));
 498
 499		/*
 500		 * Reuse data->task; data->args should not have changed
 501		 * since the original request was sent.
 502		 */
 503		task_setup_data.task = &data->task;
 504		task_setup_data.callback_data = data;
 505		msg.rpc_argp = &data->args;
 506		msg.rpc_resp = &data->res;
 507		NFS_PROTO(inode)->write_setup(data, &msg);
 508
 509		/*
 510		 * We're called via an RPC callback, so BKL is already held.
 511		 */
 512		task = rpc_run_task(&task_setup_data);
 513		if (!IS_ERR(task))
 514			rpc_put_task(task);
 515
 516		dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
 517				data->task.tk_pid,
 518				inode->i_sb->s_id,
 519				(long long)NFS_FILEID(inode),
 520				data->args.count,
 521				(unsigned long long)data->args.offset);
 522	}
 523
 524	if (put_dreq(dreq))
 525		nfs_direct_write_complete(dreq, inode);
 526}
 527
 528static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
 529{
 530	struct nfs_write_data *data = calldata;
 531
 532	/* Call the NFS version-specific code */
 533	NFS_PROTO(data->inode)->commit_done(task, data);
 534}
 535
 536static void nfs_direct_commit_release(void *calldata)
 537{
 538	struct nfs_write_data *data = calldata;
 539	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
 540	int status = data->task.tk_status;
 541
 542	if (status < 0) {
 543		dprintk("NFS: %5u commit failed with error %d.\n",
 544				data->task.tk_pid, status);
 545		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 546	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
 547		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
 548		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 549	}
 550
 551	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
 552	nfs_direct_write_complete(dreq, data->inode);
 553	nfs_commit_free(data);
 554}
 555
 556static const struct rpc_call_ops nfs_commit_direct_ops = {
 557#if defined(CONFIG_NFS_V4_1)
 558	.rpc_call_prepare = nfs_write_prepare,
 559#endif /* CONFIG_NFS_V4_1 */
 560	.rpc_call_done = nfs_direct_commit_result,
 561	.rpc_release = nfs_direct_commit_release,
 562};
 563
 564static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 565{
 566	struct nfs_write_data *data = dreq->commit_data;
 567	struct rpc_task *task;
 568	struct rpc_message msg = {
 569		.rpc_argp = &data->args,
 570		.rpc_resp = &data->res,
 571		.rpc_cred = dreq->ctx->cred,
 572	};
 573	struct rpc_task_setup task_setup_data = {
 574		.task = &data->task,
 575		.rpc_client = NFS_CLIENT(dreq->inode),
 576		.rpc_message = &msg,
 577		.callback_ops = &nfs_commit_direct_ops,
 578		.callback_data = data,
 579		.workqueue = nfsiod_workqueue,
 580		.flags = RPC_TASK_ASYNC,
 581	};
 582
 583	data->inode = dreq->inode;
 584	data->cred = msg.rpc_cred;
 585
 586	data->args.fh = NFS_FH(data->inode);
 587	data->args.offset = 0;
 588	data->args.count = 0;
 589	data->args.context = dreq->ctx;
 590	data->args.lock_context = dreq->l_ctx;
 591	data->res.count = 0;
 592	data->res.fattr = &data->fattr;
 593	data->res.verf = &data->verf;
 594	nfs_fattr_init(&data->fattr);
 595
 596	NFS_PROTO(data->inode)->commit_setup(data, &msg);
 597
 598	/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
 599	dreq->commit_data = NULL;
 600
 601	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
 602
 603	task = rpc_run_task(&task_setup_data);
 604	if (!IS_ERR(task))
 605		rpc_put_task(task);
 606}
 607
 608static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
 609{
 610	int flags = dreq->flags;
 611
 612	dreq->flags = 0;
 613	switch (flags) {
 614		case NFS_ODIRECT_DO_COMMIT:
 615			nfs_direct_commit_schedule(dreq);
 616			break;
 617		case NFS_ODIRECT_RESCHED_WRITES:
 618			nfs_direct_write_reschedule(dreq);
 619			break;
 620		default:
 621			if (dreq->commit_data != NULL)
 622				nfs_commit_free(dreq->commit_data);
 623			nfs_direct_free_writedata(dreq);
 624			nfs_zap_mapping(inode, inode->i_mapping);
 625			nfs_direct_complete(dreq);
 626	}
 627}
 628
 629static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
 630{
 631	dreq->commit_data = nfs_commitdata_alloc();
 632	if (dreq->commit_data != NULL)
 633		dreq->commit_data->req = (struct nfs_page *) dreq;
 634}
 635#else
 636static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
 637{
 638	dreq->commit_data = NULL;
 639}
 640
 641static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
 642{
 643	nfs_direct_free_writedata(dreq);
 644	nfs_zap_mapping(inode, inode->i_mapping);
 645	nfs_direct_complete(dreq);
 646}
 647#endif
 648
 649static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 650{
 651	struct nfs_write_data *data = calldata;
 652
 653	nfs_writeback_done(task, data);
 654}
 655
 656/*
 657 * NB: Return the value of the first error return code.  Subsequent
 658 *     errors after the first one are ignored.
 659 */
 660static void nfs_direct_write_release(void *calldata)
 661{
 662	struct nfs_write_data *data = calldata;
 663	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
 664	int status = data->task.tk_status;
 665
 666	spin_lock(&dreq->lock);
 667
 668	if (unlikely(status < 0)) {
 669		/* An error has occurred, so we should not commit */
 670		dreq->flags = 0;
 671		dreq->error = status;
 672	}
 673	if (unlikely(dreq->error != 0))
 674		goto out_unlock;
 675
 676	dreq->count += data->res.count;
 677
 678	if (data->res.verf->committed != NFS_FILE_SYNC) {
 679		switch (dreq->flags) {
 680			case 0:
 681				memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
 682				dreq->flags = NFS_ODIRECT_DO_COMMIT;
 683				break;
 684			case NFS_ODIRECT_DO_COMMIT:
 685				if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
 686					dprintk("NFS: %5u write verify failed\n", data->task.tk_pid);
 687					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 688				}
 689		}
 690	}
 691out_unlock:
 692	spin_unlock(&dreq->lock);
 693
 694	if (put_dreq(dreq))
 695		nfs_direct_write_complete(dreq, data->inode);
 696}
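/*
 * The flag updates above form a small state machine per dreq: a first
 * UNSTABLE reply moves flags from 0 to NFS_ODIRECT_DO_COMMIT and
 * records its verifier; a later reply whose verifier differs (e.g. the
 * server rebooted) moves to NFS_ODIRECT_RESCHED_WRITES, which makes
 * nfs_direct_write_complete() resend everything as NFS_FILE_SYNC
 * instead of issuing a COMMIT.
 */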
 697
 698static const struct rpc_call_ops nfs_write_direct_ops = {
 699#if defined(CONFIG_NFS_V4_1)
 700	.rpc_call_prepare = nfs_write_prepare,
 701#endif /* CONFIG_NFS_V4_1 */
 702	.rpc_call_done = nfs_direct_write_result,
 703	.rpc_release = nfs_direct_write_release,
 704};
 705
 706/*
 707 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 708 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 709 * bail and stop sending more writes.  Write length accounting is
 710 * handled automatically by nfs_direct_write_result().  Otherwise, if
 711 * no requests have been sent, just return an error.
 712 */
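/*
 * Illustrative partial-pin case for the loop below (assumed numbers,
 * 4096-byte pages): a 16384-byte chunk at pgbase = 0 for which
 * get_user_pages() pins only 2 of 4 pages trims bytes to 8192; that
 * shorter WRITE is still sent, and the next iteration re-pins from
 * user_addr + 8192.
 */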
 713static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 714						 const struct iovec *iov,
 715						 loff_t pos, int sync)
 716{
 717	struct nfs_open_context *ctx = dreq->ctx;
 718	struct inode *inode = ctx->dentry->d_inode;
 719	unsigned long user_addr = (unsigned long)iov->iov_base;
 720	size_t count = iov->iov_len;
 721	struct rpc_task *task;
 722	struct rpc_message msg = {
 723		.rpc_cred = ctx->cred,
 724	};
 725	struct rpc_task_setup task_setup_data = {
 726		.rpc_client = NFS_CLIENT(inode),
 727		.rpc_message = &msg,
 728		.callback_ops = &nfs_write_direct_ops,
 729		.workqueue = nfsiod_workqueue,
 730		.flags = RPC_TASK_ASYNC,
 731	};
 732	size_t wsize = NFS_SERVER(inode)->wsize;
 733	unsigned int pgbase;
 734	int result;
 735	ssize_t started = 0;
 736
 737	do {
 738		struct nfs_write_data *data;
 739		size_t bytes;
 740
 741		pgbase = user_addr & ~PAGE_MASK;
 742		bytes = min(wsize,count);
 743
 744		result = -ENOMEM;
 745		data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes));
 746		if (unlikely(!data))
 747			break;
 748
 749		down_read(&current->mm->mmap_sem);
 750		result = get_user_pages(current, current->mm, user_addr,
 751					data->npages, 0, 0, data->pagevec, NULL);
 752		up_read(&current->mm->mmap_sem);
 753		if (result < 0) {
 754			nfs_writedata_free(data);
 755			break;
 756		}
 757		if ((unsigned)result < data->npages) {
 758			bytes = result * PAGE_SIZE;
 759			if (bytes <= pgbase) {
 760				nfs_direct_release_pages(data->pagevec, result);
 761				nfs_writedata_free(data);
 762				break;
 763			}
 764			bytes -= pgbase;
 765			data->npages = result;
 766		}
 767
 768		get_dreq(dreq);
 769
 770		list_move_tail(&data->pages, &dreq->rewrite_list);
 771
 772		data->req = (struct nfs_page *) dreq;
 773		data->inode = inode;
 774		data->cred = msg.rpc_cred;
 775		data->args.fh = NFS_FH(inode);
 776		data->args.context = ctx;
 777		data->args.lock_context = dreq->l_ctx;
 778		data->args.offset = pos;
 779		data->args.pgbase = pgbase;
 780		data->args.pages = data->pagevec;
 781		data->args.count = bytes;
 782		data->args.stable = sync;
 783		data->res.fattr = &data->fattr;
 784		data->res.count = bytes;
 785		data->res.verf = &data->verf;
 786		nfs_fattr_init(&data->fattr);
 787
 788		task_setup_data.task = &data->task;
 789		task_setup_data.callback_data = data;
 790		msg.rpc_argp = &data->args;
 791		msg.rpc_resp = &data->res;
 792		NFS_PROTO(inode)->write_setup(data, &msg);
 793
 794		task = rpc_run_task(&task_setup_data);
 795		if (IS_ERR(task))
 796			break;
 797		rpc_put_task(task);
 798
 799		dprintk("NFS: %5u initiated direct write call "
 800			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
 801				data->task.tk_pid,
 802				inode->i_sb->s_id,
 803				(long long)NFS_FILEID(inode),
 804				bytes,
 805				(unsigned long long)data->args.offset);
 806
 807		started += bytes;
 808		user_addr += bytes;
 809		pos += bytes;
 810
 811		/* FIXME: Remove this useless math from the final patch */
 812		pgbase += bytes;
 813		pgbase &= ~PAGE_MASK;
 814		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
 815
 816		count -= bytes;
 817	} while (count != 0);
 818
 819	if (started)
 820		return started;
 821	return result < 0 ? (ssize_t) result : -EFAULT;
 822}
 823
 824static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 825					       const struct iovec *iov,
 826					       unsigned long nr_segs,
 827					       loff_t pos, int sync)
 828{
 829	ssize_t result = 0;
 830	size_t requested_bytes = 0;
 831	unsigned long seg;
 832
 833	get_dreq(dreq);
 834
 835	for (seg = 0; seg < nr_segs; seg++) {
 836		const struct iovec *vec = &iov[seg];
 837		result = nfs_direct_write_schedule_segment(dreq, vec,
 838							   pos, sync);
 839		if (result < 0)
 840			break;
 841		requested_bytes += result;
 842		if ((size_t)result < vec->iov_len)
 843			break;
 844		pos += vec->iov_len;
 845	}
 846
 847	/*
 848	 * If no bytes were started, return the error, and let the
 849	 * generic layer handle the completion.
 850	 */
 851	if (requested_bytes == 0) {
 852		nfs_direct_req_release(dreq);
 853		return result < 0 ? result : -EIO;
 854	}
 855
 856	if (put_dreq(dreq))
 857		nfs_direct_write_complete(dreq, dreq->inode);
 858	return 0;
 859}
 860
 861static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 862				unsigned long nr_segs, loff_t pos,
 863				size_t count)
 864{
 865	ssize_t result = -ENOMEM;
 866	struct inode *inode = iocb->ki_filp->f_mapping->host;
 867	struct nfs_direct_req *dreq;
 868	size_t wsize = NFS_SERVER(inode)->wsize;
 869	int sync = NFS_UNSTABLE;
 870
 871	dreq = nfs_direct_req_alloc();
 872	if (!dreq)
 873		goto out;
 874	nfs_alloc_commit_data(dreq);
 875
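	/*
	 * Heuristic: a write that fits in a single wsize'd WRITE (or one
	 * that cannot COMMIT because commit_data allocation failed) is
	 * sent FILE_SYNC, avoiding the extra COMMIT round trip that
	 * UNSTABLE writes would need.
	 */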
 876	if (dreq->commit_data == NULL || count <= wsize)
 877		sync = NFS_FILE_SYNC;
 878
 879	dreq->inode = inode;
 880	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
 881	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
 882	if (dreq->l_ctx == NULL)
 883		goto out_release;
 884	if (!is_sync_kiocb(iocb))
 885		dreq->iocb = iocb;
 886
 887	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
 888	if (!result)
 889		result = nfs_direct_wait(dreq);
 890out_release:
 891	nfs_direct_req_release(dreq);
 892out:
 893	return result;
 894}
 895
 896/**
 897 * nfs_file_direct_read - file direct read operation for NFS files
 898 * @iocb: target I/O control block
 899 * @iov: vector of user buffers into which to read data
 900 * @nr_segs: size of iov vector
 901 * @pos: byte offset in file where reading starts
 902 *
 903 * We use this function for direct reads instead of calling
 904 * generic_file_aio_read() in order to avoid gfar's check to see if
 905 * the request starts before the end of the file.  For that check
 906 * to work, we must generate a GETATTR before each direct read, and
 907 * even then there is a window between the GETATTR and the subsequent
 908 * READ where the file size could change.  Our preference is simply
 909 * to do all reads the application wants, and the server will take
 910 * care of managing the end of file boundary.
 911 *
 912 * This function also eliminates unnecessarily updating the file's
 913 * atime locally, as the NFS server sets the file's atime, and this
 914 * client must read the updated atime from the server back into its
 915 * cache.
 916 */
 917ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
 918				unsigned long nr_segs, loff_t pos)
 919{
 920	ssize_t retval = -EINVAL;
 921	struct file *file = iocb->ki_filp;
 922	struct address_space *mapping = file->f_mapping;
 923	size_t count;
 924
 925	count = iov_length(iov, nr_segs);
 926	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 927
 928	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
 929		file->f_path.dentry->d_parent->d_name.name,
 930		file->f_path.dentry->d_name.name,
 931		count, (long long) pos);
 932
 933	retval = 0;
 934	if (!count)
 935		goto out;
 936
 937	retval = nfs_sync_mapping(mapping);
 938	if (retval)
 939		goto out;
 940
 941	task_io_account_read(count);
 942
 943	retval = nfs_direct_read(iocb, iov, nr_segs, pos);
 944	if (retval > 0)
 945		iocb->ki_pos = pos + retval;
 946
 947out:
 948	return retval;
 949}
 950
 951/**
 952 * nfs_file_direct_write - file direct write operation for NFS files
 953 * @iocb: target I/O control block
 954 * @iov: vector of user buffers from which to write data
 955 * @nr_segs: size of iov vector
 956 * @pos: byte offset in file where writing starts
 957 *
 958 * We use this function for direct writes instead of calling
 959 * generic_file_aio_write() in order to avoid taking the inode
 960 * semaphore and updating the i_size.  The NFS server will set
 961 * the new i_size and this client must read the updated size
 962 * back into its cache.  We let the server do generic write
 963 * parameter checking and report problems.
 964 *
 965 * We eliminate local atime updates, see direct read above.
 966 *
 967 * We avoid unnecessary page cache invalidations for normal cached
 968 * readers of this file.
 969 *
 970 * Note that O_APPEND is not supported for NFS direct writes, as there
 971 * is no atomic O_APPEND write facility in the NFS protocol.
 972 */
 973ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 974				unsigned long nr_segs, loff_t pos)
 975{
 976	ssize_t retval = -EINVAL;
 977	struct file *file = iocb->ki_filp;
 978	struct address_space *mapping = file->f_mapping;
 979	size_t count;
 980
 981	count = iov_length(iov, nr_segs);
 982	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
 983
 984	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
 985		file->f_path.dentry->d_parent->d_name.name,
 986		file->f_path.dentry->d_name.name,
 987		count, (long long) pos);
 988
 989	retval = generic_write_checks(file, &pos, &count, 0);
 990	if (retval)
 991		goto out;
 992
 993	retval = -EINVAL;
 994	if ((ssize_t) count < 0)
 995		goto out;
 996	retval = 0;
 997	if (!count)
 998		goto out;
 999
1000	retval = nfs_sync_mapping(mapping);
1001	if (retval)
1002		goto out;
1003
1004	task_io_account_write(count);
1005
1006	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
1007
1008	if (retval > 0)
1009		iocb->ki_pos = pos + retval;
1010
1011out:
1012	return retval;
1013}
1014
1015/**
1016 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
1017 *
1018 */
1019int __init nfs_init_directcache(void)
1020{
1021	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
1022						sizeof(struct nfs_direct_req),
1023						0, (SLAB_RECLAIM_ACCOUNT|
1024							SLAB_MEM_SPREAD),
1025						NULL);
1026	if (nfs_direct_cachep == NULL)
1027		return -ENOMEM;
1028
1029	return 0;
1030}
1031
1032/**
1033 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
1034 *
1035 */
1036void nfs_destroy_directcache(void)
1037{
1038	kmem_cache_destroy(nfs_direct_cachep);
1039}
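/*
 * These two helpers pair up in the client's module init/exit paths; a
 * sketch of the expected call site (in this era it sits in
 * fs/nfs/inode.c's init_nfs_fs()/exit_nfs_fs()):
 *
 *	err = nfs_init_directcache();
 *	if (err)
 *		goto out;			// unwind earlier caches
 *	...
 *	nfs_destroy_directcache();	// module exit, or failure unwind
 */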
v4.6
   1/*
   2 * linux/fs/nfs/direct.c
   3 *
   4 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
   5 *
   6 * High-performance uncached I/O for the Linux NFS client
   7 *
   8 * There are important applications whose performance or correctness
   9 * depends on uncached access to file data.  Database clusters
  10 * (multiple copies of the same instance running on separate hosts)
  11 * implement their own cache coherency protocol that subsumes file
  12 * system cache protocols.  Applications that process datasets
  13 * considerably larger than the client's memory do not always benefit
  14 * from a local cache.  A streaming video server, for instance, has no
  15 * need to cache the contents of a file.
  16 *
  17 * When an application requests uncached I/O, all read and write requests
  18 * are made directly to the server; data stored or fetched via these
  19 * requests is not cached in the Linux page cache.  The client does not
  20 * correct unaligned requests from applications.  All requested bytes are
  21 * held on permanent storage before a direct write system call returns to
  22 * an application.
  23 *
  24 * Solaris implements an uncached I/O facility called directio() that
  25 * is used for backups and sequential I/O to very large files.  Solaris
  26 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
  27 * an undocumented mount option.
  28 *
  29 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
  30 * help from Andrew Morton.
  31 *
  32 * 18 Dec 2001	Initial implementation for 2.4  --cel
  33 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
  34 * 08 Jun 2003	Port to 2.5 APIs  --cel
  35 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
  36 * 15 Sep 2004	Parallel async reads  --cel
  37 * 04 May 2005	support O_DIRECT with aio  --cel
  38 *
  39 */
  40
  41#include <linux/errno.h>
  42#include <linux/sched.h>
  43#include <linux/kernel.h>
  44#include <linux/file.h>
  45#include <linux/pagemap.h>
  46#include <linux/kref.h>
  47#include <linux/slab.h>
  48#include <linux/task_io_accounting_ops.h>
  49#include <linux/module.h>
  50
  51#include <linux/nfs_fs.h>
  52#include <linux/nfs_page.h>
  53#include <linux/sunrpc/clnt.h>
  54
  55#include <asm/uaccess.h>
  56#include <linux/atomic.h>
  57
  58#include "internal.h"
  59#include "iostat.h"
  60#include "pnfs.h"
  61
  62#define NFSDBG_FACILITY		NFSDBG_VFS
  63
  64static struct kmem_cache *nfs_direct_cachep;
  65
  66/*
  67 * This represents a set of asynchronous requests that we're waiting on
  68 */
  69struct nfs_direct_mirror {
  70	ssize_t count;
  71};
  72
  73struct nfs_direct_req {
  74	struct kref		kref;		/* release manager */
  75
  76	/* I/O parameters */
  77	struct nfs_open_context	*ctx;		/* file open context info */
  78	struct nfs_lock_context *l_ctx;		/* Lock context info */
  79	struct kiocb *		iocb;		/* controlling i/o request */
  80	struct inode *		inode;		/* target file of i/o */
  81
  82	/* completion state */
  83	atomic_t		io_count;	/* i/os we're waiting for */
  84	spinlock_t		lock;		/* protect completion state */
  85
  86	struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
  87	int			mirror_count;
  88
  89	ssize_t			count,		/* bytes actually processed */
  90				bytes_left,	/* bytes left to be sent */
  91				io_start,	/* start of IO */
  92				error;		/* any reported error */
  93	struct completion	completion;	/* wait for i/o completion */
  94
  95	/* commit state */
  96	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
  97	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
  98	struct work_struct	work;
  99	int			flags;
 100#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
 101#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
 102	struct nfs_writeverf	verf;		/* unstable write verifier */
 103};
 104
 105static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
 106static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
 107static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
 108static void nfs_direct_write_schedule_work(struct work_struct *work);
 109
 110static inline void get_dreq(struct nfs_direct_req *dreq)
 111{
 112	atomic_inc(&dreq->io_count);
 113}
 114
 115static inline int put_dreq(struct nfs_direct_req *dreq)
 116{
 117	return atomic_dec_and_test(&dreq->io_count);
 118}
 119
 120static void
 121nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
 122{
 123	int i;
 124	ssize_t count;
 125
 126	if (dreq->mirror_count == 1) {
 127		dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
 128		dreq->count += hdr->good_bytes;
 129	} else {
 130		/* mirrored writes */
 131		count = dreq->mirrors[hdr->pgio_mirror_idx].count;
 132		if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
 133			count = hdr->io_start + hdr->good_bytes - dreq->io_start;
 134			dreq->mirrors[hdr->pgio_mirror_idx].count = count;
 135		}
 136		/* update the dreq->count by finding the minimum agreed count from all
 137		 * mirrors */
 138		count = dreq->mirrors[0].count;
 139
 140		for (i = 1; i < dreq->mirror_count; i++)
 141			count = min(count, dreq->mirrors[i].count);
 142
 143		dreq->count = count;
 144	}
 145}
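/*
 * Worked example for the mirrored branch (illustrative numbers): two
 * mirrors whose agreed counts are 8192 and 4096 bytes leave dreq->count
 * at 4096 -- only bytes confirmed by every mirror are reported back to
 * the application.
 */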
 146
 147/*
 148 * nfs_direct_select_verf - select the right verifier
 149 * @dreq - direct request possibly spanning multiple servers
 150 * @ds_clp - nfs_client of data server or NULL if MDS / non-pnfs
 151 * @commit_idx - commit bucket index for the DS
 152 *
 153 * returns the correct verifier to use given the role of the server
 154 */
 155static struct nfs_writeverf *
 156nfs_direct_select_verf(struct nfs_direct_req *dreq,
 157		       struct nfs_client *ds_clp,
 158		       int commit_idx)
 159{
 160	struct nfs_writeverf *verfp = &dreq->verf;
 161
 162#ifdef CONFIG_NFS_V4_1
 163	/*
  164	 * pNFS is in use: use the DS verf, except when commit_through_mds
  165	 * is set, i.e. for a layout segment where nbuckets is zero.
 166	 */
 167	if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
 168		if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
 169			verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
 170		else
 171			WARN_ON_ONCE(1);
 172	}
 173#endif
 174	return verfp;
 175}
 176
 177
 178/*
 179 * nfs_direct_set_hdr_verf - set the write/commit verifier
 180 * @dreq - direct request possibly spanning multiple servers
 181 * @hdr - pageio header to validate against previously seen verfs
 182 *
 183 * Set the server's (MDS or DS) "seen" verifier
 184 */
 185static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
 186				    struct nfs_pgio_header *hdr)
 187{
 188	struct nfs_writeverf *verfp;
 189
 190	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
 191	WARN_ON_ONCE(verfp->committed >= 0);
 192	memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
 193	WARN_ON_ONCE(verfp->committed < 0);
 194}
 195
 196/*
 197 * nfs_direct_cmp_hdr_verf - compare verifier for pgio header
 198 * @dreq - direct request possibly spanning multiple servers
 199 * @hdr - pageio header to validate against previously seen verf
 200 *
 201 * set the server's "seen" verf if not initialized.
 202 * returns result of comparison between @hdr->verf and the "seen"
 203 * verf of the server used by @hdr (DS or MDS)
 204 */
 205static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
 206					  struct nfs_pgio_header *hdr)
 207{
 208	struct nfs_writeverf *verfp;
 209
 210	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
 211	if (verfp->committed < 0) {
 212		nfs_direct_set_hdr_verf(dreq, hdr);
 213		return 0;
 214	}
 215	return memcmp(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
 216}
 217
 218/*
 219 * nfs_direct_cmp_commit_data_verf - compare verifier for commit data
 220 * @dreq - direct request possibly spanning multiple servers
 221 * @data - commit data to validate against previously seen verf
 222 *
 223 * returns result of comparison between @data->verf and the verf of
 224 * the server used by @data (DS or MDS)
 225 */
 226static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
 227					   struct nfs_commit_data *data)
 228{
 229	struct nfs_writeverf *verfp;
 230
 231	verfp = nfs_direct_select_verf(dreq, data->ds_clp,
 232					 data->ds_commit_index);
 233
 234	/* verifier not set so always fail */
 235	if (verfp->committed < 0)
 236		return 1;
 237
 238	return memcmp(verfp, &data->verf, sizeof(struct nfs_writeverf));
 239}
 240
 241/**
 242 * nfs_direct_IO - NFS address space operation for direct I/O
 243 * @iocb: target I/O control block
  244 * @iter: iov_iter that defines the I/O buffer
  245 * @pos: offset in file to begin the operation
 247 *
 248 * The presence of this routine in the address space ops vector means
 249 * the NFS client supports direct I/O. However, for most direct IO, we
 250 * shunt off direct read and write requests before the VFS gets them,
 251 * so this method is only ever called for swap.
 252 */
 253ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 254{
 255	struct inode *inode = iocb->ki_filp->f_mapping->host;
 256
 257	/* we only support swap file calling nfs_direct_IO */
 258	if (!IS_SWAPFILE(inode))
 259		return 0;
 260
 261	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
 262
 263	if (iov_iter_rw(iter) == READ)
 264		return nfs_file_direct_read(iocb, iter, pos);
 265	return nfs_file_direct_write(iocb, iter);
 266}
 267
 268static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 269{
 270	unsigned int i;
 271	for (i = 0; i < npages; i++)
 272		put_page(pages[i]);
 273}
 274
 275void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
 276			      struct nfs_direct_req *dreq)
 277{
 278	cinfo->lock = &dreq->inode->i_lock;
 279	cinfo->mds = &dreq->mds_cinfo;
 280	cinfo->ds = &dreq->ds_cinfo;
 281	cinfo->dreq = dreq;
 282	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
 283}
 284
 285static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
 286					     struct nfs_pageio_descriptor *pgio,
 287					     struct nfs_page *req)
 288{
 289	int mirror_count = 1;
 290
 291	if (pgio->pg_ops->pg_get_mirror_count)
 292		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
 293
 294	dreq->mirror_count = mirror_count;
 295}
 296
 297static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 298{
 299	struct nfs_direct_req *dreq;
 300
 301	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
 302	if (!dreq)
 303		return NULL;
 304
 305	kref_init(&dreq->kref);
 306	kref_get(&dreq->kref);
 307	init_completion(&dreq->completion);
 308	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
 309	dreq->verf.committed = NFS_INVALID_STABLE_HOW;	/* not set yet */
 310	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
 311	dreq->mirror_count = 1;
 312	spin_lock_init(&dreq->lock);
 313
 314	return dreq;
 315}
 316
 317static void nfs_direct_req_free(struct kref *kref)
 318{
 319	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
 320
 321	nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
 322	if (dreq->l_ctx != NULL)
 323		nfs_put_lock_context(dreq->l_ctx);
 324	if (dreq->ctx != NULL)
 325		put_nfs_open_context(dreq->ctx);
 326	kmem_cache_free(nfs_direct_cachep, dreq);
 327}
 328
 329static void nfs_direct_req_release(struct nfs_direct_req *dreq)
 330{
 331	kref_put(&dreq->kref, nfs_direct_req_free);
 332}
 333
 334ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
 335{
 336	return dreq->bytes_left;
 337}
 338EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
 339
 340/*
 341 * Collects and returns the final error value/byte-count.
 342 */
 343static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
 344{
 345	ssize_t result = -EIOCBQUEUED;
 346
 347	/* Async requests don't wait here */
 348	if (dreq->iocb)
 349		goto out;
 350
 351	result = wait_for_completion_killable(&dreq->completion);
 352
 353	if (!result)
 354		result = dreq->error;
 355	if (!result)
 356		result = dreq->count;
 357
 358out:
 359	return (ssize_t) result;
 360}
 361
 362/*
 363 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 364 * the iocb is still valid here if this is a synchronous request.
 365 */
 366static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
 367{
 368	struct inode *inode = dreq->inode;
 369
 370	if (dreq->iocb && write) {
 371		loff_t pos = dreq->iocb->ki_pos + dreq->count;
 372
 373		spin_lock(&inode->i_lock);
 374		if (i_size_read(inode) < pos)
 375			i_size_write(inode, pos);
 376		spin_unlock(&inode->i_lock);
 377	}
 378
 379	if (write)
 380		nfs_zap_mapping(inode, inode->i_mapping);
 381
 382	inode_dio_end(inode);
 383
 384	if (dreq->iocb) {
 385		long res = (long) dreq->error;
 386		if (!res)
 387			res = (long) dreq->count;
 388		dreq->iocb->ki_complete(dreq->iocb, res, 0);
 389	}
 390
 391	complete_all(&dreq->completion);
 392
 393	nfs_direct_req_release(dreq);
 394}
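/*
 * Example of the write-side size update above: an async O_DIRECT write
 * of 8192 bytes at ki_pos 4096 yields pos = 12288, so a 4096-byte
 * i_size is extended to 12288 before the mapping is zapped, and the
 * cached size cannot lag the write just completed.
 */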
 395
 396static void nfs_direct_readpage_release(struct nfs_page *req)
 397{
 398	dprintk("NFS: direct read done (%s/%llu %d@%lld)\n",
 399		d_inode(req->wb_context->dentry)->i_sb->s_id,
 400		(unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)),
 401		req->wb_bytes,
 402		(long long)req_offset(req));
 403	nfs_release_request(req);
 404}
 405
 406static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 407{
 408	unsigned long bytes = 0;
 409	struct nfs_direct_req *dreq = hdr->dreq;
 410
 411	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
 412		goto out_put;
 413
 414	spin_lock(&dreq->lock);
 415	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
 416		dreq->error = hdr->error;
 417	else
 418		nfs_direct_good_bytes(dreq, hdr);
 419
 420	spin_unlock(&dreq->lock);
 421
 422	while (!list_empty(&hdr->pages)) {
 423		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 424		struct page *page = req->wb_page;
 425
 426		if (!PageCompound(page) && bytes < hdr->good_bytes)
 427			set_page_dirty(page);
 428		bytes += req->wb_bytes;
 429		nfs_list_remove_request(req);
 430		nfs_direct_readpage_release(req);
 431	}
 432out_put:
 433	if (put_dreq(dreq))
 434		nfs_direct_complete(dreq, false);
 435	hdr->release(hdr);
 436}
 437
 438static void nfs_read_sync_pgio_error(struct list_head *head)
 439{
 440	struct nfs_page *req;
 441
 442	while (!list_empty(head)) {
 443		req = nfs_list_entry(head->next);
 444		nfs_list_remove_request(req);
 445		nfs_release_request(req);
 446	}
 447}
 448
 449static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
 450{
 451	get_dreq(hdr->dreq);
 452}
 453
 454static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
 455	.error_cleanup = nfs_read_sync_pgio_error,
 456	.init_hdr = nfs_direct_pgio_init,
 457	.completion = nfs_direct_read_completion,
 458};
 459
 460/*
 461 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
  462 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
  463 * fails, bail and stop sending more reads.  Read length accounting is
  464 * handled automatically by nfs_direct_read_completion().  Otherwise,
  465 * if no requests have been sent, just return an error.
 466 */
 467
 468static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 469					      struct iov_iter *iter,
 470					      loff_t pos)
 471{
 472	struct nfs_pageio_descriptor desc;
 473	struct inode *inode = dreq->inode;
 474	ssize_t result = -EINVAL;
 475	size_t requested_bytes = 0;
 476	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);
 477
 478	nfs_pageio_init_read(&desc, dreq->inode, false,
 479			     &nfs_direct_read_completion_ops);
 480	get_dreq(dreq);
 481	desc.pg_dreq = dreq;
 482	inode_dio_begin(inode);
 483
 484	while (iov_iter_count(iter)) {
 485		struct page **pagevec;
 486		size_t bytes;
 487		size_t pgbase;
 488		unsigned npages, i;
 489
 490		result = iov_iter_get_pages_alloc(iter, &pagevec, 
 491						  rsize, &pgbase);
 492		if (result < 0)
 493			break;
  494
 495		bytes = result;
 496		iov_iter_advance(iter, bytes);
 497		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
 498		for (i = 0; i < npages; i++) {
 499			struct nfs_page *req;
 500			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
 501			/* XXX do we need to do the eof zeroing found in async_filler? */
 502			req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
 503						 pgbase, req_len);
 504			if (IS_ERR(req)) {
 505				result = PTR_ERR(req);
 506				break;
 507			}
 508			req->wb_index = pos >> PAGE_SHIFT;
 509			req->wb_offset = pos & ~PAGE_MASK;
 510			if (!nfs_pageio_add_request(&desc, req)) {
 511				result = desc.pg_error;
 512				nfs_release_request(req);
 513				break;
 514			}
 515			pgbase = 0;
 516			bytes -= req_len;
 517			requested_bytes += req_len;
 518			pos += req_len;
 519			dreq->bytes_left -= req_len;
 520		}
 521		nfs_direct_release_pages(pagevec, npages);
 522		kvfree(pagevec);
 523		if (result < 0)
 524			break;
 525	}
 526
 527	nfs_pageio_complete(&desc);
 528
 529	/*
 530	 * If no bytes were started, return the error, and let the
 531	 * generic layer handle the completion.
 532	 */
 533	if (requested_bytes == 0) {
 534		inode_dio_end(inode);
 535		nfs_direct_req_release(dreq);
 536		return result < 0 ? result : -EIO;
 537	}
 538
 539	if (put_dreq(dreq))
 540		nfs_direct_complete(dreq, false);
 541	return 0;
 542}
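/*
 * Worked example of the paging math above, assuming 4096-byte pages:
 * iov_iter_get_pages_alloc() returning result = 10000 at pgbase = 512
 * gives npages = (10000 + 512 + 4095) / 4096 = 3, and the inner loop
 * builds requests of 3584, 4096 and 2320 bytes.
 */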
 543
 544/**
 545 * nfs_file_direct_read - file direct read operation for NFS files
 546 * @iocb: target I/O control block
 547 * @iter: vector of user buffers into which to read data
 548 * @pos: byte offset in file where reading starts
 549 *
 550 * We use this function for direct reads instead of calling
 551 * generic_file_aio_read() in order to avoid gfar's check to see if
 552 * the request starts before the end of the file.  For that check
 553 * to work, we must generate a GETATTR before each direct read, and
 554 * even then there is a window between the GETATTR and the subsequent
 555 * READ where the file size could change.  Our preference is simply
 556 * to do all reads the application wants, and the server will take
 557 * care of managing the end of file boundary.
 558 *
 559 * This function also eliminates unnecessarily updating the file's
 560 * atime locally, as the NFS server sets the file's atime, and this
 561 * client must read the updated atime from the server back into its
 562 * cache.
 563 */
 564ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
 565				loff_t pos)
 566{
 567	struct file *file = iocb->ki_filp;
 568	struct address_space *mapping = file->f_mapping;
 569	struct inode *inode = mapping->host;
 570	struct nfs_direct_req *dreq;
 571	struct nfs_lock_context *l_ctx;
 572	ssize_t result = -EINVAL;
 573	size_t count = iov_iter_count(iter);
 574	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 575
 576	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
 577		file, count, (long long) pos);
 578
 579	result = 0;
 580	if (!count)
 581		goto out;
 582
 583	inode_lock(inode);
 584	result = nfs_sync_mapping(mapping);
 585	if (result)
 586		goto out_unlock;
 587
 588	task_io_account_read(count);
 589
 590	result = -ENOMEM;
 591	dreq = nfs_direct_req_alloc();
 592	if (dreq == NULL)
 593		goto out_unlock;
 594
 595	dreq->inode = inode;
 596	dreq->bytes_left = count;
 597	dreq->io_start = pos;
 598	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
 599	l_ctx = nfs_get_lock_context(dreq->ctx);
 600	if (IS_ERR(l_ctx)) {
 601		result = PTR_ERR(l_ctx);
 602		goto out_release;
 603	}
 604	dreq->l_ctx = l_ctx;
 605	if (!is_sync_kiocb(iocb))
 606		dreq->iocb = iocb;
 607
 608	NFS_I(inode)->read_io += count;
 609	result = nfs_direct_read_schedule_iovec(dreq, iter, pos);
 610
 611	inode_unlock(inode);
 612
 613	if (!result) {
 614		result = nfs_direct_wait(dreq);
 615		if (result > 0)
 616			iocb->ki_pos = pos + result;
 617	}
 618
 619	nfs_direct_req_release(dreq);
 620	return result;
 621
 622out_release:
 623	nfs_direct_req_release(dreq);
 624out_unlock:
 625	inode_unlock(inode);
 626out:
 627	return result;
 628}
 629
 630static void
 631nfs_direct_write_scan_commit_list(struct inode *inode,
 632				  struct list_head *list,
 633				  struct nfs_commit_info *cinfo)
 634{
 635	spin_lock(cinfo->lock);
 636#ifdef CONFIG_NFS_V4_1
 637	if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
 638		NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
 639#endif
 640	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
 641	spin_unlock(cinfo->lock);
 642}
 643
 644static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 645{
 646	struct nfs_pageio_descriptor desc;
 647	struct nfs_page *req, *tmp;
 648	LIST_HEAD(reqs);
 649	struct nfs_commit_info cinfo;
 650	LIST_HEAD(failed);
 651	int i;
 652
 653	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 654	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 655
 656	dreq->count = 0;
 657	for (i = 0; i < dreq->mirror_count; i++)
 658		dreq->mirrors[i].count = 0;
 659	get_dreq(dreq);
 660
 661	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
 662			      &nfs_direct_write_completion_ops);
 663	desc.pg_dreq = dreq;
 664
 665	req = nfs_list_entry(reqs.next);
 666	nfs_direct_setup_mirroring(dreq, &desc, req);
 667	if (desc.pg_error < 0) {
 668		list_splice_init(&reqs, &failed);
 669		goto out_failed;
 670	}
 671
 672	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
 673		if (!nfs_pageio_add_request(&desc, req)) {
 674			nfs_list_remove_request(req);
 675			nfs_list_add_request(req, &failed);
 676			spin_lock(cinfo.lock);
 677			dreq->flags = 0;
 678			if (desc.pg_error < 0)
 679				dreq->error = desc.pg_error;
 680			else
 681				dreq->error = -EIO;
 682			spin_unlock(cinfo.lock);
 683		}
 684		nfs_release_request(req);
 685	}
 686	nfs_pageio_complete(&desc);
 687
 688out_failed:
 689	while (!list_empty(&failed)) {
 690		req = nfs_list_entry(failed.next);
 691		nfs_list_remove_request(req);
 692		nfs_unlock_and_release_request(req);
 693	}
 694
 695	if (put_dreq(dreq))
 696		nfs_direct_write_complete(dreq, dreq->inode);
 697}
 698
 699static void nfs_direct_commit_complete(struct nfs_commit_data *data)
 700{
 701	struct nfs_direct_req *dreq = data->dreq;
 702	struct nfs_commit_info cinfo;
 703	struct nfs_page *req;
 704	int status = data->task.tk_status;
 705
 706	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 707	if (status < 0) {
 708		dprintk("NFS: %5u commit failed with error %d.\n",
 709			data->task.tk_pid, status);
 710		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 711	} else if (nfs_direct_cmp_commit_data_verf(dreq, data)) {
 712		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
 713		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 714	}
 715
 716	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
 717	while (!list_empty(&data->pages)) {
 718		req = nfs_list_entry(data->pages.next);
 719		nfs_list_remove_request(req);
 720		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
 721			/* Note the rewrite will go through mds */
 722			nfs_mark_request_commit(req, NULL, &cinfo, 0);
 723		} else
 724			nfs_release_request(req);
 725		nfs_unlock_and_release_request(req);
 726	}
 727
 728	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
 729		nfs_direct_write_complete(dreq, data->inode);
 730}
 731
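/*
 * A request on the commit list must be resent: flag the whole direct
 * request for rescheduling and put the page back on the commit list.
 */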
 732static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
 733		struct nfs_page *req)
 734{
 735	struct nfs_direct_req *dreq = cinfo->dreq;
 736
 737	spin_lock(&dreq->lock);
 738	dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 739	spin_unlock(&dreq->lock);
 740	nfs_mark_request_commit(req, NULL, cinfo, 0);
 741}
 742
 743static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
 744	.completion = nfs_direct_commit_complete,
 745	.resched_write = nfs_direct_resched_write,
 746};
 747
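/*
 * Gather everything awaiting commit and issue the COMMIT RPCs.  If
 * that fails (-ENOMEM), fall back to rewriting the data as stable
 * writes instead.
 */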
 748static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 749{
 750	int res;
 751	struct nfs_commit_info cinfo;
 752	LIST_HEAD(mds_list);
 753
 754	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 755	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
 756	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
 757	if (res < 0) /* res == -ENOMEM */
 758		nfs_direct_write_reschedule(dreq);
 759}
 760
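/*
 * Workqueue handler that finishes off a direct write, based on how
 * the servers replied: send a COMMIT, resend the writes, or declare
 * the whole request complete.  The flags are cleared before being
 * acted upon.
 */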
 761static void nfs_direct_write_schedule_work(struct work_struct *work)
 762{
 763	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
 764	int flags = dreq->flags;
 765
 766	dreq->flags = 0;
 767	switch (flags) {
 768		case NFS_ODIRECT_DO_COMMIT:
 769			nfs_direct_commit_schedule(dreq);
 770			break;
 771		case NFS_ODIRECT_RESCHED_WRITES:
 772			nfs_direct_write_reschedule(dreq);
 773			break;
 774		default:
 775			nfs_direct_complete(dreq, true);
 776	}
 777}
 778
 779static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
 780{
 781	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
 782}
 783
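/*
 * Per-WRITE completion.  Under dreq->lock, account the bytes that
 * made it to the server and track the write verifier: an unstable
 * reply schedules a COMMIT, and a verifier mismatch across replies
 * forces a full reschedule.  Pages are then queued for commit or
 * released; the final reference kicks nfs_direct_write_complete().
 */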
 784static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 785{
 786	struct nfs_direct_req *dreq = hdr->dreq;
 787	struct nfs_commit_info cinfo;
 788	bool request_commit = false;
 789	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 790
 791	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
 792		goto out_put;
 793
 794	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 795
 796	spin_lock(&dreq->lock);
 797
 798	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
 799		dreq->flags = 0;
 800		dreq->error = hdr->error;
 801	}
 802	if (dreq->error == 0) {
 803		nfs_direct_good_bytes(dreq, hdr);
 804		if (nfs_write_need_commit(hdr)) {
 805			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
 806				request_commit = true;
 807			else if (dreq->flags == 0) {
 808				nfs_direct_set_hdr_verf(dreq, hdr);
 809				request_commit = true;
 810				dreq->flags = NFS_ODIRECT_DO_COMMIT;
 811			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
 812				request_commit = true;
 813				if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr))
 814					dreq->flags =
 815						NFS_ODIRECT_RESCHED_WRITES;
 816			}
 817		}
 818	}
 819	spin_unlock(&dreq->lock);
 820
 821	while (!list_empty(&hdr->pages)) {
 823		req = nfs_list_entry(hdr->pages.next);
 824		nfs_list_remove_request(req);
 825		if (request_commit) {
 826			kref_get(&req->wb_kref);
 827			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
 828				hdr->ds_commit_idx);
 829		}
 830		nfs_unlock_and_release_request(req);
 831	}
 832
 833out_put:
 834	if (put_dreq(dreq))
 835		nfs_direct_write_complete(dreq, hdr->inode);
 836	hdr->release(hdr);
 837}
 838
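/*
 * Pageio error cleanup: unlock and release every request that was
 * never submitted to the server.
 */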
 839static void nfs_write_sync_pgio_error(struct list_head *head)
 840{
 841	struct nfs_page *req;
 842
 843	while (!list_empty(head)) {
 844		req = nfs_list_entry(head->next);
 845		nfs_list_remove_request(req);
 846		nfs_unlock_and_release_request(req);
 847	}
 848}
 849
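/*
 * An I/O error is being retried.  Flag the direct request for
 * rescheduling and dress the header up as a successful unstable
 * write so that the generic NFS code resends all of its pages.
 */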
 850static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
 851{
 852	struct nfs_direct_req *dreq = hdr->dreq;
 853
 854	spin_lock(&dreq->lock);
 855	if (dreq->error == 0) {
 856		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 857		/* fake unstable write to let common nfs resend pages */
 858		hdr->verf.committed = NFS_UNSTABLE;
 859		hdr->good_bytes = hdr->args.count;
 860	}
 861	spin_unlock(&dreq->lock);
 862}
 863
 864static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
 865	.error_cleanup = nfs_write_sync_pgio_error,
 866	.init_hdr = nfs_direct_pgio_init,
 867	.completion = nfs_direct_write_completion,
 868	.reschedule_io = nfs_direct_write_reschedule_io,
 869};
 870
 871
  872/*
  873 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
  874 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
  875 * fails, bail and stop sending more writes.  Accounting for the
  876 * writes already sent is handled by nfs_direct_write_completion().
  877 * Otherwise, if no requests have been sent, just return an error.
  878 *
  879 * NB: Return the value of the first error return code.  Subsequent
  880 *     errors after the first one are ignored.
  881 */
 883static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 884					       struct iov_iter *iter,
 885					       loff_t pos)
 886{
 887	struct nfs_pageio_descriptor desc;
 888	struct inode *inode = dreq->inode;
 889	ssize_t result = 0;
 890	size_t requested_bytes = 0;
 891	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
 892
 893	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
 894			      &nfs_direct_write_completion_ops);
 895	desc.pg_dreq = dreq;
 896	get_dreq(dreq);
 897	inode_dio_begin(inode);
 898
 899	NFS_I(inode)->write_io += iov_iter_count(iter);
 900	while (iov_iter_count(iter)) {
 901		struct page **pagevec;
 902		size_t bytes;
 903		size_t pgbase;
 904		unsigned npages, i;
 905
  906	result = iov_iter_get_pages_alloc(iter, &pagevec,
  907					  wsize, &pgbase);
 908		if (result < 0)
 909			break;
 910
 911		bytes = result;
 912		iov_iter_advance(iter, bytes);
 913		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
 914		for (i = 0; i < npages; i++) {
 915			struct nfs_page *req;
 916			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
 917
 918			req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
 919						 pgbase, req_len);
 920			if (IS_ERR(req)) {
 921				result = PTR_ERR(req);
 922				break;
 923			}
 924
 925			nfs_direct_setup_mirroring(dreq, &desc, req);
 926			if (desc.pg_error < 0) {
 927				nfs_free_request(req);
 928				result = desc.pg_error;
 929				break;
 930			}
 931
 932			nfs_lock_request(req);
 933			req->wb_index = pos >> PAGE_SHIFT;
 934			req->wb_offset = pos & ~PAGE_MASK;
 935			if (!nfs_pageio_add_request(&desc, req)) {
 936				result = desc.pg_error;
 937				nfs_unlock_and_release_request(req);
 938				break;
 939			}
 940			pgbase = 0;
 941			bytes -= req_len;
 942			requested_bytes += req_len;
 943			pos += req_len;
 944			dreq->bytes_left -= req_len;
 945		}
 946		nfs_direct_release_pages(pagevec, npages);
 947		kvfree(pagevec);
 948		if (result < 0)
 949			break;
 950	}
 951	nfs_pageio_complete(&desc);
 952
 953	/*
 954	 * If no bytes were started, return the error, and let the
 955	 * generic layer handle the completion.
 956	 */
 957	if (requested_bytes == 0) {
 958		inode_dio_end(inode);
 959		nfs_direct_req_release(dreq);
 960		return result < 0 ? result : -EIO;
 961	}
 962
 963	if (put_dreq(dreq))
 964		nfs_direct_write_complete(dreq, dreq->inode);
 965	return 0;
 966}
 967
 968/**
 969 * nfs_file_direct_write - file direct write operation for NFS files
 970 * @iocb: target I/O control block
 971 * @iter: vector of user buffers from which to write data
  973 *
  974 * We use this function for direct writes instead of calling
  975 * generic_file_write_iter() in order to avoid taking the inode
  976 * semaphore and updating the i_size.  The NFS server will set
 977 * the new i_size and this client must read the updated size
 978 * back into its cache.  We let the server do generic write
 979 * parameter checking and report problems.
 980 *
 981 * We eliminate local atime updates, see direct read above.
 982 *
 983 * We avoid unnecessary page cache invalidations for normal cached
 984 * readers of this file.
 985 *
 986 * Note that O_APPEND is not supported for NFS direct writes, as there
 987 * is no atomic O_APPEND write facility in the NFS protocol.
 988 */
 989ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 990{
 991	ssize_t result = -EINVAL;
 992	struct file *file = iocb->ki_filp;
 993	struct address_space *mapping = file->f_mapping;
 994	struct inode *inode = mapping->host;
 995	struct nfs_direct_req *dreq;
 996	struct nfs_lock_context *l_ctx;
 997	loff_t pos, end;
 998
 999	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
1000		file, iov_iter_count(iter), (long long) iocb->ki_pos);
1001
1002	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES,
1003		      iov_iter_count(iter));
1004
1005	pos = iocb->ki_pos;
1006	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
1007
1008	inode_lock(inode);
1009
1010	result = nfs_sync_mapping(mapping);
1011	if (result)
1012		goto out_unlock;
1013
1014	if (mapping->nrpages) {
1015		result = invalidate_inode_pages2_range(mapping,
1016					pos >> PAGE_SHIFT, end);
1017		if (result)
1018			goto out_unlock;
1019	}
1020
1021	task_io_account_write(iov_iter_count(iter));
1022
1023	result = -ENOMEM;
1024	dreq = nfs_direct_req_alloc();
1025	if (!dreq)
1026		goto out_unlock;
1027
1028	dreq->inode = inode;
1029	dreq->bytes_left = iov_iter_count(iter);
1030	dreq->io_start = pos;
1031	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
1032	l_ctx = nfs_get_lock_context(dreq->ctx);
1033	if (IS_ERR(l_ctx)) {
1034		result = PTR_ERR(l_ctx);
1035		goto out_release;
1036	}
1037	dreq->l_ctx = l_ctx;
1038	if (!is_sync_kiocb(iocb))
1039		dreq->iocb = iocb;
1040
1041	result = nfs_direct_write_schedule_iovec(dreq, iter, pos);
1042
1043	if (mapping->nrpages) {
1044		invalidate_inode_pages2_range(mapping,
1045					      pos >> PAGE_SHIFT, end);
1046	}
1047
1048	inode_unlock(inode);
1049
1050	if (!result) {
1051		result = nfs_direct_wait(dreq);
1052		if (result > 0) {
1053			struct inode *inode = mapping->host;
1054
1055			iocb->ki_pos = pos + result;
1056			spin_lock(&inode->i_lock);
1057			if (i_size_read(inode) < iocb->ki_pos)
1058				i_size_write(inode, iocb->ki_pos);
1059			spin_unlock(&inode->i_lock);
1060			generic_write_sync(file, pos, result);
1061		}
1062	}
1063	nfs_direct_req_release(dreq);
1064	return result;
1065
1066out_release:
1067	nfs_direct_req_release(dreq);
1068out_unlock:
1069	inode_unlock(inode);
1070	return result;
1071}
1072
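/*
 * How the path above is reached, illustratively: an application opens
 * a file on an NFS mount with O_DIRECT and calls write(2); the data
 * then bypasses the page cache entirely.  Hypothetical userspace
 * sketch (file name and variables are examples only):
 *
 *	int fd = open("/mnt/nfs/file", O_WRONLY | O_DIRECT);
 *	ssize_t n = write(fd, buf, len);
 */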
1073/**
1074 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
1075 *
1076 */
1077int __init nfs_init_directcache(void)
1078{
1079	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
1080						sizeof(struct nfs_direct_req),
1081						0, (SLAB_RECLAIM_ACCOUNT|
1082							SLAB_MEM_SPREAD),
1083						NULL);
1084	if (nfs_direct_cachep == NULL)
1085		return -ENOMEM;
1086
1087	return 0;
1088}
1089
1090/**
1091 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
1092 *
1093 */
1094void nfs_destroy_directcache(void)
1095{
1096	kmem_cache_destroy(nfs_direct_cachep);
1097}