v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/fs/nfs/direct.c
   4 *
   5 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
   6 *
   7 * High-performance uncached I/O for the Linux NFS client
   8 *
   9 * There are important applications whose performance or correctness
  10 * depends on uncached access to file data.  Database clusters
  11 * (multiple copies of the same instance running on separate hosts)
  12 * implement their own cache coherency protocol that subsumes file
  13 * system cache protocols.  Applications that process datasets
  14 * considerably larger than the client's memory do not always benefit
  15 * from a local cache.  A streaming video server, for instance, has no
  16 * need to cache the contents of a file.
  17 *
  18 * When an application requests uncached I/O, all read and write requests
  19 * are made directly to the server; data stored or fetched via these
  20 * requests is not cached in the Linux page cache.  The client does not
  21 * correct unaligned requests from applications.  All requested bytes are
  22 * held on permanent storage before a direct write system call returns to
  23 * an application.
  24 *
  25 * Solaris implements an uncached I/O facility called directio() that
  26 * is used for backups and sequential I/O to very large files.  Solaris
  27 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
  28 * an undocumented mount option.
  29 *
  30 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
  31 * help from Andrew Morton.
  32 *
  33 * 18 Dec 2001	Initial implementation for 2.4  --cel
  34 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
  35 * 08 Jun 2003	Port to 2.5 APIs  --cel
  36 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
  37 * 15 Sep 2004	Parallel async reads  --cel
  38 * 04 May 2005	support O_DIRECT with aio  --cel
  39 *
  40 */
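/*
 * A minimal userspace sketch of the semantics described above (hypothetical
 * path; buffer, offset and length are kept aligned because, as noted, the
 * client does not correct unaligned requests):
 *
 *	int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *	void *buf;
 *
 *	posix_memalign(&buf, 4096, 4096);
 *	pread(fd, buf, 4096, 0);	(READ sent straight to the server)
 *	pwrite(fd, buf, 4096, 0);	(on stable storage before it returns)
 */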
  41
  42#include <linux/errno.h>
  43#include <linux/sched.h>
  44#include <linux/kernel.h>
  45#include <linux/file.h>
  46#include <linux/pagemap.h>
  47#include <linux/kref.h>
  48#include <linux/slab.h>
  49#include <linux/task_io_accounting_ops.h>
  50#include <linux/module.h>
  51
  52#include <linux/nfs_fs.h>
  53#include <linux/nfs_page.h>
  54#include <linux/sunrpc/clnt.h>
  55
  56#include <linux/uaccess.h>
  57#include <linux/atomic.h>
  58
  59#include "internal.h"
  60#include "iostat.h"
  61#include "pnfs.h"
  62#include "fscache.h"
  63#include "nfstrace.h"
  64
  65#define NFSDBG_FACILITY		NFSDBG_VFS
  66
  67static struct kmem_cache *nfs_direct_cachep;
  68
  69static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
  70static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
  71static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
  72static void nfs_direct_write_schedule_work(struct work_struct *work);
  73
  74static inline void get_dreq(struct nfs_direct_req *dreq)
  75{
  76	atomic_inc(&dreq->io_count);
  77}
  78
  79static inline int put_dreq(struct nfs_direct_req *dreq)
  80{
  81	return atomic_dec_and_test(&dreq->io_count);
  82}
  83
  84static void
  85nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
  86			    const struct nfs_pgio_header *hdr,
  87			    ssize_t dreq_len)
  88{
  89	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
  90	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
  91		return;
  92	if (dreq->max_count >= dreq_len) {
  93		dreq->max_count = dreq_len;
  94		if (dreq->count > dreq_len)
  95			dreq->count = dreq_len;
  96	}
  97
  98	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error)
  99		dreq->error = hdr->error;
 100}
 101
 102static void
 103nfs_direct_count_bytes(struct nfs_direct_req *dreq,
 104		       const struct nfs_pgio_header *hdr)
 105{
 106	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
 107	ssize_t dreq_len = 0;
 108
 109	if (hdr_end > dreq->io_start)
 110		dreq_len = hdr_end - dreq->io_start;
 111
 112	nfs_direct_handle_truncated(dreq, hdr, dreq_len);
 113
 114	if (dreq_len > dreq->max_count)
 115		dreq_len = dreq->max_count;
 116
 117	if (dreq->count < dreq_len)
 118		dreq->count = dreq_len;
 119}
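/*
 * A worked example of the accounting above (hypothetical numbers): with
 * dreq->io_start == 0 and max_count == 16384, an hdr with io_start == 8192
 * and good_bytes == 4096 gives hdr_end == 12288, so dreq_len == 12288.  If
 * that hdr also carries NFS_IOHDR_ERROR or NFS_IOHDR_EOF, max_count is
 * clamped to 12288 so that later completions can never account bytes past
 * the failure/EOF point; otherwise dreq->count only grows toward max_count.
 */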
 120
 121static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
 122					struct nfs_page *req)
 123{
 124	loff_t offs = req_offset(req);
 125	size_t req_start = (size_t)(offs - dreq->io_start);
 126
 127	if (req_start < dreq->max_count)
 128		dreq->max_count = req_start;
 129	if (req_start < dreq->count)
 130		dreq->count = req_start;
 131}
 132
 133/**
 134 * nfs_swap_rw - NFS address space operation for swap I/O
 135 * @iocb: target I/O control block
 136 * @iter: I/O buffer
 137 *
 138 * Perform IO to the swap-file.  This is much like direct IO.
 139 */
 140int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
 141{
 142	ssize_t ret;
 143
 144	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
 145
 146	if (iov_iter_rw(iter) == READ)
 147		ret = nfs_file_direct_read(iocb, iter, true);
 148	else
 149		ret = nfs_file_direct_write(iocb, iter, true);
 150	if (ret < 0)
 151		return ret;
 152	return 0;
 153}
 154
 155static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 156{
 157	unsigned int i;
 158	for (i = 0; i < npages; i++)
 159		put_page(pages[i]);
 160}
 161
 162void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
 163			      struct nfs_direct_req *dreq)
 164{
 165	cinfo->inode = dreq->inode;
 166	cinfo->mds = &dreq->mds_cinfo;
 167	cinfo->ds = &dreq->ds_cinfo;
 168	cinfo->dreq = dreq;
 169	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
 170}
 171
 172static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 173{
 174	struct nfs_direct_req *dreq;
 175
 176	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
 177	if (!dreq)
 178		return NULL;
 179
 180	kref_init(&dreq->kref);
 181	kref_get(&dreq->kref);
 182	init_completion(&dreq->completion);
 183	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
 184	pnfs_init_ds_commit_info(&dreq->ds_cinfo);
 185	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
 186	spin_lock_init(&dreq->lock);
 187
 188	return dreq;
 189}
 190
 191static void nfs_direct_req_free(struct kref *kref)
 192{
 193	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
 194
 195	pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
 196	if (dreq->l_ctx != NULL)
 197		nfs_put_lock_context(dreq->l_ctx);
 198	if (dreq->ctx != NULL)
 199		put_nfs_open_context(dreq->ctx);
 200	kmem_cache_free(nfs_direct_cachep, dreq);
 201}
 202
 203static void nfs_direct_req_release(struct nfs_direct_req *dreq)
 204{
 205	kref_put(&dreq->kref, nfs_direct_req_free);
 206}
 207
 208ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset)
 209{
 210	loff_t start = offset - dreq->io_start;
 211	return dreq->max_count - start;
 212}
 213EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
 214
 215/*
 216 * Collects and returns the final error value/byte-count.
 217 */
 218static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
 219{
 220	ssize_t result = -EIOCBQUEUED;
 221
 222	/* Async requests don't wait here */
 223	if (dreq->iocb)
 224		goto out;
 225
 226	result = wait_for_completion_killable(&dreq->completion);
 227
 228	if (!result) {
 229		result = dreq->count;
 230		WARN_ON_ONCE(dreq->count < 0);
 231	}
 232	if (!result)
 233		result = dreq->error;
 234
 235out:
 236	return (ssize_t) result;
 237}
 238
 239/*
 240 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 241 * the iocb is still valid here if this is a synchronous request.
 242 */
 243static void nfs_direct_complete(struct nfs_direct_req *dreq)
 244{
 245	struct inode *inode = dreq->inode;
 246
 247	inode_dio_end(inode);
 248
 249	if (dreq->iocb) {
 250		long res = (long) dreq->error;
 251		if (dreq->count != 0) {
 252			res = (long) dreq->count;
 253			WARN_ON_ONCE(dreq->count < 0);
 254		}
 255		dreq->iocb->ki_complete(dreq->iocb, res);
 256	}
 257
 258	complete(&dreq->completion);
 259
 260	nfs_direct_req_release(dreq);
 261}
 262
 263static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 264{
 265	unsigned long bytes = 0;
 266	struct nfs_direct_req *dreq = hdr->dreq;
 267
 268	spin_lock(&dreq->lock);
 269	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
 270		spin_unlock(&dreq->lock);
 271		goto out_put;
 272	}
 273
 274	nfs_direct_count_bytes(dreq, hdr);
 275	spin_unlock(&dreq->lock);
 276
 277	while (!list_empty(&hdr->pages)) {
 278		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 279		struct page *page = req->wb_page;
 280
 281		if (!PageCompound(page) && bytes < hdr->good_bytes &&
 282		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
 283			set_page_dirty(page);
 284		bytes += req->wb_bytes;
 285		nfs_list_remove_request(req);
 286		nfs_release_request(req);
 287	}
 288out_put:
 289	if (put_dreq(dreq))
 290		nfs_direct_complete(dreq);
 291	hdr->release(hdr);
 292}
 293
 294static void nfs_read_sync_pgio_error(struct list_head *head, int error)
 295{
 296	struct nfs_page *req;
 297
 298	while (!list_empty(head)) {
 299		req = nfs_list_entry(head->next);
 300		nfs_list_remove_request(req);
 301		nfs_release_request(req);
 302	}
 303}
 304
 305static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
 306{
 307	get_dreq(hdr->dreq);
 308}
 309
 310static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
 311	.error_cleanup = nfs_read_sync_pgio_error,
 312	.init_hdr = nfs_direct_pgio_init,
 313	.completion = nfs_direct_read_completion,
 314};
 315
 316/*
 317 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 318 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 319 * bail and stop sending more reads.  Read length accounting is
 320 * handled automatically by nfs_direct_read_result().  Otherwise, if
 321 * no requests have been sent, just return an error.
 322 */
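/*
 * A small worked example of the chunking below (hypothetical values, 4 KiB
 * pages): if iov_iter_get_pages_alloc2() returns result == 10000 with
 * pgbase == 512, then npages == (10000 + 512 + 4095) / 4096 == 3, and the
 * per-page request lengths min_t(size_t, bytes, PAGE_SIZE - pgbase) come
 * out as 3584, 4096 and 2320 bytes, pgbase being reset to 0 after the
 * first page.
 */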
 323
 324static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 325					      struct iov_iter *iter,
 326					      loff_t pos)
 327{
 328	struct nfs_pageio_descriptor desc;
 329	struct inode *inode = dreq->inode;
 330	ssize_t result = -EINVAL;
 331	size_t requested_bytes = 0;
 332	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);
 333
 334	nfs_pageio_init_read(&desc, dreq->inode, false,
 335			     &nfs_direct_read_completion_ops);
 336	get_dreq(dreq);
 337	desc.pg_dreq = dreq;
 338	inode_dio_begin(inode);
 339
 340	while (iov_iter_count(iter)) {
 341		struct page **pagevec;
 342		size_t bytes;
 343		size_t pgbase;
 344		unsigned npages, i;
 345
 346		result = iov_iter_get_pages_alloc2(iter, &pagevec,
 347						  rsize, &pgbase);
 348		if (result < 0)
 349			break;
 350	
 351		bytes = result;
 352		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
 353		for (i = 0; i < npages; i++) {
 354			struct nfs_page *req;
 355			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
 356			/* XXX do we need to do the eof zeroing found in async_filler? */
 357			req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
 358							pgbase, pos, req_len);
 359			if (IS_ERR(req)) {
 360				result = PTR_ERR(req);
 361				break;
 362			}
 363			if (!nfs_pageio_add_request(&desc, req)) {
 364				result = desc.pg_error;
 365				nfs_release_request(req);
 366				break;
 367			}
 368			pgbase = 0;
 369			bytes -= req_len;
 370			requested_bytes += req_len;
 371			pos += req_len;
 372		}
 373		nfs_direct_release_pages(pagevec, npages);
 374		kvfree(pagevec);
 375		if (result < 0)
 376			break;
 377	}
 378
 379	nfs_pageio_complete(&desc);
 380
 381	/*
 382	 * If no bytes were started, return the error, and let the
 383	 * generic layer handle the completion.
 384	 */
 385	if (requested_bytes == 0) {
 386		inode_dio_end(inode);
 387		nfs_direct_req_release(dreq);
 388		return result < 0 ? result : -EIO;
 389	}
 390
 391	if (put_dreq(dreq))
 392		nfs_direct_complete(dreq);
 393	return requested_bytes;
 394}
 395
 396/**
 397 * nfs_file_direct_read - file direct read operation for NFS files
 398 * @iocb: target I/O control block
 399 * @iter: vector of user buffers into which to read data
 400 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 401 *
 402 * We use this function for direct reads instead of calling
 403 * generic_file_aio_read() in order to avoid gfar's check to see if
 404 * the request starts before the end of the file.  For that check
 405 * to work, we must generate a GETATTR before each direct read, and
 406 * even then there is a window between the GETATTR and the subsequent
 407 * READ where the file size could change.  Our preference is simply
 408 * to do all reads the application wants, and the server will take
 409 * care of managing the end of file boundary.
 410 *
 411 * This function also eliminates unnecessarily updating the file's
 412 * atime locally, as the NFS server sets the file's atime, and this
 413 * client must read the updated atime from the server back into its
 414 * cache.
 415 */
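/*
 * For example (hypothetical sizes): with a 1000-byte file on the server,
 * a direct read of 4096 bytes at offset 0 is sent unmodified; the server
 * replies with 1000 bytes plus the EOF flag, and the application sees a
 * short read of 1000 rather than a client-side pre-check against a
 * possibly stale cached file size.
 */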
 416ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
 417			     bool swap)
 418{
 419	struct file *file = iocb->ki_filp;
 420	struct address_space *mapping = file->f_mapping;
 421	struct inode *inode = mapping->host;
 422	struct nfs_direct_req *dreq;
 423	struct nfs_lock_context *l_ctx;
 424	ssize_t result, requested;
 425	size_t count = iov_iter_count(iter);
 426	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 427
 428	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
 429		file, count, (long long) iocb->ki_pos);
 430
 431	result = 0;
 432	if (!count)
 433		goto out;
 434
 435	task_io_account_read(count);
 436
 437	result = -ENOMEM;
 438	dreq = nfs_direct_req_alloc();
 439	if (dreq == NULL)
 440		goto out;
 441
 442	dreq->inode = inode;
 443	dreq->max_count = count;
 444	dreq->io_start = iocb->ki_pos;
 445	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
 446	l_ctx = nfs_get_lock_context(dreq->ctx);
 447	if (IS_ERR(l_ctx)) {
 448		result = PTR_ERR(l_ctx);
 449		nfs_direct_req_release(dreq);
 450		goto out_release;
 451	}
 452	dreq->l_ctx = l_ctx;
 453	if (!is_sync_kiocb(iocb))
 454		dreq->iocb = iocb;
 455
 456	if (user_backed_iter(iter))
 457		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
 458
 459	if (!swap)
 460		nfs_start_io_direct(inode);
 461
 462	NFS_I(inode)->read_io += count;
 463	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
 464
 465	if (!swap)
 466		nfs_end_io_direct(inode);
 467
 468	if (requested > 0) {
 469		result = nfs_direct_wait(dreq);
 470		if (result > 0) {
 471			requested -= result;
 472			iocb->ki_pos += result;
 473		}
 474		iov_iter_revert(iter, requested);
 475	} else {
 476		result = requested;
 477	}
 478
 479out_release:
 480	nfs_direct_req_release(dreq);
 481out:
 482	return result;
 483}
 484
 485static void nfs_direct_add_page_head(struct list_head *list,
 486				     struct nfs_page *req)
 487{
 488	struct nfs_page *head = req->wb_head;
 489
 490	if (!list_empty(&head->wb_list) || !nfs_lock_request(head))
 491		return;
 492	if (!list_empty(&head->wb_list)) {
 493		nfs_unlock_request(head);
 494		return;
 495	}
 496	list_add(&head->wb_list, list);
 497	kref_get(&head->wb_kref);
 498	kref_get(&head->wb_kref);
 499}
 500
 501static void nfs_direct_join_group(struct list_head *list,
 502				  struct nfs_commit_info *cinfo,
 503				  struct inode *inode)
 504{
 505	struct nfs_page *req, *subreq;
 506
 507	list_for_each_entry(req, list, wb_list) {
 508		if (req->wb_head != req) {
 509			nfs_direct_add_page_head(&req->wb_list, req);
 510			continue;
 511		}
 512		subreq = req->wb_this_page;
 513		if (subreq == req)
 514			continue;
 515		do {
 516			/*
 517			 * Remove subrequests from this list before freeing
 518			 * them in the call to nfs_join_page_group().
 519			 */
 520			if (!list_empty(&subreq->wb_list)) {
 521				nfs_list_remove_request(subreq);
 522				nfs_release_request(subreq);
 523			}
 524		} while ((subreq = subreq->wb_this_page) != req);
 525		nfs_join_page_group(req, cinfo, inode);
 526	}
 527}
 528
 529static void
 530nfs_direct_write_scan_commit_list(struct inode *inode,
 531				  struct list_head *list,
 532				  struct nfs_commit_info *cinfo)
 533{
 534	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
 535	pnfs_recover_commit_reqs(list, cinfo);
 536	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
 537	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
 538}
 539
 540static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 541{
 542	struct nfs_pageio_descriptor desc;
 543	struct nfs_page *req;
 544	LIST_HEAD(reqs);
 545	struct nfs_commit_info cinfo;
 546
 547	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 548	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 549
 550	nfs_direct_join_group(&reqs, &cinfo, dreq->inode);
 551
 552	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
 553	get_dreq(dreq);
 554
 555	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
 556			      &nfs_direct_write_completion_ops);
 557	desc.pg_dreq = dreq;
 558
 559	while (!list_empty(&reqs)) {
 560		req = nfs_list_entry(reqs.next);
 561		/* Bump the transmission count */
 562		req->wb_nio++;
 563		if (!nfs_pageio_add_request(&desc, req)) {
 564			spin_lock(&dreq->lock);
 565			if (dreq->error < 0) {
 566				desc.pg_error = dreq->error;
 567			} else if (desc.pg_error != -EAGAIN) {
 568				dreq->flags = 0;
 569				if (!desc.pg_error)
 570					desc.pg_error = -EIO;
 571				dreq->error = desc.pg_error;
 572			} else
 573				dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 574			spin_unlock(&dreq->lock);
 575			break;
 576		}
 577		nfs_release_request(req);
 578	}
 579	nfs_pageio_complete(&desc);
 580
 581	while (!list_empty(&reqs)) {
 582		req = nfs_list_entry(reqs.next);
 583		nfs_list_remove_request(req);
 584		nfs_unlock_and_release_request(req);
 585		if (desc.pg_error == -EAGAIN) {
 586			nfs_mark_request_commit(req, NULL, &cinfo, 0);
 587		} else {
 588			spin_lock(&dreq->lock);
 589			nfs_direct_truncate_request(dreq, req);
 590			spin_unlock(&dreq->lock);
 591			nfs_release_request(req);
 592		}
 593	}
 594
 595	if (put_dreq(dreq))
 596		nfs_direct_write_complete(dreq);
 597}
 598
 599static void nfs_direct_commit_complete(struct nfs_commit_data *data)
 600{
 601	const struct nfs_writeverf *verf = data->res.verf;
 602	struct nfs_direct_req *dreq = data->dreq;
 603	struct nfs_commit_info cinfo;
 604	struct nfs_page *req;
 605	int status = data->task.tk_status;
 606
 607	trace_nfs_direct_commit_complete(dreq);
 608
 609	if (status < 0) {
 610		/* Errors in commit are fatal */
 611		dreq->error = status;
 612		dreq->flags = NFS_ODIRECT_DONE;
 613	} else {
 614		status = dreq->error;
 615	}
 616
 617	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 618
 619	while (!list_empty(&data->pages)) {
 620		req = nfs_list_entry(data->pages.next);
 621		nfs_list_remove_request(req);
 622		if (status < 0) {
 623			spin_lock(&dreq->lock);
 624			nfs_direct_truncate_request(dreq, req);
 625			spin_unlock(&dreq->lock);
 626			nfs_release_request(req);
 627		} else if (!nfs_write_match_verf(verf, req)) {
 628			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 629			/*
 630			 * Despite the reboot, the write was successful,
 631			 * so reset wb_nio.
 632			 */
 633			req->wb_nio = 0;
 634			nfs_mark_request_commit(req, NULL, &cinfo, 0);
 635		} else
 636			nfs_release_request(req);
 637		nfs_unlock_and_release_request(req);
 638	}
 639
 640	if (nfs_commit_end(cinfo.mds))
 641		nfs_direct_write_complete(dreq);
 642}
 643
 644static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
 645		struct nfs_page *req)
 646{
 647	struct nfs_direct_req *dreq = cinfo->dreq;
 648
 649	trace_nfs_direct_resched_write(dreq);
 650
 651	spin_lock(&dreq->lock);
 652	if (dreq->flags != NFS_ODIRECT_DONE)
 653		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 654	spin_unlock(&dreq->lock);
 655	nfs_mark_request_commit(req, NULL, cinfo, 0);
 656}
 657
 658static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
 659	.completion = nfs_direct_commit_complete,
 660	.resched_write = nfs_direct_resched_write,
 661};
 662
 663static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 664{
 665	int res;
 666	struct nfs_commit_info cinfo;
 667	LIST_HEAD(mds_list);
 668
 669	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 670	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
 671	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
 672	if (res < 0) /* res == -ENOMEM */
 673		nfs_direct_write_reschedule(dreq);
 674}
 675
 676static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
 677{
 678	struct nfs_commit_info cinfo;
 679	struct nfs_page *req;
 680	LIST_HEAD(reqs);
 681
 682	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 683	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 684
 685	while (!list_empty(&reqs)) {
 686		req = nfs_list_entry(reqs.next);
 687		nfs_list_remove_request(req);
 688		nfs_direct_truncate_request(dreq, req);
 689		nfs_release_request(req);
 690		nfs_unlock_and_release_request(req);
 691	}
 692}
 693
 694static void nfs_direct_write_schedule_work(struct work_struct *work)
 695{
 696	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
 697	int flags = dreq->flags;
 698
 699	dreq->flags = 0;
 700	switch (flags) {
 701		case NFS_ODIRECT_DO_COMMIT:
 702			nfs_direct_commit_schedule(dreq);
 703			break;
 704		case NFS_ODIRECT_RESCHED_WRITES:
 705			nfs_direct_write_reschedule(dreq);
 706			break;
 707		default:
 708			nfs_direct_write_clear_reqs(dreq);
 709			nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
 710			nfs_direct_complete(dreq);
 711	}
 712}
 713
 714static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
 715{
 716	trace_nfs_direct_write_complete(dreq);
 717	queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
 718}
 719
 720static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 721{
 722	struct nfs_direct_req *dreq = hdr->dreq;
 723	struct nfs_commit_info cinfo;
 724	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 725	int flags = NFS_ODIRECT_DONE;
 726
 727	trace_nfs_direct_write_completion(dreq);
 728
 729	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 730
 731	spin_lock(&dreq->lock);
 732	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
 733		spin_unlock(&dreq->lock);
 734		goto out_put;
 735	}
 736
 737	nfs_direct_count_bytes(dreq, hdr);
 738	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) &&
 739	    !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
 740		if (!dreq->flags)
 741			dreq->flags = NFS_ODIRECT_DO_COMMIT;
 742		flags = dreq->flags;
 743	}
 744	spin_unlock(&dreq->lock);
 745
 746	while (!list_empty(&hdr->pages)) {
 747
 748		req = nfs_list_entry(hdr->pages.next);
 749		nfs_list_remove_request(req);
 750		if (flags == NFS_ODIRECT_DO_COMMIT) {
 751			kref_get(&req->wb_kref);
 752			memcpy(&req->wb_verf, &hdr->verf.verifier,
 753			       sizeof(req->wb_verf));
 754			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
 755				hdr->ds_commit_idx);
 756		} else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
 757			kref_get(&req->wb_kref);
 758			nfs_mark_request_commit(req, NULL, &cinfo, 0);
 759		}
 760		nfs_unlock_and_release_request(req);
 761	}
 762
 763out_put:
 764	if (put_dreq(dreq))
 765		nfs_direct_write_complete(dreq);
 766	hdr->release(hdr);
 767}
 768
 769static void nfs_write_sync_pgio_error(struct list_head *head, int error)
 770{
 771	struct nfs_page *req;
 772
 773	while (!list_empty(head)) {
 774		req = nfs_list_entry(head->next);
 775		nfs_list_remove_request(req);
 776		nfs_unlock_and_release_request(req);
 777	}
 778}
 779
 780static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
 781{
 782	struct nfs_direct_req *dreq = hdr->dreq;
 783	struct nfs_page *req;
 784	struct nfs_commit_info cinfo;
 785
 786	trace_nfs_direct_write_reschedule_io(dreq);
 787
 788	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 789	spin_lock(&dreq->lock);
 790	if (dreq->error == 0)
 791		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 792	set_bit(NFS_IOHDR_REDO, &hdr->flags);
 793	spin_unlock(&dreq->lock);
 794	while (!list_empty(&hdr->pages)) {
 795		req = nfs_list_entry(hdr->pages.next);
 796		nfs_list_remove_request(req);
 797		nfs_unlock_request(req);
 798		nfs_mark_request_commit(req, NULL, &cinfo, 0);
 799	}
 800}
 801
 802static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
 803	.error_cleanup = nfs_write_sync_pgio_error,
 804	.init_hdr = nfs_direct_pgio_init,
 805	.completion = nfs_direct_write_completion,
 806	.reschedule_io = nfs_direct_write_reschedule_io,
 807};
 808
 809
 810/*
 811 * NB: Return the value of the first error return code.  Subsequent
 812 *     errors after the first one are ignored.
 813 */
 814/*
 815 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 816 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 817 * bail and stop sending more writes.  Write length accounting is
 818 * handled automatically by nfs_direct_write_result().  Otherwise, if
 819 * no requests have been sent, just return an error.
 820 */
 821static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 822					       struct iov_iter *iter,
 823					       loff_t pos, int ioflags)
 824{
 825	struct nfs_pageio_descriptor desc;
 826	struct inode *inode = dreq->inode;
 827	struct nfs_commit_info cinfo;
 828	ssize_t result = 0;
 829	size_t requested_bytes = 0;
 830	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
 831	bool defer = false;
 832
 833	trace_nfs_direct_write_schedule_iovec(dreq);
 834
 835	nfs_pageio_init_write(&desc, inode, ioflags, false,
 836			      &nfs_direct_write_completion_ops);
 837	desc.pg_dreq = dreq;
 838	get_dreq(dreq);
 839	inode_dio_begin(inode);
 840
 841	NFS_I(inode)->write_io += iov_iter_count(iter);
 842	while (iov_iter_count(iter)) {
 843		struct page **pagevec;
 844		size_t bytes;
 845		size_t pgbase;
 846		unsigned npages, i;
 847
 848		result = iov_iter_get_pages_alloc2(iter, &pagevec,
 849						  wsize, &pgbase);
 850		if (result < 0)
 851			break;
 852
 853		bytes = result;
 854		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
 855		for (i = 0; i < npages; i++) {
 856			struct nfs_page *req;
 857			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
 858
 859			req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
 860							pgbase, pos, req_len);
 861			if (IS_ERR(req)) {
 862				result = PTR_ERR(req);
 863				break;
 864			}
 865
 866			if (desc.pg_error < 0) {
 867				nfs_free_request(req);
 868				result = desc.pg_error;
 869				break;
 870			}
 871
 872			pgbase = 0;
 873			bytes -= req_len;
 874			requested_bytes += req_len;
 875			pos += req_len;
 876
 877			if (defer) {
 878				nfs_mark_request_commit(req, NULL, &cinfo, 0);
 879				continue;
 880			}
 881
 882			nfs_lock_request(req);
 883			if (nfs_pageio_add_request(&desc, req))
 884				continue;
 885
 886			/* Exit on hard errors */
 887			if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
 888				result = desc.pg_error;
 889				nfs_unlock_and_release_request(req);
 890				break;
 891			}
 892
 893			/* If the error is soft, defer remaining requests */
 894			nfs_init_cinfo_from_dreq(&cinfo, dreq);
 895			spin_lock(&dreq->lock);
 896			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 897			spin_unlock(&dreq->lock);
 898			nfs_unlock_request(req);
 899			nfs_mark_request_commit(req, NULL, &cinfo, 0);
 900			desc.pg_error = 0;
 901			defer = true;
 902		}
 903		nfs_direct_release_pages(pagevec, npages);
 904		kvfree(pagevec);
 905		if (result < 0)
 906			break;
 907	}
 908	nfs_pageio_complete(&desc);
 909
 910	/*
 911	 * If no bytes were started, return the error, and let the
 912	 * generic layer handle the completion.
 913	 */
 914	if (requested_bytes == 0) {
 915		inode_dio_end(inode);
 916		nfs_direct_req_release(dreq);
 917		return result < 0 ? result : -EIO;
 918	}
 919
 920	if (put_dreq(dreq))
 921		nfs_direct_write_complete(dreq);
 922	return requested_bytes;
 923}
 924
 925/**
 926 * nfs_file_direct_write - file direct write operation for NFS files
 927 * @iocb: target I/O control block
 928 * @iter: vector of user buffers from which to write data
 929 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 930 *
 931 * We use this function for direct writes instead of calling
 932 * generic_file_aio_write() in order to avoid taking the inode
 933 * semaphore and updating the i_size.  The NFS server will set
 934 * the new i_size and this client must read the updated size
 935 * back into its cache.  We let the server do generic write
 936 * parameter checking and report problems.
 937 *
 938 * We eliminate local atime updates, see direct read above.
 939 *
 940 * We avoid unnecessary page cache invalidations for normal cached
 941 * readers of this file.
 942 *
 943 * Note that O_APPEND is not supported for NFS direct writes, as there
 944 * is no atomic O_APPEND write facility in the NFS protocol.
 945 */
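/*
 * Since there is no atomic O_APPEND, an application wanting append-like
 * behaviour on an O_DIRECT NFS file has to track the offset itself, e.g.
 * (hypothetical sketch, racy if several clients append concurrently):
 *
 *	off_t end = lseek(fd, 0, SEEK_END);
 *	pwrite(fd, buf, len, end);
 */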
 946ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
 947			      bool swap)
 948{
 949	ssize_t result, requested;
 950	size_t count;
 951	struct file *file = iocb->ki_filp;
 952	struct address_space *mapping = file->f_mapping;
 953	struct inode *inode = mapping->host;
 954	struct nfs_direct_req *dreq;
 955	struct nfs_lock_context *l_ctx;
 956	loff_t pos, end;
 957
 958	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
 959		file, iov_iter_count(iter), (long long) iocb->ki_pos);
 960
 961	if (swap)
 962		/* bypass generic checks */
 963		result =  iov_iter_count(iter);
 964	else
 965		result = generic_write_checks(iocb, iter);
 966	if (result <= 0)
 967		return result;
 968	count = result;
 969	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
 970
 971	pos = iocb->ki_pos;
 972	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
 973
 974	task_io_account_write(count);
 975
 976	result = -ENOMEM;
 977	dreq = nfs_direct_req_alloc();
 978	if (!dreq)
 979		goto out;
 980
 981	dreq->inode = inode;
 982	dreq->max_count = count;
 983	dreq->io_start = pos;
 984	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
 985	l_ctx = nfs_get_lock_context(dreq->ctx);
 986	if (IS_ERR(l_ctx)) {
 987		result = PTR_ERR(l_ctx);
 988		nfs_direct_req_release(dreq);
 989		goto out_release;
 990	}
 991	dreq->l_ctx = l_ctx;
 992	if (!is_sync_kiocb(iocb))
 993		dreq->iocb = iocb;
 994	pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);
 995
 996	if (swap) {
 997		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
 998							    FLUSH_STABLE);
 999	} else {
1000		nfs_start_io_direct(inode);
1001
1002		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
1003							    FLUSH_COND_STABLE);
1004
1005		if (mapping->nrpages) {
1006			invalidate_inode_pages2_range(mapping,
1007						      pos >> PAGE_SHIFT, end);
1008		}
1009
1010		nfs_end_io_direct(inode);
1011	}
1012
1013	if (requested > 0) {
1014		result = nfs_direct_wait(dreq);
1015		if (result > 0) {
1016			requested -= result;
1017			iocb->ki_pos = pos + result;
1018			/* XXX: should check the generic_write_sync retval */
1019			generic_write_sync(iocb, result);
1020		}
1021		iov_iter_revert(iter, requested);
1022	} else {
1023		result = requested;
1024	}
1025	nfs_fscache_invalidate(inode, FSCACHE_INVAL_DIO_WRITE);
1026out_release:
1027	nfs_direct_req_release(dreq);
1028out:
1029	return result;
1030}
1031
1032/**
1033 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
1034 *
1035 */
1036int __init nfs_init_directcache(void)
1037{
1038	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
1039						sizeof(struct nfs_direct_req),
1040						0, (SLAB_RECLAIM_ACCOUNT|
1041							SLAB_MEM_SPREAD),
1042						NULL);
1043	if (nfs_direct_cachep == NULL)
1044		return -ENOMEM;
1045
1046	return 0;
1047}
1048
1049/**
1050 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
1051 *
1052 */
1053void nfs_destroy_directcache(void)
1054{
1055	kmem_cache_destroy(nfs_direct_cachep);
1056}
v3.15
 
   1/*
   2 * linux/fs/nfs/direct.c
   3 *
   4 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
   5 *
   6 * High-performance uncached I/O for the Linux NFS client
   7 *
   8 * There are important applications whose performance or correctness
   9 * depends on uncached access to file data.  Database clusters
  10 * (multiple copies of the same instance running on separate hosts)
  11 * implement their own cache coherency protocol that subsumes file
  12 * system cache protocols.  Applications that process datasets
  13 * considerably larger than the client's memory do not always benefit
  14 * from a local cache.  A streaming video server, for instance, has no
  15 * need to cache the contents of a file.
  16 *
  17 * When an application requests uncached I/O, all read and write requests
  18 * are made directly to the server; data stored or fetched via these
  19 * requests is not cached in the Linux page cache.  The client does not
  20 * correct unaligned requests from applications.  All requested bytes are
  21 * held on permanent storage before a direct write system call returns to
  22 * an application.
  23 *
  24 * Solaris implements an uncached I/O facility called directio() that
  25 * is used for backups and sequential I/O to very large files.  Solaris
  26 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
  27 * an undocumented mount option.
  28 *
  29 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
  30 * help from Andrew Morton.
  31 *
  32 * 18 Dec 2001	Initial implementation for 2.4  --cel
  33 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
  34 * 08 Jun 2003	Port to 2.5 APIs  --cel
  35 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
  36 * 15 Sep 2004	Parallel async reads  --cel
  37 * 04 May 2005	support O_DIRECT with aio  --cel
  38 *
  39 */
  40
  41#include <linux/errno.h>
  42#include <linux/sched.h>
  43#include <linux/kernel.h>
  44#include <linux/file.h>
  45#include <linux/pagemap.h>
  46#include <linux/kref.h>
  47#include <linux/slab.h>
  48#include <linux/task_io_accounting_ops.h>
  49#include <linux/module.h>
  50
  51#include <linux/nfs_fs.h>
  52#include <linux/nfs_page.h>
  53#include <linux/sunrpc/clnt.h>
  54
  55#include <asm/uaccess.h>
  56#include <linux/atomic.h>
  57
  58#include "internal.h"
  59#include "iostat.h"
  60#include "pnfs.h"
  61
  62#define NFSDBG_FACILITY		NFSDBG_VFS
  63
  64static struct kmem_cache *nfs_direct_cachep;
  65
  66/*
  67 * This represents a set of asynchronous requests that we're waiting on
  68 */
  69struct nfs_direct_req {
  70	struct kref		kref;		/* release manager */
  71
  72	/* I/O parameters */
  73	struct nfs_open_context	*ctx;		/* file open context info */
  74	struct nfs_lock_context *l_ctx;		/* Lock context info */
  75	struct kiocb *		iocb;		/* controlling i/o request */
  76	struct inode *		inode;		/* target file of i/o */
  77
  78	/* completion state */
  79	atomic_t		io_count;	/* i/os we're waiting for */
  80	spinlock_t		lock;		/* protect completion state */
  81	ssize_t			count,		/* bytes actually processed */
  82				bytes_left,	/* bytes left to be sent */
  83				error;		/* any reported error */
  84	struct completion	completion;	/* wait for i/o completion */
  85
  86	/* commit state */
  87	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
  88	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
  89	struct work_struct	work;
  90	int			flags;
  91#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
  92#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
  93	struct nfs_writeverf	verf;		/* unstable write verifier */
  94};
  95
  96static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
  97static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
  98static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
  99static void nfs_direct_write_schedule_work(struct work_struct *work);
 100
 101static inline void get_dreq(struct nfs_direct_req *dreq)
 102{
 103	atomic_inc(&dreq->io_count);
 104}
 105
 106static inline int put_dreq(struct nfs_direct_req *dreq)
 107{
 108	return atomic_dec_and_test(&dreq->io_count);
 109}
 110
 111/**
 112 * nfs_direct_IO - NFS address space operation for direct I/O
 113 * @rw: direction (read or write)
 114 * @iocb: target I/O control block
 115 * @iov: array of vectors that define I/O buffer
 116 * @pos: offset in file to begin the operation
 117 * @nr_segs: size of iovec array
 118 *
 119 * The presence of this routine in the address space ops vector means
 120 * the NFS client supports direct I/O. However, for most direct IO, we
 121 * shunt off direct read and write requests before the VFS gets them,
 122 * so this method is only ever called for swap.
 123 */
 124ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
 125{
 126#ifndef CONFIG_NFS_SWAP
 127	dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
 128			iocb->ki_filp, (long long) pos, nr_segs);
 129
 130	return -EINVAL;
 131#else
 132	VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);
 133
 134	if (rw == READ || rw == KERNEL_READ)
 135		return nfs_file_direct_read(iocb, iov, nr_segs, pos,
 136				rw == READ ? true : false);
 137	return nfs_file_direct_write(iocb, iov, nr_segs, pos,
 138				rw == WRITE ? true : false);
 139#endif /* CONFIG_NFS_SWAP */
 140}
 141
 142static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 143{
 144	unsigned int i;
 145	for (i = 0; i < npages; i++)
 146		page_cache_release(pages[i]);
 147}
 148
 149void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
 150			      struct nfs_direct_req *dreq)
 151{
 152	cinfo->lock = &dreq->lock;
 153	cinfo->mds = &dreq->mds_cinfo;
 154	cinfo->ds = &dreq->ds_cinfo;
 155	cinfo->dreq = dreq;
 156	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
 157}
 158
 159static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 160{
 161	struct nfs_direct_req *dreq;
 162
 163	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
 164	if (!dreq)
 165		return NULL;
 166
 167	kref_init(&dreq->kref);
 168	kref_get(&dreq->kref);
 169	init_completion(&dreq->completion);
 170	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
 171	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
 172	spin_lock_init(&dreq->lock);
 173
 174	return dreq;
 175}
 176
 177static void nfs_direct_req_free(struct kref *kref)
 178{
 179	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
 180
 181	if (dreq->l_ctx != NULL)
 182		nfs_put_lock_context(dreq->l_ctx);
 183	if (dreq->ctx != NULL)
 184		put_nfs_open_context(dreq->ctx);
 185	kmem_cache_free(nfs_direct_cachep, dreq);
 186}
 187
 188static void nfs_direct_req_release(struct nfs_direct_req *dreq)
 189{
 190	kref_put(&dreq->kref, nfs_direct_req_free);
 191}
 192
 193ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
 194{
 195	return dreq->bytes_left;
 196}
 197EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
 198
 199/*
 200 * Collects and returns the final error value/byte-count.
 201 */
 202static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
 203{
 204	ssize_t result = -EIOCBQUEUED;
 205
 206	/* Async requests don't wait here */
 207	if (dreq->iocb)
 208		goto out;
 209
 210	result = wait_for_completion_killable(&dreq->completion);
 211
 212	if (!result)
 213		result = dreq->error;
 214	if (!result)
 215		result = dreq->count;
 216
 217out:
 218	return (ssize_t) result;
 219}
 220
 221/*
 222 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 223 * the iocb is still valid here if this is a synchronous request.
 224 */
 225static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
 226{
 227	struct inode *inode = dreq->inode;
 228
 229	if (dreq->iocb && write) {
 230		loff_t pos = dreq->iocb->ki_pos + dreq->count;
 231
 232		spin_lock(&inode->i_lock);
 233		if (i_size_read(inode) < pos)
 234			i_size_write(inode, pos);
 235		spin_unlock(&inode->i_lock);
 236	}
 237
 238	if (write)
 239		nfs_zap_mapping(inode, inode->i_mapping);
 240
 241	inode_dio_done(inode);
 242
 243	if (dreq->iocb) {
 244		long res = (long) dreq->error;
 245		if (!res)
 246			res = (long) dreq->count;
 247		aio_complete(dreq->iocb, res, 0);
 248	}
 249
 250	complete_all(&dreq->completion);
 251
 252	nfs_direct_req_release(dreq);
 253}
 254
 255static void nfs_direct_readpage_release(struct nfs_page *req)
 256{
 257	dprintk("NFS: direct read done (%s/%llu %d@%lld)\n",
 258		req->wb_context->dentry->d_inode->i_sb->s_id,
 259		(unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
 260		req->wb_bytes,
 261		(long long)req_offset(req));
 262	nfs_release_request(req);
 263}
 264
 265static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 266{
 267	unsigned long bytes = 0;
 268	struct nfs_direct_req *dreq = hdr->dreq;
 269
 270	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
 271		goto out_put;
 272
 273	spin_lock(&dreq->lock);
 274	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
 275		dreq->error = hdr->error;
 276	else
 277		dreq->count += hdr->good_bytes;
 278	spin_unlock(&dreq->lock);
 279
 280	while (!list_empty(&hdr->pages)) {
 281		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 282		struct page *page = req->wb_page;
 283
 284		if (!PageCompound(page) && bytes < hdr->good_bytes)
 285			set_page_dirty(page);
 286		bytes += req->wb_bytes;
 287		nfs_list_remove_request(req);
 288		nfs_direct_readpage_release(req);
 289	}
 290out_put:
 291	if (put_dreq(dreq))
 292		nfs_direct_complete(dreq, false);
 293	hdr->release(hdr);
 294}
 295
 296static void nfs_read_sync_pgio_error(struct list_head *head)
 297{
 298	struct nfs_page *req;
 299
 300	while (!list_empty(head)) {
 301		req = nfs_list_entry(head->next);
 302		nfs_list_remove_request(req);
 303		nfs_release_request(req);
 304	}
 305}
 306
 307static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
 308{
 309	get_dreq(hdr->dreq);
 310}
 311
 312static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
 313	.error_cleanup = nfs_read_sync_pgio_error,
 314	.init_hdr = nfs_direct_pgio_init,
 315	.completion = nfs_direct_read_completion,
 316};
 317
 318/*
 319 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 320 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 321 * bail and stop sending more reads.  Read length accounting is
 322 * handled automatically by nfs_direct_read_result().  Otherwise, if
 323 * no requests have been sent, just return an error.
 324 */
 325static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
 326						const struct iovec *iov,
 327						loff_t pos, bool uio)
 328{
 329	struct nfs_direct_req *dreq = desc->pg_dreq;
 330	struct nfs_open_context *ctx = dreq->ctx;
 331	struct inode *inode = ctx->dentry->d_inode;
 332	unsigned long user_addr = (unsigned long)iov->iov_base;
 333	size_t count = iov->iov_len;
 334	size_t rsize = NFS_SERVER(inode)->rsize;
 335	unsigned int pgbase;
 336	int result;
 337	ssize_t started = 0;
 338	struct page **pagevec = NULL;
 339	unsigned int npages;
 340
 341	do {
 342		size_t bytes;
 343		int i;
 344
 345		pgbase = user_addr & ~PAGE_MASK;
 346		bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);
 347
 348		result = -ENOMEM;
 349		npages = nfs_page_array_len(pgbase, bytes);
 350		if (!pagevec)
 351			pagevec = kmalloc(npages * sizeof(struct page *),
 352					  GFP_KERNEL);
 353		if (!pagevec)
 354			break;
 355		if (uio) {
 356			down_read(&current->mm->mmap_sem);
 357			result = get_user_pages(current, current->mm, user_addr,
 358					npages, 1, 0, pagevec, NULL);
 359			up_read(&current->mm->mmap_sem);
 360			if (result < 0)
 361				break;
 362		} else {
 363			WARN_ON(npages != 1);
 364			result = get_kernel_page(user_addr, 1, pagevec);
 365			if (WARN_ON(result != 1))
 366				break;
 367		}
 368
 369		if ((unsigned)result < npages) {
 370			bytes = result * PAGE_SIZE;
 371			if (bytes <= pgbase) {
 372				nfs_direct_release_pages(pagevec, result);
 373				break;
 374			}
 375			bytes -= pgbase;
 376			npages = result;
 377		}
 378
 379		for (i = 0; i < npages; i++) {
 380			struct nfs_page *req;
 381			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
 382			/* XXX do we need to do the eof zeroing found in async_filler? */
 383			req = nfs_create_request(dreq->ctx, dreq->inode,
 384						 pagevec[i],
 385						 pgbase, req_len);
 386			if (IS_ERR(req)) {
 387				result = PTR_ERR(req);
 388				break;
 389			}
 390			req->wb_index = pos >> PAGE_SHIFT;
 391			req->wb_offset = pos & ~PAGE_MASK;
 392			if (!nfs_pageio_add_request(desc, req)) {
 393				result = desc->pg_error;
 394				nfs_release_request(req);
 395				break;
 396			}
 397			pgbase = 0;
 398			bytes -= req_len;
 399			started += req_len;
 400			user_addr += req_len;
 401			pos += req_len;
 402			count -= req_len;
 403			dreq->bytes_left -= req_len;
 404		}
  405		/* The nfs_page now holds references to these pages */
 406		nfs_direct_release_pages(pagevec, npages);
 407	} while (count != 0 && result >= 0);
 408
 409	kfree(pagevec);
 410
 411	if (started)
 412		return started;
 413	return result < 0 ? (ssize_t) result : -EFAULT;
 414}
 415
 416static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 417					      const struct iovec *iov,
 418					      unsigned long nr_segs,
 419					      loff_t pos, bool uio)
 420{
 421	struct nfs_pageio_descriptor desc;
 422	struct inode *inode = dreq->inode;
 423	ssize_t result = -EINVAL;
 424	size_t requested_bytes = 0;
 425	unsigned long seg;
 426
 427	NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
 428			     &nfs_direct_read_completion_ops);
 429	get_dreq(dreq);
 430	desc.pg_dreq = dreq;
 431	atomic_inc(&inode->i_dio_count);
 432
 433	for (seg = 0; seg < nr_segs; seg++) {
 434		const struct iovec *vec = &iov[seg];
 435		result = nfs_direct_read_schedule_segment(&desc, vec, pos, uio);
 436		if (result < 0)
 437			break;
 438		requested_bytes += result;
 439		if ((size_t)result < vec->iov_len)
 440			break;
 441		pos += vec->iov_len;
 442	}
 443
 444	nfs_pageio_complete(&desc);
 445
 446	/*
 447	 * If no bytes were started, return the error, and let the
 448	 * generic layer handle the completion.
 449	 */
 450	if (requested_bytes == 0) {
 451		inode_dio_done(inode);
 452		nfs_direct_req_release(dreq);
 453		return result < 0 ? result : -EIO;
 454	}
 455
 456	if (put_dreq(dreq))
 457		nfs_direct_complete(dreq, false);
 458	return 0;
 459}
 460
 461/**
 462 * nfs_file_direct_read - file direct read operation for NFS files
 463 * @iocb: target I/O control block
 464 * @iov: vector of user buffers into which to read data
 465 * @nr_segs: size of iov vector
 466 * @pos: byte offset in file where reading starts
 467 *
 468 * We use this function for direct reads instead of calling
 469 * generic_file_aio_read() in order to avoid gfar's check to see if
 470 * the request starts before the end of the file.  For that check
 471 * to work, we must generate a GETATTR before each direct read, and
 472 * even then there is a window between the GETATTR and the subsequent
 473 * READ where the file size could change.  Our preference is simply
 474 * to do all reads the application wants, and the server will take
 475 * care of managing the end of file boundary.
 476 *
 477 * This function also eliminates unnecessarily updating the file's
 478 * atime locally, as the NFS server sets the file's atime, and this
 479 * client must read the updated atime from the server back into its
 480 * cache.
 481 */
 482ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
 483				unsigned long nr_segs, loff_t pos, bool uio)
 484{
 485	struct file *file = iocb->ki_filp;
 486	struct address_space *mapping = file->f_mapping;
 487	struct inode *inode = mapping->host;
 488	struct nfs_direct_req *dreq;
 489	struct nfs_lock_context *l_ctx;
 490	ssize_t result = -EINVAL;
 491	size_t count;
 492
 493	count = iov_length(iov, nr_segs);
 494	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 495
 496	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
 497		file, count, (long long) pos);
 498
 499	result = 0;
 500	if (!count)
 501		goto out;
 502
 503	mutex_lock(&inode->i_mutex);
 504	result = nfs_sync_mapping(mapping);
 505	if (result)
 506		goto out_unlock;
 507
 508	task_io_account_read(count);
 509
 510	result = -ENOMEM;
 511	dreq = nfs_direct_req_alloc();
 512	if (dreq == NULL)
 513		goto out_unlock;
 514
 515	dreq->inode = inode;
 516	dreq->bytes_left = iov_length(iov, nr_segs);
 517	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
 518	l_ctx = nfs_get_lock_context(dreq->ctx);
 519	if (IS_ERR(l_ctx)) {
 520		result = PTR_ERR(l_ctx);
 521		goto out_release;
 522	}
 523	dreq->l_ctx = l_ctx;
 524	if (!is_sync_kiocb(iocb))
 525		dreq->iocb = iocb;
 526
 527	NFS_I(inode)->read_io += iov_length(iov, nr_segs);
 528	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos, uio);
 529
 530	mutex_unlock(&inode->i_mutex);
 531
 532	if (!result) {
 533		result = nfs_direct_wait(dreq);
 534		if (result > 0)
 535			iocb->ki_pos = pos + result;
 536	}
 537
 538	nfs_direct_req_release(dreq);
 539	return result;
 540
 541out_release:
 542	nfs_direct_req_release(dreq);
 543out_unlock:
 544	mutex_unlock(&inode->i_mutex);
 545out:
 546	return result;
 547}
 548
 549#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
 550static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 551{
 552	struct nfs_pageio_descriptor desc;
 553	struct nfs_page *req, *tmp;
 554	LIST_HEAD(reqs);
 555	struct nfs_commit_info cinfo;
 556	LIST_HEAD(failed);
 557
 558	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 559	pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
 560	spin_lock(cinfo.lock);
 561	nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
 562	spin_unlock(cinfo.lock);
 563
 564	dreq->count = 0;
 565	get_dreq(dreq);
 566
 567	NFS_PROTO(dreq->inode)->write_pageio_init(&desc, dreq->inode, FLUSH_STABLE,
 568			      &nfs_direct_write_completion_ops);
 569	desc.pg_dreq = dreq;
 570
 571	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
 572		if (!nfs_pageio_add_request(&desc, req)) {
 573			nfs_list_remove_request(req);
 574			nfs_list_add_request(req, &failed);
 575			spin_lock(cinfo.lock);
 576			dreq->flags = 0;
 577			dreq->error = -EIO;
 578			spin_unlock(cinfo.lock);
 579		}
 580		nfs_release_request(req);
 581	}
 582	nfs_pageio_complete(&desc);
 583
 584	while (!list_empty(&failed)) {
 585		req = nfs_list_entry(failed.next);
 586		nfs_list_remove_request(req);
 587		nfs_unlock_and_release_request(req);
 588	}
 589
 590	if (put_dreq(dreq))
 591		nfs_direct_write_complete(dreq, dreq->inode);
 592}
 593
 594static void nfs_direct_commit_complete(struct nfs_commit_data *data)
 595{
 596	struct nfs_direct_req *dreq = data->dreq;
 597	struct nfs_commit_info cinfo;
 598	struct nfs_page *req;
 599	int status = data->task.tk_status;
 600
 601	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 602	if (status < 0) {
 603		dprintk("NFS: %5u commit failed with error %d.\n",
 604			data->task.tk_pid, status);
 605		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 606	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
 607		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
 608		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 609	}
 610
 611	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
 612	while (!list_empty(&data->pages)) {
 613		req = nfs_list_entry(data->pages.next);
 614		nfs_list_remove_request(req);
 615		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
 616			/* Note the rewrite will go through mds */
 617			nfs_mark_request_commit(req, NULL, &cinfo);
 618		} else
 619			nfs_release_request(req);
 620		nfs_unlock_and_release_request(req);
 621	}
 622
 623	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
 624		nfs_direct_write_complete(dreq, data->inode);
 625}
 626
 627static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
 628{
 629	/* There is no lock to clear */
 630}
 631
 632static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
 633	.completion = nfs_direct_commit_complete,
 634	.error_cleanup = nfs_direct_error_cleanup,
 635};
 636
 637static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 638{
 639	int res;
 640	struct nfs_commit_info cinfo;
 641	LIST_HEAD(mds_list);
 642
 643	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 644	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
 645	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
 646	if (res < 0) /* res == -ENOMEM */
 647		nfs_direct_write_reschedule(dreq);
 648}
 649
 650static void nfs_direct_write_schedule_work(struct work_struct *work)
 651{
 652	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
 653	int flags = dreq->flags;
 654
 655	dreq->flags = 0;
 656	switch (flags) {
 657		case NFS_ODIRECT_DO_COMMIT:
 658			nfs_direct_commit_schedule(dreq);
 659			break;
 660		case NFS_ODIRECT_RESCHED_WRITES:
 661			nfs_direct_write_reschedule(dreq);
 662			break;
 663		default:
 664			nfs_direct_complete(dreq, true);
 665	}
 666}
 667
 668static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
 669{
 670	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
 671}
 672
 673#else
 674static void nfs_direct_write_schedule_work(struct work_struct *work)
 675{
 676}
 677
 678static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
 679{
 680	nfs_direct_complete(dreq, true);
 681}
 682#endif
 683
 684/*
 685 * NB: Return the value of the first error return code.  Subsequent
 686 *     errors after the first one are ignored.
 687 */
 688/*
 689 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 690 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 691 * bail and stop sending more writes.  Write length accounting is
 692 * handled automatically by nfs_direct_write_result().  Otherwise, if
 693 * no requests have been sent, just return an error.
 694 */
 695static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
 696						 const struct iovec *iov,
 697						 loff_t pos, bool uio)
 698{
 699	struct nfs_direct_req *dreq = desc->pg_dreq;
 700	struct nfs_open_context *ctx = dreq->ctx;
 701	struct inode *inode = ctx->dentry->d_inode;
 702	unsigned long user_addr = (unsigned long)iov->iov_base;
 703	size_t count = iov->iov_len;
 704	size_t wsize = NFS_SERVER(inode)->wsize;
 705	unsigned int pgbase;
 706	int result;
 707	ssize_t started = 0;
 708	struct page **pagevec = NULL;
 709	unsigned int npages;
 710
 711	do {
 712		size_t bytes;
 713		int i;
 714
 715		pgbase = user_addr & ~PAGE_MASK;
 716		bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);
 717
 718		result = -ENOMEM;
 719		npages = nfs_page_array_len(pgbase, bytes);
 720		if (!pagevec)
 721			pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
 722		if (!pagevec)
 723			break;
 724
 725		if (uio) {
 726			down_read(&current->mm->mmap_sem);
 727			result = get_user_pages(current, current->mm, user_addr,
 728						npages, 0, 0, pagevec, NULL);
 729			up_read(&current->mm->mmap_sem);
 730			if (result < 0)
 731				break;
 732		} else {
 733			WARN_ON(npages != 1);
 734			result = get_kernel_page(user_addr, 0, pagevec);
 735			if (WARN_ON(result != 1))
 736				break;
 737		}
 738
 739		if ((unsigned)result < npages) {
 740			bytes = result * PAGE_SIZE;
 741			if (bytes <= pgbase) {
 742				nfs_direct_release_pages(pagevec, result);
 743				break;
 744			}
 745			bytes -= pgbase;
 746			npages = result;
 747		}
 748
 749		for (i = 0; i < npages; i++) {
 750			struct nfs_page *req;
 751			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
 752
 753			req = nfs_create_request(dreq->ctx, dreq->inode,
 754						 pagevec[i],
 755						 pgbase, req_len);
 756			if (IS_ERR(req)) {
 757				result = PTR_ERR(req);
 758				break;
 759			}
 760			nfs_lock_request(req);
 761			req->wb_index = pos >> PAGE_SHIFT;
 762			req->wb_offset = pos & ~PAGE_MASK;
 763			if (!nfs_pageio_add_request(desc, req)) {
 764				result = desc->pg_error;
 765				nfs_unlock_and_release_request(req);
 766				break;
 767			}
 768			pgbase = 0;
 769			bytes -= req_len;
 770			started += req_len;
 771			user_addr += req_len;
 772			pos += req_len;
 773			count -= req_len;
 774			dreq->bytes_left -= req_len;
 775		}
 776	/* The nfs_page requests now hold references to these pages */
 777		nfs_direct_release_pages(pagevec, npages);
 778	} while (count != 0 && result >= 0);
 779
 780	kfree(pagevec);
 781
 782	if (started)
 783		return started;
 784	return result < 0 ? (ssize_t) result : -EFAULT;
 785}
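/*
 * Illustrative arithmetic for the loop above, assuming 4K pages and
 * wsize = 32768: an iovec of 100000 bytes whose base sits 512 bytes
 * into a page gives pgbase = 512 and bytes = 32768 per full pass, so
 * nfs_page_array_len() pins (512 + 32768 + 4095) / 4096 = 9 pages per
 * full pass; three full 32768-byte passes and a final 1696-byte pass
 * cover the buffer, each page becoming its own nfs_page request.
 */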
 786
 787static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 788{
 789	struct nfs_direct_req *dreq = hdr->dreq;
 790	struct nfs_commit_info cinfo;
 791	int bit = -1;
 792	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 793
 794	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
 795		goto out_put;
 796
 797	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 798
 799	spin_lock(&dreq->lock);
 800
 801	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
 802		dreq->flags = 0;
 803		dreq->error = hdr->error;
 804	}
 805	if (dreq->error != 0) {
 806		bit = NFS_IOHDR_ERROR;
 807	} else {
 808		dreq->count += hdr->good_bytes;
 809		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
 810			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 811			bit = NFS_IOHDR_NEED_RESCHED;
 812		} else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
 813			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
 814				bit = NFS_IOHDR_NEED_RESCHED;
 815			else if (dreq->flags == 0) {
 816				memcpy(&dreq->verf, hdr->verf,
 817				       sizeof(dreq->verf));
 818				bit = NFS_IOHDR_NEED_COMMIT;
 819				dreq->flags = NFS_ODIRECT_DO_COMMIT;
 820			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
 821				if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) {
 822					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 823					bit = NFS_IOHDR_NEED_RESCHED;
 824				} else
 825					bit = NFS_IOHDR_NEED_COMMIT;
 826			}
 827		}
 828	}
 829	spin_unlock(&dreq->lock);
 830
 831	while (!list_empty(&hdr->pages)) {
 832		req = nfs_list_entry(hdr->pages.next);
 833		nfs_list_remove_request(req);
 834		switch (bit) {
 835		case NFS_IOHDR_NEED_RESCHED:
 836		case NFS_IOHDR_NEED_COMMIT:
 837			kref_get(&req->wb_kref);
 838			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
 839		}
 840		nfs_unlock_and_release_request(req);
 841	}
 842
 843out_put:
 844	if (put_dreq(dreq))
 845		nfs_direct_write_complete(dreq, hdr->inode);
 846	hdr->release(hdr);
 847}
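/*
 * A note on the verifier checks above: the first unstable WRITE reply
 * caches its write verifier in dreq->verf.  A later reply carrying a
 * different verifier means the server rebooted mid-stream and may have
 * lost uncommitted data, so the writes are rescheduled
 * (NFS_ODIRECT_RESCHED_WRITES) rather than committed.
 */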
 848
 849static void nfs_write_sync_pgio_error(struct list_head *head)
 850{
 851	struct nfs_page *req;
 852
 853	while (!list_empty(head)) {
 854		req = nfs_list_entry(head->next);
 855		nfs_list_remove_request(req);
 856		nfs_unlock_and_release_request(req);
 857	}
 858}
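/*
 * nfs_write_sync_pgio_error() runs when the pageio layer fails before
 * any WRITE is sent: each queued request is simply unlocked and
 * released so its pages can be freed.
 */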
 859
 860static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
 861	.error_cleanup = nfs_write_sync_pgio_error,
 862	.init_hdr = nfs_direct_pgio_init,
 863	.completion = nfs_direct_write_completion,
 864};
 865
 866static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 867					       const struct iovec *iov,
 868					       unsigned long nr_segs,
 869					       loff_t pos, bool uio)
 870{
 871	struct nfs_pageio_descriptor desc;
 872	struct inode *inode = dreq->inode;
 873	ssize_t result = 0;
 874	size_t requested_bytes = 0;
 875	unsigned long seg;
 876
 877	NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
 878			      &nfs_direct_write_completion_ops);
 879	desc.pg_dreq = dreq;
 880	get_dreq(dreq);
 881	atomic_inc(&inode->i_dio_count);
 882
 883	NFS_I(dreq->inode)->write_io += iov_length(iov, nr_segs);
 884	for (seg = 0; seg < nr_segs; seg++) {
 885		const struct iovec *vec = &iov[seg];
 886		result = nfs_direct_write_schedule_segment(&desc, vec, pos, uio);
 887		if (result < 0)
 888			break;
 889		requested_bytes += result;
 890		if ((size_t)result < vec->iov_len)
 891			break;
 892		pos += vec->iov_len;
 893	}
 894	nfs_pageio_complete(&desc);
 895
 896	/*
 897	 * If no bytes were started, return the error, and let the
 898	 * generic layer handle the completion.
 899	 */
 900	if (requested_bytes == 0) {
 901		inode_dio_done(inode);
 902		nfs_direct_req_release(dreq);
 903		return result < 0 ? result : -EIO;
 904	}
 905
 906	if (put_dreq(dreq))
 907		nfs_direct_write_complete(dreq, dreq->inode);
 908	return 0;
 909}
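/*
 * Reference counting in the scheduler above, in outline: get_dreq()
 * pins the request so it cannot complete while WRITEs are still being
 * queued; each outstanding RPC holds its own reference, and the final
 * put_dreq() drops the scheduler's pin, so nfs_direct_write_complete()
 * fires only after the last reply (or here, if nothing was sent).
 */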
 910
 911/**
 912 * nfs_file_direct_write - file direct write operation for NFS files
 913 * @iocb: target I/O control block
 914 * @iov: vector of user buffers from which to write data
 915 * @nr_segs: size of iov vector
 916 * @pos: byte offset in file where writing starts
 917 *
 918 * We use this function for direct writes instead of calling
 919 * generic_file_aio_write() in order to avoid taking the inode
 920 * semaphore and updating the i_size.  The NFS server will set
 921 * the new i_size and this client must read the updated size
 922 * back into its cache.  We let the server do generic write
 923 * parameter checking and report problems.
 924 *
 925 * As with direct reads, we avoid local atime updates.
 926 *
 927 * We avoid unnecessary page cache invalidations for normal cached
 928 * readers of this file.
 929 *
 930 * Note that O_APPEND is not supported for NFS direct writes, as there
 931 * is no atomic O_APPEND write facility in the NFS protocol.
 932 */
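/*
 * A minimal userspace sketch of how this entry point is reached
 * (illustrative only; none of these names are defined here): open the
 * file with O_DIRECT and write from a suitably aligned buffer, which
 * arrives via the file's ->aio_write operation:
 *
 *	int fd = open("/mnt/nfs/data", O_WRONLY | O_DIRECT);
 *	void *buf;
 *	if (fd >= 0 && posix_memalign(&buf, 4096, 65536) == 0) {
 *		memset(buf, 'x', 65536);
 *		if (pwrite(fd, buf, 65536, 0) < 0)	// lands here
 *			perror("pwrite");
 *	}
 */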
 933ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 934				unsigned long nr_segs, loff_t pos, bool uio)
 935{
 936	ssize_t result = -EINVAL;
 937	struct file *file = iocb->ki_filp;
 938	struct address_space *mapping = file->f_mapping;
 939	struct inode *inode = mapping->host;
 940	struct nfs_direct_req *dreq;
 941	struct nfs_lock_context *l_ctx;
 942	loff_t end;
 943	size_t count;
 944
 945	count = iov_length(iov, nr_segs);
 946	end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
 947
 948	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
 949
 950	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
 951		file, count, (long long) pos);
 952
 953	result = generic_write_checks(file, &pos, &count, 0);
 954	if (result)
 955		goto out;
 956
 957	result = -EINVAL;
 958	if ((ssize_t) count < 0)
 959		goto out;
 960	result = 0;
 961	if (!count)
 962		goto out;
 963
 964	mutex_lock(&inode->i_mutex);
 965
 966	result = nfs_sync_mapping(mapping);
 967	if (result)
 968		goto out_unlock;
 969
 970	if (mapping->nrpages) {
 971		result = invalidate_inode_pages2_range(mapping,
 972					pos >> PAGE_CACHE_SHIFT, end);
 973		if (result)
 974			goto out_unlock;
 975	}
 976
 977	task_io_account_write(count);
 978
 979	result = -ENOMEM;
 980	dreq = nfs_direct_req_alloc();
 981	if (!dreq)
 982		goto out_unlock;
 983
 984	dreq->inode = inode;
 985	dreq->bytes_left = count;
 986	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
 987	l_ctx = nfs_get_lock_context(dreq->ctx);
 988	if (IS_ERR(l_ctx)) {
 989		result = PTR_ERR(l_ctx);
 990		goto out_release;
 991	}
 992	dreq->l_ctx = l_ctx;
 993	if (!is_sync_kiocb(iocb))
 994		dreq->iocb = iocb;
 995
 996	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio);
 997
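	/*
	 * Invalidate again after the writes have gone out: a concurrent
	 * buffered reader may have repopulated the page cache while the
	 * direct write was in flight, and those pages are now stale.
	 */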
 998	if (mapping->nrpages) {
 999		invalidate_inode_pages2_range(mapping,
1000					      pos >> PAGE_CACHE_SHIFT, end);
1001	}
1002
1003	mutex_unlock(&inode->i_mutex);
1004
1005	if (!result) {
1006		result = nfs_direct_wait(dreq);
1007		if (result > 0) {
1008			struct inode *inode = mapping->host;
1009
1010			iocb->ki_pos = pos + result;
1011			spin_lock(&inode->i_lock);
1012			if (i_size_read(inode) < iocb->ki_pos)
1013				i_size_write(inode, iocb->ki_pos);
1014			spin_unlock(&inode->i_lock);
1015		}
1016	}
1017	nfs_direct_req_release(dreq);
1018	return result;
1019
1020out_release:
1021	nfs_direct_req_release(dreq);
1022out_unlock:
1023	mutex_unlock(&inode->i_mutex);
1024out:
1025	return result;
1026}
1027
1028/**
1029 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
1030 *
1031 */
1032int __init nfs_init_directcache(void)
1033{
1034	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
1035						sizeof(struct nfs_direct_req),
1036						0, (SLAB_RECLAIM_ACCOUNT|
1037							SLAB_MEM_SPREAD),
1038						NULL);
1039	if (nfs_direct_cachep == NULL)
1040		return -ENOMEM;
1041
1042	return 0;
1043}
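/*
 * One nfs_direct_req is allocated from this cache per direct I/O
 * system call (see nfs_direct_req_alloc() earlier in this file) and
 * freed when its last reference is dropped.
 */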
1044
1045/**
1046 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
1047 *
1048 */
1049void nfs_destroy_directcache(void)
1050{
1051	kmem_cache_destroy(nfs_direct_cachep);
1052}