Linux v4.10.11

/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>

#include <linux/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_clear_request_commit(struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode);
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page);

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;

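/*
 * Allocate a commit data structure from the commit mempool and
 * initialise its page list. GFP_NOIO keeps the allocation from
 * recursing into filesystem I/O; callers must check for NULL.
 */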
struct nfs_commit_data *nfs_commitdata_alloc(void)
{
	struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
	mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);

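/*
 * Allocate a pageio write header from the write mempool. The header is
 * zeroed before use; returns NULL if the allocation fails.
 */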
static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{
	struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);

	if (p)
		memset(p, 0, sizeof(*p));
	return p;
}

static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	mempool_free(hdr, nfs_wdata_mempool);
}

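/*
 * Record a write error in the open context. The error is stored before
 * NFS_CONTEXT_ERROR_WRITE is set, with a write barrier in between, so
 * that anyone who sees the flag also sees the error value.
 */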
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
	ctx->error = error;
	smp_wmb();
	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}

/*
 * nfs_page_find_head_request_locked - find head request associated with @page
 *
 * must be called while holding the inode lock.
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *
nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page))
		req = (struct nfs_page *)page_private(page);
	else if (unlikely(PageSwapCache(page)))
		req = nfs_page_search_commits_for_head_request_locked(nfsi,
			page);

	if (req) {
		WARN_ON_ONCE(req->wb_head != req);
		kref_get(&req->wb_kref);
	}

	return req;
}

/*
 * nfs_page_find_head_request - find head request associated with @page
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *nfs_page_find_head_request(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *req = NULL;

	spin_lock(&inode->i_lock);
	req = nfs_page_find_head_request_locked(NFS_I(inode), page);
	spin_unlock(&inode->i_lock);
	return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page_file_mapping(page)->host;
	loff_t end, i_size;
	pgoff_t end_index;

	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = (i_size - 1) >> PAGE_SHIFT;
	if (i_size > 0 && page_index(page) < end_index)
		goto out;
	end = page_file_offset(page) + ((loff_t)offset+count);
	if (i_size >= end)
		goto out;
	i_size_write(inode, end);
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	spin_unlock(&inode->i_lock);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
}

/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held
 */
static struct nfs_page *
nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
{
	struct nfs_page *req;

	WARN_ON_ONCE(head != head->wb_head);
	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_head->wb_flags));

	req = head;
	do {
		if (page_offset >= req->wb_pgbase &&
		    page_offset < (req->wb_pgbase + req->wb_bytes))
			return req;

		req = req->wb_this_page;
	} while (req != head);

	return NULL;
}

/*
 * nfs_page_group_covers_page
 * @head - head request of page group
 *
 * Return true if the page group with head @head covers the whole page,
 * returns false otherwise
 */
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
	struct nfs_page *tmp;
	unsigned int pos = 0;
	unsigned int len = nfs_page_length(req->wb_page);

	nfs_page_group_lock(req, false);

	do {
		tmp = nfs_page_group_search_locked(req->wb_head, pos);
		if (tmp) {
			/* no way this should happen */
			WARN_ON_ONCE(tmp->wb_pgbase != pos);
			pos += tmp->wb_bytes - (pos - tmp->wb_pgbase);
		}
	} while (tmp && pos < len);

	nfs_page_group_unlock(req);
	WARN_ON_ONCE(pos > len);
	return pos == len;
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct nfs_page *req)
{
	if (PageUptodate(req->wb_page))
		return;
	if (!nfs_page_group_covers_page(req))
		return;
	SetPageUptodate(req->wb_page);
}

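/*
 * Map the writeback mode onto an NFS flush priority: synchronous
 * writeback (WB_SYNC_ALL) asks for a stable write via FLUSH_COND_STABLE,
 * while background writeback uses the default unstable write.
 */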
static int wb_priority(struct writeback_control *wbc)
{
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		ret = FLUSH_COND_STABLE;
	return ret;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH 	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

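/*
 * Mark the page as under writeback and account it against the
 * per-server writeback counter. Once the counter climbs above
 * NFS_CONGESTION_ON_THRESH, the backing device is marked congested
 * in order to throttle further writers.
 */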
static void nfs_set_page_writeback(struct page *page)
{
	struct nfs_server *nfss = NFS_SERVER(page_file_mapping(page)->host);
	int ret = test_set_page_writeback(page);

	WARN_ON_ONCE(ret != 0);

	if (atomic_long_inc_return(&nfss->writeback) >
			NFS_CONGESTION_ON_THRESH) {
		set_bdi_congested(&nfss->backing_dev_info,
					BLK_RW_ASYNC);
	}
}

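/*
 * End writeback on the page once every request in the page group has
 * reached PG_WB_END, and clear the backing device congestion when the
 * writeback count drops below NFS_CONGESTION_OFF_THRESH.
 */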
static void nfs_end_page_writeback(struct nfs_page *req)
{
	struct inode *inode = page_file_mapping(req->wb_page)->host;
	struct nfs_server *nfss = NFS_SERVER(inode);

	if (!nfs_page_group_sync_on_bit(req, PG_WB_END))
		return;

	end_page_writeback(req->wb_page);
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}


/* nfs_page_group_clear_bits
 *   @req - an nfs request
 * clears all page group related bits from @req
 */
static void
nfs_page_group_clear_bits(struct nfs_page *req)
{
	clear_bit(PG_TEARDOWN, &req->wb_flags);
	clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
	clear_bit(PG_UPTODATE, &req->wb_flags);
	clear_bit(PG_WB_END, &req->wb_flags);
	clear_bit(PG_REMOVE, &req->wb_flags);
}


/*
 * nfs_unroll_locks_and_wait -  unlock all newly locked reqs and wait on @req
 *
 * this is a helper function for nfs_lock_and_join_requests
 *
 * @inode - inode associated with request page group, must be holding inode lock
 * @head  - head request of page group, must be holding head lock
 * @req   - request that couldn't lock and needs to wait on the req bit lock
 * @nonblock - if true, don't actually wait
 *
 * NOTE: this must be called holding page_group bit lock and inode spin lock
 *       and BOTH will be released before returning.
 *
 * returns 0 on success, < 0 on error.
 */
static int
nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
			  struct nfs_page *req, bool nonblock)
	__releases(&inode->i_lock)
{
	struct nfs_page *tmp;
	int ret;

	/* relinquish all the locks successfully grabbed this run */
	for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
		nfs_unlock_request(tmp);

	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));

	/* grab a ref on the request that will be waited on */
	kref_get(&req->wb_kref);

	nfs_page_group_unlock(head);
	spin_unlock(&inode->i_lock);

	/* release ref from nfs_page_find_head_request_locked */
	nfs_release_request(head);

	if (!nonblock)
		ret = nfs_wait_on_request(req);
	else
		ret = -EAGAIN;
	nfs_release_request(req);

	return ret;
}

/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 *
 * All subrequests must be locked and removed from all lists, so at this point
 * they are only "active" in this function, and possibly in nfs_wait_on_request
 * with a reference held by some other context.
 */
static void
nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
				 struct nfs_page *old_head)
{
	while (destroy_list) {
		struct nfs_page *subreq = destroy_list;

		destroy_list = (subreq->wb_this_page == old_head) ?
				   NULL : subreq->wb_this_page;

		WARN_ON_ONCE(old_head != subreq->wb_head);

		/* make sure old group is not used */
		subreq->wb_head = subreq;
		subreq->wb_this_page = subreq;

		/* subreq is now totally disconnected from page group or any
		 * write / commit lists. last chance to wake any waiters */
		nfs_unlock_request(subreq);

		if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
			/* release ref on old head request */
			nfs_release_request(old_head);

			nfs_page_group_clear_bits(subreq);

			/* release the PG_INODE_REF reference */
			if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
				nfs_release_request(subreq);
			else
				WARN_ON_ONCE(1);
		} else {
			WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
			/* zombie requests have already released the last
			 * reference and were waiting on the rest of the
			 * group to complete. Since it's no longer part of a
			 * group, simply free the request */
			nfs_page_group_clear_bits(subreq);
			nfs_free_request(subreq);
		}
	}
}

/*
 * nfs_lock_and_join_requests - join all subreqs to the head req and return
 *                              a locked reference, cancelling any pending
 *                              operations for this page.
 *
 * @page - the page used to lookup the "page group" of nfs_page structures
 * @nonblock - if true, don't block waiting for request locks
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request - which after
 * this call is guaranteed to be the only request associated with the page.
 * Returns NULL if no requests are found for @page, or an ERR_PTR if an
 * error was encountered.
 */
static struct nfs_page *
nfs_lock_and_join_requests(struct page *page, bool nonblock)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *head, *subreq;
	struct nfs_page *destroy_list = NULL;
	unsigned int total_bytes;
	int ret;

try_again:
	total_bytes = 0;

	WARN_ON_ONCE(destroy_list);

	spin_lock(&inode->i_lock);

	/*
	 * A reference is taken only on the head request which acts as a
	 * reference to the whole page group - the group will not be destroyed
	 * until the head reference is released.
	 */
	head = nfs_page_find_head_request_locked(NFS_I(inode), page);

	if (!head) {
		spin_unlock(&inode->i_lock);
		return NULL;
	}

	/* holding inode lock, so always make a non-blocking call to try the
	 * page group lock */
	ret = nfs_page_group_lock(head, true);
	if (ret < 0) {
		spin_unlock(&inode->i_lock);

		if (!nonblock && ret == -EAGAIN) {
			nfs_page_group_lock_wait(head);
			nfs_release_request(head);
			goto try_again;
		}

		nfs_release_request(head);
		return ERR_PTR(ret);
	}

	/* lock each request in the page group */
	subreq = head;
	do {
		/*
		 * Subrequests are always contiguous, non-overlapping
		 * and in order - but may be repeated (mirrored writes).
		 */
		if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
			/* keep track of how many bytes this group covers */
			total_bytes += subreq->wb_bytes;
		} else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
			    ((subreq->wb_offset + subreq->wb_bytes) >
			     (head->wb_offset + total_bytes)))) {
			nfs_page_group_unlock(head);
			spin_unlock(&inode->i_lock);
			return ERR_PTR(-EIO);
		}

		if (!nfs_lock_request(subreq)) {
			/* releases page group bit lock and
			 * inode spin lock and all references */
			ret = nfs_unroll_locks_and_wait(inode, head,
				subreq, nonblock);

			if (ret == 0)
				goto try_again;

			return ERR_PTR(ret);
		}

		subreq = subreq->wb_this_page;
	} while (subreq != head);

	/* Now that all requests are locked, make sure they aren't on any list.
	 * Commit list removal accounting is done after locks are dropped */
	subreq = head;
	do {
		nfs_clear_request_commit(subreq);
		subreq = subreq->wb_this_page;
	} while (subreq != head);

	/* unlink subrequests from head, destroy them later */
	if (head->wb_this_page != head) {
		/* destroy list will be terminated by head */
		destroy_list = head->wb_this_page;
		head->wb_this_page = head;

		/* change head request to cover whole range that
		 * the former page group covered */
		head->wb_bytes = total_bytes;
	}

	/*
	 * prepare head request to be added to new pgio descriptor
	 */
	nfs_page_group_clear_bits(head);

	/*
	 * some part of the group was still on the inode list - otherwise
	 * the group wouldn't be involved in async write.
	 * grab a reference for the head request, iff it needs one.
	 */
	if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
		kref_get(&head->wb_kref);

	nfs_page_group_unlock(head);

	/* drop lock to clean up requests on destroy list */
	spin_unlock(&inode->i_lock);

	nfs_destroy_unlinked_subrequests(destroy_list, head);

	/* still holds ref on head from nfs_page_find_head_request_locked
	 * and still has lock on head from lock loop */
	return head;
}

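/*
 * Error path helper: unlock the request, end page writeback, drop the
 * reference, and then evict the page from the page cache so that a
 * fatal write error is not retried forever.
 */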
static void nfs_write_error_remove_page(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_end_page_writeback(req);
	nfs_release_request(req);
	generic_error_remove_page(page_file_mapping(req->wb_page),
				  req->wb_page);
}

/*
 * Find an associated nfs write request, and prepare to flush it out.
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page, bool nonblock,
				bool launder)
{
	struct nfs_page *req;
	int ret = 0;

	req = nfs_lock_and_join_requests(page, nonblock);
	if (!req)
		goto out;
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	nfs_set_page_writeback(page);
	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

	ret = 0;
	if (!nfs_pageio_add_request(pgio, req)) {
		ret = pgio->pg_error;
		/*
		 * Remove the problematic req upon fatal errors
		 * in launder case, while other dirty pages can
		 * still be around until they get flushed.
		 */
		if (nfs_error_is_fatal(ret)) {
			nfs_context_set_write_error(req->wb_context, ret);
			if (launder) {
				nfs_write_error_remove_page(req);
				goto out;
			}
		}
		nfs_redirty_request(req);
		ret = -EAGAIN;
	} else
		nfs_add_stats(page_file_mapping(page)->host,
				NFSIOS_WRITEPAGES, 1);
out:
	return ret;
}

static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
			    struct nfs_pageio_descriptor *pgio, bool launder)
{
	int ret;

	nfs_pageio_cond_complete(pgio, page_index(page));
	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE,
				   launder);
	if (ret == -EAGAIN) {
		redirty_page_for_writepage(wbc, page);
		ret = 0;
	}
	return ret;
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page,
				struct writeback_control *wbc,
				bool launder)
{
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = page_file_mapping(page)->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_pageio_init_write(&pgio, inode, 0,
				false, &nfs_async_write_completion_ops);
	err = nfs_do_writepage(page, wbc, &pgio, launder);
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (pgio.pg_error < 0)
		return pgio.pg_error;
	return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = nfs_writepage_locked(page, wbc, false);
	unlock_page(page);
	return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
	int ret;

	ret = nfs_do_writepage(page, wbc, data, false);
	unlock_page(page);
	return ret;
}

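/*
 * ->writepages() method for the NFS address_space: flush dirty pages
 * through a single pageio descriptor so that contiguous requests can
 * be coalesced into larger WRITE RPCs.
 */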
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct nfs_pageio_descriptor pgio;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
				&nfs_async_write_completion_ops);
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	nfs_pageio_complete(&pgio);

	if (err < 0)
		goto out_err;
	err = pgio.pg_error;
	if (err < 0)
		goto out_err;
	return 0;
out_err:
	return err;
}

/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	WARN_ON_ONCE(req->wb_this_page != req);

	/* Lock the request! */
	nfs_lock_request(req);

	spin_lock(&inode->i_lock);
	if (!nfsi->nrequests &&
	    NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		inode->i_version++;
	/*
	 * Swap-space should not get truncated. Hence no need to plug the race
	 * with invalidate/truncate.
	 */
	if (likely(!PageSwapCache(req->wb_page))) {
		set_bit(PG_MAPPED, &req->wb_flags);
		SetPagePrivate(req->wb_page);
		set_page_private(req->wb_page, (unsigned long)req);
	}
	nfsi->nrequests++;
	/* this is a head request for a page group - mark it as having an
	 * extra reference so sub groups can follow suit.
	 * This flag also informs pgio layer when to bump nrequests when
	 * adding subrequests. */
	WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
	kref_get(&req->wb_kref);
	spin_unlock(&inode->i_lock);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = d_inode(req->wb_context->dentry);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *head;

	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
		head = req->wb_head;

		spin_lock(&inode->i_lock);
		if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
			set_page_private(head->wb_page, 0);
			ClearPagePrivate(head->wb_page);
			smp_mb__after_atomic();
			wake_up_page(head->wb_page, PG_private);
			clear_bit(PG_MAPPED, &head->wb_flags);
		}
		nfsi->nrequests--;
		spin_unlock(&inode->i_lock);
	} else {
		spin_lock(&inode->i_lock);
		nfsi->nrequests--;
		spin_unlock(&inode->i_lock);
	}

	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
		nfs_release_request(req);
}

static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	if (req->wb_page)
		__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * nfs_page_search_commits_for_head_request_locked
 *
 * Search through commit lists on @inode for the head request for @page.
 * Must be called while holding the inode (which is cinfo) lock.
 *
 * Returns the head request if found, or NULL if not found.
 */
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page)
{
	struct nfs_page *freq, *t;
	struct nfs_commit_info cinfo;
	struct inode *inode = &nfsi->vfs_inode;

	nfs_init_cinfo_from_inode(&cinfo, inode);

	/* search through pnfs commit lists */
	freq = pnfs_search_commit_reqs(inode, &cinfo, page);
	if (freq)
		return freq->wb_head;

	/* Linearly search the commit list for the correct request */
	list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
		if (freq->wb_page == page)
			return freq->wb_head;
	}

	return NULL;
}

/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must hold cinfo->inode->i_lock, and the nfs_page lock.
 */
void
nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
			    struct nfs_commit_info *cinfo)
{
	set_bit(PG_CLEAN, &req->wb_flags);
	nfs_list_add_request(req, dst);
	cinfo->mds->ncommit++;
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);

/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
	spin_lock(&cinfo->inode->i_lock);
	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
	spin_unlock(&cinfo->inode->i_lock);
	if (req->wb_page)
		nfs_mark_page_unstable(req->wb_page, cinfo);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req,
			       struct nfs_commit_info *cinfo)
{
	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
		return;
	nfs_list_remove_request(req);
	cinfo->mds->ncommit--;
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{
	cinfo->inode = inode;
	cinfo->mds = &NFS_I(inode)->commit_info;
	cinfo->ds = pnfs_get_ds_info(inode);
	cinfo->dreq = NULL;
	cinfo->completion_ops = &nfs_commit_completion_ops;
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{
	if (dreq)
		nfs_init_cinfo_from_dreq(cinfo, dreq);
	else
		nfs_init_cinfo_from_inode(cinfo, inode);
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);

/*
 * Add a request to the inode's commit list.
 */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
	if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
		return;
	nfs_request_add_commit_list(req, cinfo);
}

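/*
 * Undo the "unstable page" accounting performed when the request was
 * put on a commit list: drop the NR_UNSTABLE_NFS page state and the
 * reclaimable writeback stat of the backing device.
 */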
static void
nfs_clear_page_commit(struct page *page)
{
	dec_node_page_state(page, NR_UNSTABLE_NFS);
	dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
		    WB_RECLAIMABLE);
}

/* Called holding inode (/cinfo) lock */
static void
nfs_clear_request_commit(struct nfs_page *req)
{
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		struct inode *inode = d_inode(req->wb_context->dentry);
		struct nfs_commit_info cinfo;

		nfs_init_cinfo_from_inode(&cinfo, inode);
		if (!pnfs_clear_request_commit(req, &cinfo)) {
			nfs_request_remove_commit_list(req, &cinfo);
		}
		nfs_clear_page_commit(req->wb_page);
	}
}

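/*
 * Decide whether a completed WRITE still requires a COMMIT: anything
 * short of NFS_FILE_SYNC does, except that an NFS_DATA_SYNC write
 * through a pNFS layout segment is left to the layout driver.
 */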
int nfs_write_need_commit(struct nfs_pgio_header *hdr)
{
	if (hdr->verf.committed == NFS_DATA_SYNC)
		return hdr->lseg == NULL;
	return hdr->verf.committed != NFS_FILE_SYNC;
}

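/*
 * Completion callback for a write header: on error, record the error
 * in the affected open contexts; otherwise either queue each request
 * for a COMMIT or remove it from the inode, then end page writeback.
 */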
static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_commit_info cinfo;
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
		    (hdr->good_bytes < bytes)) {
			nfs_set_pageerror(req->wb_page);
			nfs_context_set_write_error(req->wb_context, hdr->error);
			goto remove_req;
		}
		if (nfs_write_need_commit(hdr)) {
			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
				hdr->pgio_mirror_idx);
			goto next;
		}
remove_req:
		nfs_inode_remove_request(req);
next:
		nfs_unlock_request(req);
		nfs_end_page_writeback(req);
		nfs_release_request(req);
	}
out:
	hdr->release(hdr);
}

unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
	return cinfo->mds->ncommit;
}

/* cinfo->inode->i_lock held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
		     struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

	list_for_each_entry_safe(req, tmp, src, wb_list) {
		if (!nfs_lock_request(req))
			continue;
		kref_get(&req->wb_kref);
		if (cond_resched_lock(&cinfo->inode->i_lock))
			list_safe_reset_next(req, tmp, wb_list);
		nfs_request_remove_commit_list(req, cinfo);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
	}
	return ret;
}

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
		struct nfs_commit_info *cinfo)
{
	int ret = 0;

	spin_lock(&cinfo->inode->i_lock);
	if (cinfo->mds->ncommit > 0) {
		const int max = INT_MAX;

		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
					   cinfo, max);
		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
	}
	spin_unlock(&cinfo->inode->i_lock);
	return ret;
}

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
		struct page *page,
		unsigned int offset,
		unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	if (!PagePrivate(page))
		return NULL;

	end = offset + bytes;
	spin_lock(&inode->i_lock);

	for (;;) {
		req = nfs_page_find_head_request_locked(NFS_I(inode), page);
		if (req == NULL)
			goto out_unlock;

		/* should be handled by nfs_flush_incompatible */
		WARN_ON_ONCE(req->wb_head != req);
		WARN_ON_ONCE(req->wb_this_page != req);

		rqend = req->wb_offset + req->wb_bytes;
		/*
		 * Tell the caller to flush out the request if
		 * the offsets are non-contiguous.
		 * Note: nfs_flush_incompatible() will already
		 * have flushed out requests having wrong owners.
		 */
		if (offset > rqend
		    || end < req->wb_offset)
			goto out_flushme;

		if (nfs_lock_request(req))
			break;

		/* The request is locked, so wait and then retry */
		spin_unlock(&inode->i_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (error != 0)
			goto out_err;
		spin_lock(&inode->i_lock);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
out_unlock:
	if (req)
		nfs_clear_request_commit(req);
	spin_unlock(&inode->i_lock);
	return req;
out_flushme:
	spin_unlock(&inode->i_lock);
	nfs_release_request(req);
	error = nfs_wb_page(inode, page);
out_err:
	return ERR_PTR(error);
}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page	*req;

	req = nfs_try_to_update_request(inode, page, offset, bytes);
	if (req != NULL)
		goto out;
	req = nfs_create_request(ctx, page, NULL, offset, bytes);
	if (IS_ERR(req))
		goto out;
	nfs_inode_add_request(inode, req);
out:
	return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;

	req = nfs_setup_write_request(ctx, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_mark_uptodate(req);
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	return 0;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_lock_context *l_ctx;
	struct file_lock_context *flctx = file_inode(file)->i_flctx;
	struct nfs_page	*req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_head_request(page);
		if (req == NULL)
			return 0;
		l_ctx = req->wb_lock_context;
		do_flush = req->wb_page != page ||
			!nfs_match_open_context(req->wb_context, ctx);
		/* for now, flush if more than 1 request in page_group */
		do_flush |= req->wb_this_page != req;
		if (l_ctx && flctx &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock))) {
			do_flush |= l_ctx->lockowner != current->files;
		}
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page_file_mapping(page)->host, page);
	} while (status == 0);
	return status;
}

/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Returns 0 and sets a credential flag, which triggers the inode to
 * flush and perform NFS_FILE_SYNC writes, if the key will expire
 * within RPC_KEY_EXPIRE_TIMEO.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx = nfs_file_open_context(filp);
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;

	return rpcauth_key_timeout_notify(auth, ctx->cred);
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
{
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;

	return rpcauth_cred_key_to_expire(auth, ctx->cred);
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfs_have_delegated_attributes(inode))
		goto out;
	if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
		return false;
	smp_rmb();
	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
		return false;
out:
	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
		return false;
	return PageUptodate(page) != 0;
}

static bool
is_whole_file_wrlock(struct file_lock *fl)
{
	return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
			fl->fl_type == F_WRLCK;
}

/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
	int ret;
	struct file_lock_context *flctx = inode->i_flctx;
	struct file_lock *fl;

	if (file->f_flags & O_DSYNC)
		return 0;
	if (!nfs_write_pageuptodate(page, inode))
		return 0;
	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		return 1;
	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
		       list_empty_careful(&flctx->flc_posix)))
		return 1;

	/* Check to see if there are whole file write locks */
	ret = 0;
	spin_lock(&flctx->flc_lock);
	if (!list_empty(&flctx->flc_posix)) {
		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
					fl_list);
		if (is_whole_file_wrlock(fl))
			ret = 1;
	} else if (!list_empty(&flctx->flc_flock)) {
		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
					fl_list);
		if (fl->fl_type == F_WRLCK)
			ret = 1;
	}
	spin_unlock(&flctx->flc_lock);
	return ret;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct inode	*inode = page_file_mapping(page)->host;
	int		status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS:       nfs_updatepage(%pD2 %d@%lld)\n",
		file, count, (long long)(page_file_offset(page) + offset));

	if (!count)
		goto out;

	if (nfs_can_extend_write(file, page, inode)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	if (status < 0)
		nfs_set_pageerror(page);
	else
		__set_page_dirty_nobuffers(page);
out:
	dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
			status, (long long)i_size_read(inode));
	return status;
}

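/*
 * Translate the FLUSH_* flags into an RPC scheduling priority for the
 * write or commit task.
 */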
static int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

static void nfs_initiate_write(struct nfs_pgio_header *hdr,
			       struct rpc_message *msg,
			       const struct nfs_rpc_ops *rpc_ops,
			       struct rpc_task_setup *task_setup_data, int how)
{
	int priority = flush_task_priority(how);

	task_setup_data->priority = priority;
	rpc_ops->write_setup(hdr, msg);

	nfs4_state_protect_write(NFS_SERVER(hdr->inode)->nfs_client,
				 &task_setup_data->rpc_client, msg, hdr);
}

/* If an nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	nfs_mark_request_dirty(req);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
	nfs_unlock_request(req);
	nfs_end_page_writeback(req);
	nfs_release_request(req);
}

static void nfs_async_write_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
	}
}

static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	nfs_async_write_error(&hdr->pages);
}

static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
	.error_cleanup = nfs_async_write_error,
	.completion = nfs_write_completion,
	.reschedule_io = nfs_async_write_reschedule_io,
};

void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
			       struct inode *inode, int ioflags, bool force_mds,
			       const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
			server->wsize, ioflags);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);

void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	nfs_pageio_stop_mirroring(pgio);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);


void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}

/*
 * Special version of should_remove_suid() that ignores capabilities.
 */
static int nfs_should_remove_suid(const struct inode *inode)
{
	umode_t mode = inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && S_ISREG(mode)))
		return kill;

	return 0;
}

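/*
 * Sanity-check the post-op size returned by the server: a size that
 * would shrink the file below the locally known i_size is ignored,
 * and a size matching the end of this write is applied under an
 * attribute barrier.
 */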
static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
		struct nfs_fattr *fattr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;
	u64 size = argp->offset + resp->count;

	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
		fattr->size = size;
	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
		fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
		return;
	}
	if (size != fattr->size)
		return;
	/* Set attribute barrier */
	nfs_fattr_set_barrier(fattr);
	/* ...and update size */
	fattr->valid |= NFS_ATTR_FATTR_SIZE;
}

void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
{
	struct nfs_fattr *fattr = &hdr->fattr;
	struct inode *inode = hdr->inode;

	spin_lock(&inode->i_lock);
	nfs_writeback_check_extend(hdr, fattr);
	nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);

/*
 * This function is called when the WRITE call is complete.
 */
static int nfs_writeback_done(struct rpc_task *task,
			      struct nfs_pgio_header *hdr,
			      struct inode *inode)
{
	int status;

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(inode)->write_done(task, hdr);
	if (status != 0)
		return status;
	nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);

	if (hdr->res.verf->committed < hdr->args.stable &&
	    task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long    complain;

		/* Note this will print the MDS for a DS write */
		if (time_before(complain, jiffies)) {
			dprintk("NFS:       faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(inode)->nfs_client->cl_hostname,
				hdr->res.verf->committed, hdr->args.stable);
			complain = jiffies + 300 * HZ;
		}
	}

	/* Deal with the suid/sgid bit corner case */
	if (nfs_should_remove_suid(inode))
		nfs_mark_for_revalidate(inode);
	return 0;
}

/*
 * This function is called when the WRITE call is complete.
 */
static void nfs_writeback_result(struct rpc_task *task,
				 struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args	*argp = &hdr->args;
	struct nfs_pgio_res	*resp = &hdr->res;

	if (resp->count < argp->count) {
		static unsigned long    complain;

		/* This was a short write! */
		nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count == 0) {
			if (time_before(complain, jiffies)) {
				printk(KERN_WARNING
				       "NFS: Server wrote zero bytes, expected %u.\n",
				       argp->count);
				complain = jiffies + 300 * HZ;
			}
			nfs_set_pgio_error(hdr, -EIO, argp->offset);
			task->tk_status = -EIO;
			return;
		}

		/* For non rpc-based layout drivers, retry-through-MDS */
		if (!task->tk_ops) {
			hdr->pnfs_error = -EAGAIN;
			return;
		}

		/* Was this an NFSv2 write or an NFSv3 stable write? */
		if (resp->verf->committed != NFS_UNSTABLE) {
			/* Resend from where the server left off */
			hdr->mds_offset += resp->count;
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
		} else {
			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */
			argp->stable = NFS_FILE_SYNC;
		}
		rpc_restart_call_prepare(task);
	}
}

static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
{
	return wait_on_atomic_t(&cinfo->rpcs_out,
			nfs_wait_atomic_killable, TASK_KILLABLE);
}

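/*
 * nfs_commit_begin() and nfs_commit_end() track the number of COMMIT
 * RPCs in flight through cinfo->rpcs_out; when the count reaches zero,
 * waiters sleeping in wait_on_commit() are woken up.
 */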
static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
{
	atomic_inc(&cinfo->rpcs_out);
}

static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
{
	if (atomic_dec_and_test(&cinfo->rpcs_out))
		wake_up_atomic_t(&cinfo->rpcs_out);
}

void nfs_commitdata_release(struct nfs_commit_data *data)
{
	put_nfs_open_context(data->context);
	nfs_commit_free(data);
}
EXPORT_SYMBOL_GPL(nfs_commitdata_release);

int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
			const struct nfs_rpc_ops *nfs_ops,
			const struct rpc_call_ops *call_ops,
			int how, int flags)
{
	struct rpc_task *task;
	int priority = flush_task_priority(how);
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
		.priority = priority,
	};
	/* Set up the initial task struct.  */
	nfs_ops->commit_setup(data, &msg);

	dprintk("NFS: initiated commit call\n");

	nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client,
		NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (how & FLUSH_SYNC)
		rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_commit);

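/*
 * Compute the last write byte (lwb) over all requests on the list;
 * used to fill in data->lwb when committing through a pNFS layout
 * segment.
 */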
static loff_t nfs_get_lwb(struct list_head *head)
{
	loff_t lwb = 0;
	struct nfs_page *req;

	list_for_each_entry(req, head, wb_list)
		if (lwb < (req_offset(req) + req->wb_bytes))
			lwb = req_offset(req) + req->wb_bytes;

	return lwb;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
void nfs_init_commit(struct nfs_commit_data *data,
		     struct list_head *head,
		     struct pnfs_layout_segment *lseg,
		     struct nfs_commit_info *cinfo)
{
	struct nfs_page *first = nfs_list_entry(head->next);
	struct inode *inode = d_inode(first->wb_context->dentry);

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;
	data->lseg	  = lseg; /* reference transferred */
	/* only set lwb for pnfs commit */
	if (lseg)
		data->lwb = nfs_get_lwb(&data->pages);
	data->mds_ops     = &nfs_commit_ops;
	data->completion_ops = cinfo->completion_ops;
	data->dreq	  = cinfo->dreq;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->context     = get_nfs_open_context(first->wb_context);
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);
}
EXPORT_SYMBOL_GPL(nfs_init_commit);

void nfs_retry_commit(struct list_head *page_list,
		      struct pnfs_layout_segment *lseg,
		      struct nfs_commit_info *cinfo,
		      u32 ds_commit_idx)
{
	struct nfs_page *req;

	while (!list_empty(page_list)) {
		req = nfs_list_entry(page_list->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
		if (!cinfo->dreq)
			nfs_clear_page_commit(req->wb_page);
		nfs_unlock_and_release_request(req);
	}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);

static void
nfs_commit_resched_write(struct nfs_commit_info *cinfo,
		struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how,
		struct nfs_commit_info *cinfo)
{
	struct nfs_commit_data	*data;

	/* another commit raced with us */
	if (list_empty(head))
		return 0;

	data = nfs_commitdata_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_init_commit(data, head, NULL, cinfo);
	atomic_inc(&cinfo->mds->rpcs_out);
	return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
				   data->mds_ops, how, 0);
 out_bad:
	nfs_retry_commit(head, NULL, cinfo, 0);
	return -ENOMEM;
}

int nfs_commit_file(struct file *file, struct nfs_write_verifier *verf)
{
	struct inode *inode = file_inode(file);
	struct nfs_open_context *open;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int ret;

	open = get_nfs_open_context(nfs_file_open_context(file));
	req  = nfs_create_request(open, NULL, NULL, 0, i_size_read(inode));
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out_put;
	}

	nfs_init_cinfo_from_inode(&cinfo, inode);

	memcpy(&req->wb_verf, verf, sizeof(struct nfs_write_verifier));
	nfs_request_add_commit_list(req, &cinfo);
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	if (ret > 0)
		ret = 0;

	nfs_free_request(req);
out_put:
	put_nfs_open_context(open);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_commit_file);

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data	*data = calldata;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	NFS_PROTO(data->inode)->commit_done(task, data);
}

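/*
 * Process the pages of a completed COMMIT: requests whose stored write
 * verifier matches the verifier returned by the server are finished
 * and removed from the inode; mismatches are redirtied so the data is
 * written (and committed) again.
 */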
1769static void nfs_commit_release_pages(struct nfs_commit_data *data)
1770{
1771	struct nfs_page	*req;
1772	int status = data->task.tk_status;
1773	struct nfs_commit_info cinfo;
1774	struct nfs_server *nfss;
1775
1776	while (!list_empty(&data->pages)) {
1777		req = nfs_list_entry(data->pages.next);
1778		nfs_list_remove_request(req);
1779		if (req->wb_page)
1780			nfs_clear_page_commit(req->wb_page);
1781
1782		dprintk("NFS:       commit (%s/%llu %d@%lld)",
1783			req->wb_context->dentry->d_sb->s_id,
1784			(unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)),
1785			req->wb_bytes,
1786			(long long)req_offset(req));
1787		if (status < 0) {
1788			nfs_context_set_write_error(req->wb_context, status);
1789			nfs_inode_remove_request(req);
1790			dprintk(", error = %d\n", status);
1791			goto next;
1792		}
1793
1794		/* Okay, COMMIT succeeded, apparently. Check the verifier
1795		 * returned by the server against all stored verfs. */
1796		if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
1797			/* We have a match */
1798			nfs_inode_remove_request(req);
1799			dprintk(" OK\n");
1800			goto next;
1801		}
1802		/* We have a mismatch. Write the page again */
1803		dprintk(" mismatch\n");
1804		nfs_mark_request_dirty(req);
1805		set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
1806	next:
1807		nfs_unlock_and_release_request(req);
1808	}
1809	nfss = NFS_SERVER(data->inode);
1810	if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
1811		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
1812
1813	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
1814	nfs_commit_end(cinfo.mds);
1815}
1816
1817static void nfs_commit_release(void *calldata)
1818{
1819	struct nfs_commit_data *data = calldata;
1820
1821	data->completion_ops->completion(data);
1822	nfs_commitdata_release(calldata);
1823}
1824
1825static const struct rpc_call_ops nfs_commit_ops = {
1826	.rpc_call_prepare = nfs_commit_prepare,
1827	.rpc_call_done = nfs_commit_done,
1828	.rpc_release = nfs_commit_release,
1829};
1830
1831static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
1832	.completion = nfs_commit_release_pages,
1833	.resched_write = nfs_commit_resched_write,
1834};
1835
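/*
 * Hand the commit list to the pNFS layout driver first; fall back to
 * a plain COMMIT through the MDS if the driver did not attempt it.
 */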
1836int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
1837			    int how, struct nfs_commit_info *cinfo)
1838{
1839	int status;
1840
1841	status = pnfs_commit_list(inode, head, how, cinfo);
1842	if (status == PNFS_NOT_ATTEMPTED)
1843		status = nfs_commit_list(inode, head, how, cinfo);
1844	return status;
1845}
1846
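/*
 * nfs_commit_inode - scan an inode for pending commits and send them off
 *
 * With FLUSH_SYNC set, wait for all outstanding COMMIT calls to finish
 * before returning.
 */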
1847int nfs_commit_inode(struct inode *inode, int how)
1848{
1849	LIST_HEAD(head);
1850	struct nfs_commit_info cinfo;
1851	int may_wait = how & FLUSH_SYNC;
1852	int error = 0;
1853	int res;
1854
1855	nfs_init_cinfo_from_inode(&cinfo, inode);
1856	nfs_commit_begin(cinfo.mds);
1857	res = nfs_scan_commit(inode, &head, &cinfo);
1858	if (res)
1859		error = nfs_generic_commit_list(inode, &head, how, &cinfo);
1860	nfs_commit_end(cinfo.mds);
1861	if (error < 0)
1862		goto out_error;
1863	if (!may_wait)
1864		goto out_mark_dirty;
1865	error = wait_on_commit(cinfo.mds);
1866	if (error < 0)
1867		return error;
1868	return res;
1869out_error:
1870	res = error;
1871	/* Note: If we exit without ensuring that the commit is complete,
1872	 * we must mark the inode as dirty. Otherwise, future calls to
1873	 * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
1874	 * that the data is on the disk.
1875	 */
1876out_mark_dirty:
1877	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1878	return res;
1879}
1880EXPORT_SYMBOL_GPL(nfs_commit_inode);
1881
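/*
 * nfs_write_inode - commit unstable writes during VFS inode writeback
 *
 * Serves as the superblock's ->write_inode callback.  For non-blocking
 * (WB_SYNC_NONE) flushes the commit is deferred, and the inode left
 * dirty, while most of the inode's requests are still in flight.
 */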
1882int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1883{
1884	struct nfs_inode *nfsi = NFS_I(inode);
1885	int flags = FLUSH_SYNC;
1886	int ret = 0;
1887
1888	/* no commits means nothing needs to be done */
1889	if (!nfsi->commit_info.ncommit)
1890		return ret;
1891
1892	if (wbc->sync_mode == WB_SYNC_NONE) {
1893		/* Don't commit yet if this is a non-blocking flush and there
1894		 * are a lot of outstanding writes for this mapping.
1895		 */
1896		if (nfsi->commit_info.ncommit <= (nfsi->nrequests >> 1))
1897			goto out_mark_dirty;
1898
1899		/* don't wait for the COMMIT response */
1900		flags = 0;
1901	}
1902
1903	ret = nfs_commit_inode(inode, flags);
1904	if (ret >= 0) {
1905		if (wbc->sync_mode == WB_SYNC_NONE) {
1906			if (ret < wbc->nr_to_write)
1907				wbc->nr_to_write -= ret;
1908			else
1909				wbc->nr_to_write = 0;
1910		}
1911		return 0;
1912	}
1913out_mark_dirty:
1914	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1915	return ret;
1916}
1917EXPORT_SYMBOL_GPL(nfs_write_inode);
1918
1919/*
1920 * Wrapper for filemap_write_and_wait_range()
1921 *
1922 * Needed for pNFS in order to ensure data becomes visible to the
1923 * client.
1924 */
1925int nfs_filemap_write_and_wait_range(struct address_space *mapping,
1926		loff_t lstart, loff_t lend)
1927{
1928	int ret;
1929
1930	ret = filemap_write_and_wait_range(mapping, lstart, lend);
1931	if (ret == 0)
1932		ret = pnfs_sync_inode(mapping->host, true);
1933	return ret;
1934}
1935EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);
1936
1937/*
1938 * Flush the inode's dirty data to the server and commit it to stable storage.
1939 */
1940int nfs_wb_all(struct inode *inode)
1941{
1942	int ret;
1943
1944	trace_nfs_writeback_inode_enter(inode);
1945
1946	ret = filemap_write_and_wait(inode->i_mapping);
1947	if (ret)
1948		goto out;
1949	ret = nfs_commit_inode(inode, FLUSH_SYNC);
1950	if (ret < 0)
1951		goto out;
1952	pnfs_sync_inode(inode, true);
1953	ret = 0;
1954
1955out:
1956	trace_nfs_writeback_inode_exit(inode, ret);
1957	return ret;
1958}
1959EXPORT_SYMBOL_GPL(nfs_wb_all);
1960
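/*
 * nfs_wb_page_cancel - cancel all outstanding writes on @page
 *
 * Typically called when the page is being invalidated: the page group
 * is joined into its head request, which is then removed without the
 * data ever being written back.
 */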
1961int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1962{
1963	struct nfs_page *req;
1964	int ret = 0;
1965
1966	wait_on_page_writeback(page);
1967
1968	/* blocking call to cancel all requests and join to a single (head)
1969	 * request */
1970	req = nfs_lock_and_join_requests(page, false);
1971
1972	if (IS_ERR(req)) {
1973		ret = PTR_ERR(req);
1974	} else if (req) {
1975		/* all requests from this page have been cancelled by
1976		 * nfs_lock_and_join_requests, so just remove the head
1977		 * request from the inode / page_private pointer and
1978		 * release it */
1979		nfs_inode_remove_request(req);
1980		nfs_unlock_and_release_request(req);
1981	}
1982
1983	return ret;
1984}
1985
1986/*
1987 * Write back all requests on one page - we do this before reading it.
1988 */
1989int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
1990{
1991	loff_t range_start = page_file_offset(page);
1992	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
1993	struct writeback_control wbc = {
1994		.sync_mode = WB_SYNC_ALL,
1995		.nr_to_write = 0,
1996		.range_start = range_start,
1997		.range_end = range_end,
1998	};
1999	int ret;
2000
2001	trace_nfs_writeback_page_enter(inode);
2002
2003	for (;;) {
2004		wait_on_page_writeback(page);
2005		if (clear_page_dirty_for_io(page)) {
2006			ret = nfs_writepage_locked(page, &wbc, launder);
2007			if (ret < 0)
2008				goto out_error;
2009			continue;
2010		}
2011		ret = 0;
2012		if (!PagePrivate(page))
2013			break;
2014		ret = nfs_commit_inode(inode, FLUSH_SYNC);
2015		if (ret < 0)
2016			goto out_error;
2017	}
2018out_error:
2019	trace_nfs_writeback_page_exit(inode, ret);
2020	return ret;
2021}
2022
2023#ifdef CONFIG_MIGRATION
2024int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
2025		struct page *page, enum migrate_mode mode)
2026{
2027	/*
2028	 * If PagePrivate is set, then the page is currently associated with
2029	 * an in-progress read or write request. Don't try to migrate it.
2030	 *
2031	 * FIXME: we could do this in principle, but we'll need a way to ensure
2032	 *        that we can safely release the inode reference while holding
2033	 *        the page lock.
2034	 */
2035	if (PagePrivate(page))
2036		return -EBUSY;
2037
2038	if (!nfs_fscache_release_page(page, GFP_KERNEL))
2039		return -EBUSY;
2040
2041	return migrate_page(mapping, newpage, page, mode);
2042}
2043#endif
2044
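/*
 * Create the slab caches and mempools that back write and commit
 * allocations, then size the writeback congestion threshold.
 */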
2045int __init nfs_init_writepagecache(void)
2046{
2047	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
2048					     sizeof(struct nfs_pgio_header),
2049					     0, SLAB_HWCACHE_ALIGN,
2050					     NULL);
2051	if (nfs_wdata_cachep == NULL)
2052		return -ENOMEM;
2053
2054	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
2055						     nfs_wdata_cachep);
2056	if (nfs_wdata_mempool == NULL)
2057		goto out_destroy_write_cache;
2058
2059	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
2060					     sizeof(struct nfs_commit_data),
2061					     0, SLAB_HWCACHE_ALIGN,
2062					     NULL);
2063	if (nfs_cdata_cachep == NULL)
2064		goto out_destroy_write_mempool;
2065
2066	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
2067						      nfs_cdata_cachep);
2068	if (nfs_commit_mempool == NULL)
2069		goto out_destroy_commit_cache;
2070
2071	/*
2072	 * NFS congestion size, scale with available memory.
2073	 *
2074	 *  64MB:    8192k
2075	 * 128MB:   11585k
2076	 * 256MB:   16384k
2077	 * 512MB:   23170k
2078	 *   1GB:   32768k
2079	 *   2GB:   46340k
2080	 *   4GB:   65536k
2081	 *   8GB:   92681k
2082	 *  16GB:  131072k
2083	 *
2084	 * This allows larger machines to have larger/more transfers.
2085	 * Limit the default to 256MB.
2086	 */
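	/* Worked example, assuming 4k pages (PAGE_SHIFT == 12): 1GB of RAM
	 * gives totalram_pages = 262144, int_sqrt() = 512, and
	 * (16 * 512) << (12 - 10) = 32768k -- matching the table above.
	 */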
2087	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
2088	if (nfs_congestion_kb > 256*1024)
2089		nfs_congestion_kb = 256*1024;
2090
2091	return 0;
2092
2093out_destroy_commit_cache:
2094	kmem_cache_destroy(nfs_cdata_cachep);
2095out_destroy_write_mempool:
2096	mempool_destroy(nfs_wdata_mempool);
2097out_destroy_write_cache:
2098	kmem_cache_destroy(nfs_wdata_cachep);
2099	return -ENOMEM;
2100}
2101
2102void nfs_destroy_writepagecache(void)
2103{
2104	mempool_destroy(nfs_commit_mempool);
2105	kmem_cache_destroy(nfs_cdata_cachep);
2106	mempool_destroy(nfs_wdata_mempool);
2107	kmem_cache_destroy(nfs_wdata_cachep);
2108}
2109
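/*
 * Hooks that plug write handling into the generic NFS page I/O engine.
 */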
2110static const struct nfs_rw_ops nfs_rw_write_ops = {
2111	.rw_mode		= FMODE_WRITE,
2112	.rw_alloc_header	= nfs_writehdr_alloc,
2113	.rw_free_header		= nfs_writehdr_free,
2114	.rw_done		= nfs_writeback_done,
2115	.rw_result		= nfs_writeback_result,
2116	.rw_initiate		= nfs_initiate_write,
2117};
v4.6
   1/*
   2 * linux/fs/nfs/write.c
   3 *
   4 * Write file data over NFS.
   5 *
   6 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
   7 */
   8
   9#include <linux/types.h>
  10#include <linux/slab.h>
  11#include <linux/mm.h>
  12#include <linux/pagemap.h>
  13#include <linux/file.h>
  14#include <linux/writeback.h>
  15#include <linux/swap.h>
  16#include <linux/migrate.h>
  17
  18#include <linux/sunrpc/clnt.h>
  19#include <linux/nfs_fs.h>
  20#include <linux/nfs_mount.h>
  21#include <linux/nfs_page.h>
  22#include <linux/backing-dev.h>
  23#include <linux/export.h>
  24#include <linux/freezer.h>
  25#include <linux/wait.h>
  26
  27#include <asm/uaccess.h>
  28
  29#include "delegation.h"
  30#include "internal.h"
  31#include "iostat.h"
  32#include "nfs4_fs.h"
  33#include "fscache.h"
  34#include "pnfs.h"
  35
  36#include "nfstrace.h"
  37
  38#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
  39
  40#define MIN_POOL_WRITE		(32)
  41#define MIN_POOL_COMMIT		(4)
  42
  43/*
  44 * Local function declarations
  45 */
  46static void nfs_redirty_request(struct nfs_page *req);
  47static const struct rpc_call_ops nfs_commit_ops;
  48static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
  49static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
  50static const struct nfs_rw_ops nfs_rw_write_ops;
  51static void nfs_clear_request_commit(struct nfs_page *req);
  52static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
  53				      struct inode *inode);
  54static struct nfs_page *
  55nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
  56						struct page *page);
  57
  58static struct kmem_cache *nfs_wdata_cachep;
  59static mempool_t *nfs_wdata_mempool;
  60static struct kmem_cache *nfs_cdata_cachep;
  61static mempool_t *nfs_commit_mempool;
  62
  63struct nfs_commit_data *nfs_commitdata_alloc(void)
  64{
  65	struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
  66
  67	if (p) {
  68		memset(p, 0, sizeof(*p));
  69		INIT_LIST_HEAD(&p->pages);
  70	}
  71	return p;
  72}
  73EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
  74
  75void nfs_commit_free(struct nfs_commit_data *p)
  76{
  77	mempool_free(p, nfs_commit_mempool);
  78}
  79EXPORT_SYMBOL_GPL(nfs_commit_free);
  80
  81static struct nfs_pgio_header *nfs_writehdr_alloc(void)
  82{
  83	struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);
  84
  85	if (p)
  86		memset(p, 0, sizeof(*p));
  87	return p;
  88}
  89
  90static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
  91{
  92	mempool_free(hdr, nfs_wdata_mempool);
  93}
  94
  95static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
  96{
  97	ctx->error = error;
  98	smp_wmb();
  99	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
 100}
 101
 102/*
 103 * nfs_page_find_head_request_locked - find head request associated with @page
 104 *
 105 * must be called while holding the inode lock.
 106 *
 107 * returns matching head request with reference held, or NULL if not found.
 108 */
 109static struct nfs_page *
 110nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
 111{
 112	struct nfs_page *req = NULL;
 113
 114	if (PagePrivate(page))
 115		req = (struct nfs_page *)page_private(page);
 116	else if (unlikely(PageSwapCache(page)))
 117		req = nfs_page_search_commits_for_head_request_locked(nfsi,
 118			page);
 119
 120	if (req) {
 121		WARN_ON_ONCE(req->wb_head != req);
 122		kref_get(&req->wb_kref);
 123	}
 124
 125	return req;
 126}
 127
 128/*
 129 * nfs_page_find_head_request - find head request associated with @page
 130 *
 131 * returns matching head request with reference held, or NULL if not found.
 132 */
 133static struct nfs_page *nfs_page_find_head_request(struct page *page)
 134{
 135	struct inode *inode = page_file_mapping(page)->host;
 136	struct nfs_page *req = NULL;
 137
 138	spin_lock(&inode->i_lock);
 139	req = nfs_page_find_head_request_locked(NFS_I(inode), page);
 140	spin_unlock(&inode->i_lock);
 141	return req;
 142}
 143
 144/* Adjust the file length if we're writing beyond the end */
 145static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
 146{
 147	struct inode *inode = page_file_mapping(page)->host;
 148	loff_t end, i_size;
 149	pgoff_t end_index;
 150
 151	spin_lock(&inode->i_lock);
 152	i_size = i_size_read(inode);
 153	end_index = (i_size - 1) >> PAGE_SHIFT;
 154	if (i_size > 0 && page_file_index(page) < end_index)
 155		goto out;
 156	end = page_file_offset(page) + ((loff_t)offset+count);
 157	if (i_size >= end)
 158		goto out;
 159	i_size_write(inode, end);
 160	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
 161out:
 162	spin_unlock(&inode->i_lock);
 163}
 164
 165/* A writeback failed: mark the page as bad, and invalidate the page cache */
 166static void nfs_set_pageerror(struct page *page)
 167{
 168	nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
 169}
 170
 171/*
 172 * nfs_page_group_search_locked
 173 * @head - head request of page group
 174 * @page_offset - offset into page
 175 *
 176 * Search page group with head @head to find a request that contains the
 177 * page offset @page_offset.
 178 *
 179 * Returns a pointer to the first matching nfs request, or NULL if no
 180 * match is found.
 181 *
 182 * Must be called with the page group lock held
 183 */
 184static struct nfs_page *
 185nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
 186{
 187	struct nfs_page *req;
 188
 189	WARN_ON_ONCE(head != head->wb_head);
 190	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_head->wb_flags));
 191
 192	req = head;
 193	do {
 194		if (page_offset >= req->wb_pgbase &&
 195		    page_offset < (req->wb_pgbase + req->wb_bytes))
 196			return req;
 197
 198		req = req->wb_this_page;
 199	} while (req != head);
 200
 201	return NULL;
 202}
 203
 204/*
 205 * nfs_page_group_covers_page
 206 * @head - head request of page group
 207 *
 208 * Return true if the page group with head @head covers the whole page,
 209 * returns false otherwise
 210 */
 211static bool nfs_page_group_covers_page(struct nfs_page *req)
 212{
 213	struct nfs_page *tmp;
 214	unsigned int pos = 0;
 215	unsigned int len = nfs_page_length(req->wb_page);
 216
 217	nfs_page_group_lock(req, false);
 218
 219	do {
 220		tmp = nfs_page_group_search_locked(req->wb_head, pos);
 221		if (tmp) {
 222			/* no way this should happen */
 223			WARN_ON_ONCE(tmp->wb_pgbase != pos);
 224			pos += tmp->wb_bytes - (pos - tmp->wb_pgbase);
 225		}
 226	} while (tmp && pos < len);
 227
 228	nfs_page_group_unlock(req);
 229	WARN_ON_ONCE(pos > len);
 230	return pos == len;
 231}
 232
 233/* We can set the PG_uptodate flag if we see that a write request
 234 * covers the full page.
 235 */
 236static void nfs_mark_uptodate(struct nfs_page *req)
 237{
 238	if (PageUptodate(req->wb_page))
 239		return;
 240	if (!nfs_page_group_covers_page(req))
 241		return;
 242	SetPageUptodate(req->wb_page);
 243}
 244
 245static int wb_priority(struct writeback_control *wbc)
 246{
 247	int ret = 0;
 248	if (wbc->for_reclaim)
 249		return FLUSH_HIGHPRI | FLUSH_COND_STABLE;
 250	if (wbc->sync_mode == WB_SYNC_ALL)
 251		ret = FLUSH_COND_STABLE;
 252	return ret;
 253}
 254
 255/*
 256 * NFS congestion control
 257 */
 258
 259int nfs_congestion_kb;
 260
 261#define NFS_CONGESTION_ON_THRESH 	(nfs_congestion_kb >> (PAGE_SHIFT-10))
 262#define NFS_CONGESTION_OFF_THRESH	\
 263	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
 264
 265static void nfs_set_page_writeback(struct page *page)
 266{
 267	struct nfs_server *nfss = NFS_SERVER(page_file_mapping(page)->host);
 268	int ret = test_set_page_writeback(page);
 269
 270	WARN_ON_ONCE(ret != 0);
 271
 272	if (atomic_long_inc_return(&nfss->writeback) >
 273			NFS_CONGESTION_ON_THRESH) {
 274		set_bdi_congested(&nfss->backing_dev_info,
 275					BLK_RW_ASYNC);
 276	}
 277}
 278
 279static void nfs_end_page_writeback(struct nfs_page *req)
 280{
 281	struct inode *inode = page_file_mapping(req->wb_page)->host;
 282	struct nfs_server *nfss = NFS_SERVER(inode);
 283
 284	if (!nfs_page_group_sync_on_bit(req, PG_WB_END))
 285		return;
 286
 287	end_page_writeback(req->wb_page);
 288	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
 289		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
 290}
 291
 292
 293/* nfs_page_group_clear_bits
 294 *   @req - an nfs request
 295 * clears all page group related bits from @req
 296 */
 297static void
 298nfs_page_group_clear_bits(struct nfs_page *req)
 299{
 300	clear_bit(PG_TEARDOWN, &req->wb_flags);
 301	clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
 302	clear_bit(PG_UPTODATE, &req->wb_flags);
 303	clear_bit(PG_WB_END, &req->wb_flags);
 304	clear_bit(PG_REMOVE, &req->wb_flags);
 305}
 306
 307
 308/*
 309 * nfs_unroll_locks_and_wait -  unlock all newly locked reqs and wait on @req
 310 *
 311 * this is a helper function for nfs_lock_and_join_requests
 312 *
 313 * @inode - inode associated with request page group, must be holding inode lock
 314 * @head  - head request of page group, must be holding head lock
 315 * @req   - request that couldn't lock and needs to wait on the req bit lock
 316 * @nonblock - if true, don't actually wait
 317 *
 318 * NOTE: this must be called holding page_group bit lock and inode spin lock
 319 *       and BOTH will be released before returning.
 320 *
 321 * returns 0 on success, < 0 on error.
 322 */
 323static int
 324nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
 325			  struct nfs_page *req, bool nonblock)
 326	__releases(&inode->i_lock)
 327{
 328	struct nfs_page *tmp;
 329	int ret;
 330
 331	/* relinquish all the locks successfully grabbed this run */
 332	for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
 333		nfs_unlock_request(tmp);
 334
 335	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
 336
 337	/* grab a ref on the request that will be waited on */
 338	kref_get(&req->wb_kref);
 339
 340	nfs_page_group_unlock(head);
 341	spin_unlock(&inode->i_lock);
 342
 343	/* release ref from nfs_page_find_head_request_locked */
 344	nfs_release_request(head);
 345
 346	if (!nonblock)
 347		ret = nfs_wait_on_request(req);
 348	else
 349		ret = -EAGAIN;
 350	nfs_release_request(req);
 351
 352	return ret;
 353}
 354
 355/*
 356 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 357 *
 358 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 359 * @old_head - the old head of the list
 360 *
 361 * All subrequests must be locked and removed from all lists, so at this point
 362 * they are only "active" in this function, and possibly in nfs_wait_on_request
 363 * with a reference held by some other context.
 364 */
 365static void
 366nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
 367				 struct nfs_page *old_head)
 368{
 369	while (destroy_list) {
 370		struct nfs_page *subreq = destroy_list;
 371
 372		destroy_list = (subreq->wb_this_page == old_head) ?
 373				   NULL : subreq->wb_this_page;
 374
 375		WARN_ON_ONCE(old_head != subreq->wb_head);
 376
 377		/* make sure old group is not used */
 378		subreq->wb_head = subreq;
 379		subreq->wb_this_page = subreq;
 380
 381		/* subreq is now totally disconnected from page group or any
 382		 * write / commit lists. last chance to wake any waiters */
 383		nfs_unlock_request(subreq);
 384
 385		if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
 386			/* release ref on old head request */
 387			nfs_release_request(old_head);
 388
 389			nfs_page_group_clear_bits(subreq);
 390
 391			/* release the PG_INODE_REF reference */
 392			if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
 393				nfs_release_request(subreq);
 394			else
 395				WARN_ON_ONCE(1);
 396		} else {
 397			WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
 398			/* zombie requests have already released the last
 399			 * reference and were waiting on the rest of the
 400			 * group to complete. Since it's no longer part of a
 401			 * group, simply free the request */
 402			nfs_page_group_clear_bits(subreq);
 403			nfs_free_request(subreq);
 404		}
 405	}
 406}
 407
 408/*
 409 * nfs_lock_and_join_requests - join all subreqs to the head req and return
 410 *                              a locked reference, cancelling any pending
 411 *                              operations for this page.
 412 *
 413 * @page - the page used to lookup the "page group" of nfs_page structures
 414 * @nonblock - if true, don't block waiting for request locks
 415 *
 416 * This function joins all sub requests to the head request by first
 417 * locking all requests in the group, cancelling any pending operations
 418 * and finally updating the head request to cover the whole range covered by
 419 * the (former) group.  All subrequests are removed from any write or commit
 420 * lists, unlinked from the group and destroyed.
 421 *
 422 * Returns a locked, referenced pointer to the head request - which after
 423 * this call is guaranteed to be the only request associated with the page.
 424 * Returns NULL if no requests are found for @page, or a ERR_PTR if an
 425 * error was encountered.
 426 */
 427static struct nfs_page *
 428nfs_lock_and_join_requests(struct page *page, bool nonblock)
 429{
 430	struct inode *inode = page_file_mapping(page)->host;
 431	struct nfs_page *head, *subreq;
 432	struct nfs_page *destroy_list = NULL;
 433	unsigned int total_bytes;
 434	int ret;
 435
 436try_again:
 437	total_bytes = 0;
 438
 439	WARN_ON_ONCE(destroy_list);
 440
 441	spin_lock(&inode->i_lock);
 442
 443	/*
 444	 * A reference is taken only on the head request which acts as a
 445	 * reference to the whole page group - the group will not be destroyed
 446	 * until the head reference is released.
 447	 */
 448	head = nfs_page_find_head_request_locked(NFS_I(inode), page);
 449
 450	if (!head) {
 451		spin_unlock(&inode->i_lock);
 452		return NULL;
 453	}
 454
 455	/* holding inode lock, so always make a non-blocking call to try the
 456	 * page group lock */
 457	ret = nfs_page_group_lock(head, true);
 458	if (ret < 0) {
 459		spin_unlock(&inode->i_lock);
 460
 461		if (!nonblock && ret == -EAGAIN) {
 462			nfs_page_group_lock_wait(head);
 463			nfs_release_request(head);
 464			goto try_again;
 465		}
 466
 467		nfs_release_request(head);
 468		return ERR_PTR(ret);
 469	}
 470
 471	/* lock each request in the page group */
 472	subreq = head;
 473	do {
 474		/*
 475		 * Subrequests are always contiguous, non overlapping
 476		 * and in order - but may be repeated (mirrored writes).
 477		 */
 478		if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
 479			/* keep track of how many bytes this group covers */
 480			total_bytes += subreq->wb_bytes;
 481		} else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
 482			    ((subreq->wb_offset + subreq->wb_bytes) >
 483			     (head->wb_offset + total_bytes)))) {
 484			nfs_page_group_unlock(head);
 485			spin_unlock(&inode->i_lock);
 486			return ERR_PTR(-EIO);
 487		}
 488
 489		if (!nfs_lock_request(subreq)) {
 490			/* releases page group bit lock and
 491			 * inode spin lock and all references */
 492			ret = nfs_unroll_locks_and_wait(inode, head,
 493				subreq, nonblock);
 494
 495			if (ret == 0)
 496				goto try_again;
 497
 498			return ERR_PTR(ret);
 499		}
 500
 501		subreq = subreq->wb_this_page;
 502	} while (subreq != head);
 503
 504	/* Now that all requests are locked, make sure they aren't on any list.
 505	 * Commit list removal accounting is done after locks are dropped */
 506	subreq = head;
 507	do {
 508		nfs_clear_request_commit(subreq);
 509		subreq = subreq->wb_this_page;
 510	} while (subreq != head);
 511
 512	/* unlink subrequests from head, destroy them later */
 513	if (head->wb_this_page != head) {
 514		/* destroy list will be terminated by head */
 515		destroy_list = head->wb_this_page;
 516		head->wb_this_page = head;
 517
 518		/* change head request to cover whole range that
 519		 * the former page group covered */
 520		head->wb_bytes = total_bytes;
 521	}
 522
 523	/*
 524	 * prepare head request to be added to new pgio descriptor
 525	 */
 526	nfs_page_group_clear_bits(head);
 527
 528	/*
 529	 * some part of the group was still on the inode list - otherwise
 530	 * the group wouldn't be involved in async write.
 531	 * grab a reference for the head request, iff it needs one.
 532	 */
 533	if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
 534		kref_get(&head->wb_kref);
 535
 536	nfs_page_group_unlock(head);
 537
 538	/* drop lock to clean uprequests on destroy list */
 539	spin_unlock(&inode->i_lock);
 540
 541	nfs_destroy_unlinked_subrequests(destroy_list, head);
 542
 543	/* still holds ref on head from nfs_page_find_head_request_locked
 544	 * and still has lock on head from lock loop */
 545	return head;
 546}
 547
 548static void nfs_write_error_remove_page(struct nfs_page *req)
 549{
 550	nfs_unlock_request(req);
 551	nfs_end_page_writeback(req);
 552	nfs_release_request(req);
 553	generic_error_remove_page(page_file_mapping(req->wb_page),
 554				  req->wb_page);
 555}
 556
 557/*
 558 * Find an associated nfs write request, and prepare to flush it out
 559 * May return an error if the user signalled nfs_wait_on_request().
 560 */
 561static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 562				struct page *page, bool nonblock,
 563				bool launder)
 564{
 565	struct nfs_page *req;
 566	int ret = 0;
 567
 568	req = nfs_lock_and_join_requests(page, nonblock);
 569	if (!req)
 570		goto out;
 571	ret = PTR_ERR(req);
 572	if (IS_ERR(req))
 573		goto out;
 574
 575	nfs_set_page_writeback(page);
 576	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
 577
 578	ret = 0;
 579	if (!nfs_pageio_add_request(pgio, req)) {
 580		ret = pgio->pg_error;
 581		/*
 582		 * Remove the problematic req upon fatal errors
 583		 * in launder case, while other dirty pages can
 584		 * still be around until they get flushed.
 585		 */
 586		if (nfs_error_is_fatal(ret)) {
 587			nfs_context_set_write_error(req->wb_context, ret);
 588			if (launder) {
 589				nfs_write_error_remove_page(req);
 590				goto out;
 591			}
 592		}
 593		nfs_redirty_request(req);
 594		ret = -EAGAIN;
 595	} else
 596		nfs_add_stats(page_file_mapping(page)->host,
 597				NFSIOS_WRITEPAGES, 1);
 598out:
 599	return ret;
 600}
 601
 602static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
 603			    struct nfs_pageio_descriptor *pgio, bool launder)
 604{
 605	int ret;
 606
 607	nfs_pageio_cond_complete(pgio, page_file_index(page));
 608	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE,
 609				   launder);
 610	if (ret == -EAGAIN) {
 611		redirty_page_for_writepage(wbc, page);
 612		ret = 0;
 613	}
 614	return ret;
 615}
 616
 617/*
 618 * Write an mmapped page to the server.
 619 */
 620static int nfs_writepage_locked(struct page *page,
 621				struct writeback_control *wbc,
 622				bool launder)
 623{
 624	struct nfs_pageio_descriptor pgio;
 625	struct inode *inode = page_file_mapping(page)->host;
 626	int err;
 627
 628	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
 629	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
 630				false, &nfs_async_write_completion_ops);
 631	err = nfs_do_writepage(page, wbc, &pgio, launder);
 632	nfs_pageio_complete(&pgio);
 633	if (err < 0)
 634		return err;
 635	if (pgio.pg_error < 0)
 636		return pgio.pg_error;
 637	return 0;
 638}
 639
 640int nfs_writepage(struct page *page, struct writeback_control *wbc)
 641{
 642	int ret;
 643
 644	ret = nfs_writepage_locked(page, wbc, false);
 645	unlock_page(page);
 646	return ret;
 647}
 648
 649static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
 650{
 651	int ret;
 652
 653	ret = nfs_do_writepage(page, wbc, data, false);
 654	unlock_page(page);
 655	return ret;
 656}
 657
 658int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 659{
 660	struct inode *inode = mapping->host;
 661	unsigned long *bitlock = &NFS_I(inode)->flags;
 662	struct nfs_pageio_descriptor pgio;
 663	int err;
 664
 665	/* Stop dirtying of new pages while we sync */
 666	err = wait_on_bit_lock_action(bitlock, NFS_INO_FLUSHING,
 667			nfs_wait_bit_killable, TASK_KILLABLE);
 668	if (err)
 669		goto out_err;
 670
 671	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
 672
 673	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
 674				&nfs_async_write_completion_ops);
 675	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
 676	nfs_pageio_complete(&pgio);
 677
 678	clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
 679	smp_mb__after_atomic();
 680	wake_up_bit(bitlock, NFS_INO_FLUSHING);
 681
 682	if (err < 0)
 683		goto out_err;
 684	err = pgio.pg_error;
 685	if (err < 0)
 686		goto out_err;
 687	return 0;
 688out_err:
 689	return err;
 690}
 691
 692/*
 693 * Insert a write request into an inode
 694 */
 695static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 696{
 697	struct nfs_inode *nfsi = NFS_I(inode);
 698
 699	WARN_ON_ONCE(req->wb_this_page != req);
 700
 701	/* Lock the request! */
 702	nfs_lock_request(req);
 703
 704	spin_lock(&inode->i_lock);
 705	if (!nfsi->nrequests &&
 706	    NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
 707		inode->i_version++;
 708	/*
 709	 * Swap-space should not get truncated. Hence no need to plug the race
 710	 * with invalidate/truncate.
 711	 */
 712	if (likely(!PageSwapCache(req->wb_page))) {
 713		set_bit(PG_MAPPED, &req->wb_flags);
 714		SetPagePrivate(req->wb_page);
 715		set_page_private(req->wb_page, (unsigned long)req);
 716	}
 717	nfsi->nrequests++;
 718	/* this a head request for a page group - mark it as having an
 719	 * extra reference so sub groups can follow suit.
 720	 * This flag also informs pgio layer when to bump nrequests when
 721	 * adding subrequests. */
 722	WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
 723	kref_get(&req->wb_kref);
 724	spin_unlock(&inode->i_lock);
 725}
 726
 727/*
 728 * Remove a write request from an inode
 729 */
 730static void nfs_inode_remove_request(struct nfs_page *req)
 731{
 732	struct inode *inode = d_inode(req->wb_context->dentry);
 733	struct nfs_inode *nfsi = NFS_I(inode);
 734	struct nfs_page *head;
 735
 736	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
 737		head = req->wb_head;
 738
 739		spin_lock(&inode->i_lock);
 740		if (likely(!PageSwapCache(head->wb_page))) {
 741			set_page_private(head->wb_page, 0);
 742			ClearPagePrivate(head->wb_page);
 743			smp_mb__after_atomic();
 744			wake_up_page(head->wb_page, PG_private);
 745			clear_bit(PG_MAPPED, &head->wb_flags);
 746		}
 747		nfsi->nrequests--;
 748		spin_unlock(&inode->i_lock);
 749	} else {
 750		spin_lock(&inode->i_lock);
 751		nfsi->nrequests--;
 752		spin_unlock(&inode->i_lock);
 753	}
 754
 755	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
 756		nfs_release_request(req);
 757}
 758
 759static void
 760nfs_mark_request_dirty(struct nfs_page *req)
 761{
 762	__set_page_dirty_nobuffers(req->wb_page);
 
 763}
 764
 765/*
 766 * nfs_page_search_commits_for_head_request_locked
 767 *
 768 * Search through commit lists on @inode for the head request for @page.
 769 * Must be called while holding the inode (which is cinfo) lock.
 770 *
 771 * Returns the head request if found, or NULL if not found.
 772 */
 773static struct nfs_page *
 774nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
 775						struct page *page)
 776{
 777	struct nfs_page *freq, *t;
 778	struct nfs_commit_info cinfo;
 779	struct inode *inode = &nfsi->vfs_inode;
 780
 781	nfs_init_cinfo_from_inode(&cinfo, inode);
 782
 783	/* search through pnfs commit lists */
 784	freq = pnfs_search_commit_reqs(inode, &cinfo, page);
 785	if (freq)
 786		return freq->wb_head;
 787
 788	/* Linearly search the commit list for the correct request */
 789	list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
 790		if (freq->wb_page == page)
 791			return freq->wb_head;
 792	}
 793
 794	return NULL;
 795}
 796
 797/**
 798 * nfs_request_add_commit_list_locked - add request to a commit list
 799 * @req: pointer to a struct nfs_page
 800 * @dst: commit list head
 801 * @cinfo: holds list lock and accounting info
 802 *
 803 * This sets the PG_CLEAN bit, updates the cinfo count of
 804 * number of outstanding requests requiring a commit as well as
 805 * the MM page stats.
 806 *
 807 * The caller must hold the cinfo->lock, and the nfs_page lock.
 808 */
 809void
 810nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
 811			    struct nfs_commit_info *cinfo)
 812{
 813	set_bit(PG_CLEAN, &req->wb_flags);
 814	nfs_list_add_request(req, dst);
 815	cinfo->mds->ncommit++;
 816}
 817EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);
 818
 819/**
 820 * nfs_request_add_commit_list - add request to a commit list
 821 * @req: pointer to a struct nfs_page
 822 * @dst: commit list head
 823 * @cinfo: holds list lock and accounting info
 824 *
 825 * This sets the PG_CLEAN bit, updates the cinfo count of
 826 * number of outstanding requests requiring a commit as well as
 827 * the MM page stats.
 828 *
 829 * The caller must _not_ hold the cinfo->lock, but must be
 830 * holding the nfs_page lock.
 831 */
 832void
 833nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
 834{
 835	spin_lock(cinfo->lock);
 836	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
 837	spin_unlock(cinfo->lock);
 838	nfs_mark_page_unstable(req->wb_page, cinfo);
 
 839}
 840EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
 841
 842/**
 843 * nfs_request_remove_commit_list - Remove request from a commit list
 844 * @req: pointer to a nfs_page
 845 * @cinfo: holds list lock and accounting info
 846 *
 847 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 848 * number of outstanding requests requiring a commit
 849 * It does not update the MM page stats.
 850 *
 851 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 852 */
 853void
 854nfs_request_remove_commit_list(struct nfs_page *req,
 855			       struct nfs_commit_info *cinfo)
 856{
 857	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
 858		return;
 859	nfs_list_remove_request(req);
 860	cinfo->mds->ncommit--;
 861}
 862EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
 863
 864static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
 865				      struct inode *inode)
 866{
 867	cinfo->lock = &inode->i_lock;
 868	cinfo->mds = &NFS_I(inode)->commit_info;
 869	cinfo->ds = pnfs_get_ds_info(inode);
 870	cinfo->dreq = NULL;
 871	cinfo->completion_ops = &nfs_commit_completion_ops;
 872}
 873
 874void nfs_init_cinfo(struct nfs_commit_info *cinfo,
 875		    struct inode *inode,
 876		    struct nfs_direct_req *dreq)
 877{
 878	if (dreq)
 879		nfs_init_cinfo_from_dreq(cinfo, dreq);
 880	else
 881		nfs_init_cinfo_from_inode(cinfo, inode);
 882}
 883EXPORT_SYMBOL_GPL(nfs_init_cinfo);
 884
 885/*
 886 * Add a request to the inode's commit list.
 887 */
 888void
 889nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
 890			struct nfs_commit_info *cinfo, u32 ds_commit_idx)
 891{
 892	if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
 893		return;
 894	nfs_request_add_commit_list(req, cinfo);
 895}
 896
 897static void
 898nfs_clear_page_commit(struct page *page)
 899{
 900	dec_zone_page_state(page, NR_UNSTABLE_NFS);
 901	dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
 902		    WB_RECLAIMABLE);
 903}
 904
 905/* Called holding inode (/cinfo) lock */
 906static void
 907nfs_clear_request_commit(struct nfs_page *req)
 908{
 909	if (test_bit(PG_CLEAN, &req->wb_flags)) {
 910		struct inode *inode = d_inode(req->wb_context->dentry);
 911		struct nfs_commit_info cinfo;
 912
 913		nfs_init_cinfo_from_inode(&cinfo, inode);
 914		if (!pnfs_clear_request_commit(req, &cinfo)) {
 915			nfs_request_remove_commit_list(req, &cinfo);
 916		}
 917		nfs_clear_page_commit(req->wb_page);
 918	}
 919}
 920
 921int nfs_write_need_commit(struct nfs_pgio_header *hdr)
 922{
 923	if (hdr->verf.committed == NFS_DATA_SYNC)
 924		return hdr->lseg == NULL;
 925	return hdr->verf.committed != NFS_FILE_SYNC;
 926}
 927
 928static void nfs_write_completion(struct nfs_pgio_header *hdr)
 929{
 930	struct nfs_commit_info cinfo;
 931	unsigned long bytes = 0;
 932
 933	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
 934		goto out;
 935	nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
 936	while (!list_empty(&hdr->pages)) {
 937		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 938
 939		bytes += req->wb_bytes;
 940		nfs_list_remove_request(req);
 941		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
 942		    (hdr->good_bytes < bytes)) {
 943			nfs_set_pageerror(req->wb_page);
 944			nfs_context_set_write_error(req->wb_context, hdr->error);
 945			goto remove_req;
 946		}
 947		if (nfs_write_need_commit(hdr)) {
 948			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
 949			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
 950				hdr->pgio_mirror_idx);
 951			goto next;
 952		}
 953remove_req:
 954		nfs_inode_remove_request(req);
 955next:
 956		nfs_unlock_request(req);
 957		nfs_end_page_writeback(req);
 958		nfs_release_request(req);
 959	}
 960out:
 961	hdr->release(hdr);
 962}
 963
 964unsigned long
 965nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
 966{
 967	return cinfo->mds->ncommit;
 968}
 969
 970/* cinfo->lock held by caller */
 971int
 972nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
 973		     struct nfs_commit_info *cinfo, int max)
 974{
 975	struct nfs_page *req, *tmp;
 976	int ret = 0;
 977
 978	list_for_each_entry_safe(req, tmp, src, wb_list) {
 979		if (!nfs_lock_request(req))
 980			continue;
 981		kref_get(&req->wb_kref);
 982		if (cond_resched_lock(cinfo->lock))
 983			list_safe_reset_next(req, tmp, wb_list);
 984		nfs_request_remove_commit_list(req, cinfo);
 985		nfs_list_add_request(req, dst);
 986		ret++;
 987		if ((ret == max) && !cinfo->dreq)
 988			break;
 989	}
 990	return ret;
 991}
 992
 993/*
 994 * nfs_scan_commit - Scan an inode for commit requests
 995 * @inode: NFS inode to scan
 996 * @dst: mds destination list
 997 * @cinfo: mds and ds lists of reqs ready to commit
 998 *
 999 * Moves requests from the inode's 'commit' request list.
1000 * The requests are *not* checked to ensure that they form a contiguous set.
1001 */
1002int
1003nfs_scan_commit(struct inode *inode, struct list_head *dst,
1004		struct nfs_commit_info *cinfo)
1005{
1006	int ret = 0;
1007
1008	spin_lock(cinfo->lock);
1009	if (cinfo->mds->ncommit > 0) {
1010		const int max = INT_MAX;
1011
1012		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
1013					   cinfo, max);
1014		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
1015	}
1016	spin_unlock(cinfo->lock);
1017	return ret;
1018}
1019
1020/*
1021 * Search for an existing write request, and attempt to update
1022 * it to reflect a new dirty region on a given page.
1023 *
1024 * If the attempt fails, then the existing request is flushed out
1025 * to disk.
1026 */
1027static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
1028		struct page *page,
1029		unsigned int offset,
1030		unsigned int bytes)
1031{
1032	struct nfs_page *req;
1033	unsigned int rqend;
1034	unsigned int end;
1035	int error;
1036
1037	if (!PagePrivate(page))
1038		return NULL;
1039
1040	end = offset + bytes;
1041	spin_lock(&inode->i_lock);
1042
1043	for (;;) {
1044		req = nfs_page_find_head_request_locked(NFS_I(inode), page);
1045		if (req == NULL)
1046			goto out_unlock;
1047
1048		/* should be handled by nfs_flush_incompatible */
1049		WARN_ON_ONCE(req->wb_head != req);
1050		WARN_ON_ONCE(req->wb_this_page != req);
1051
1052		rqend = req->wb_offset + req->wb_bytes;
1053		/*
1054		 * Tell the caller to flush out the request if
1055		 * the offsets are non-contiguous.
1056		 * Note: nfs_flush_incompatible() will already
1057		 * have flushed out requests having wrong owners.
1058		 */
1059		if (offset > rqend
1060		    || end < req->wb_offset)
1061			goto out_flushme;
1062
1063		if (nfs_lock_request(req))
1064			break;
1065
1066		/* The request is locked, so wait and then retry */
1067		spin_unlock(&inode->i_lock);
1068		error = nfs_wait_on_request(req);
1069		nfs_release_request(req);
1070		if (error != 0)
1071			goto out_err;
1072		spin_lock(&inode->i_lock);
1073	}
1074
1075	/* Okay, the request matches. Update the region */
1076	if (offset < req->wb_offset) {
1077		req->wb_offset = offset;
1078		req->wb_pgbase = offset;
1079	}
1080	if (end > rqend)
1081		req->wb_bytes = end - req->wb_offset;
1082	else
1083		req->wb_bytes = rqend - req->wb_offset;
1084out_unlock:
1085	if (req)
1086		nfs_clear_request_commit(req);
1087	spin_unlock(&inode->i_lock);
1088	return req;
1089out_flushme:
1090	spin_unlock(&inode->i_lock);
1091	nfs_release_request(req);
1092	error = nfs_wb_page(inode, page);
1093out_err:
1094	return ERR_PTR(error);
1095}
1096
1097/*
1098 * Try to update an existing write request, or create one if there is none.
1099 *
1100 * Note: Should always be called with the Page Lock held to prevent races
1101 * if we have to add a new request. Also assumes that the caller has
1102 * already called nfs_flush_incompatible() if necessary.
1103 */
1104static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
1105		struct page *page, unsigned int offset, unsigned int bytes)
1106{
1107	struct inode *inode = page_file_mapping(page)->host;
1108	struct nfs_page	*req;
1109
1110	req = nfs_try_to_update_request(inode, page, offset, bytes);
1111	if (req != NULL)
1112		goto out;
1113	req = nfs_create_request(ctx, page, NULL, offset, bytes);
1114	if (IS_ERR(req))
1115		goto out;
1116	nfs_inode_add_request(inode, req);
1117out:
1118	return req;
1119}
1120
1121static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
1122		unsigned int offset, unsigned int count)
1123{
1124	struct nfs_page	*req;
1125
1126	req = nfs_setup_write_request(ctx, page, offset, count);
1127	if (IS_ERR(req))
1128		return PTR_ERR(req);
1129	/* Update file length */
1130	nfs_grow_file(page, offset, count);
1131	nfs_mark_uptodate(req);
1132	nfs_mark_request_dirty(req);
1133	nfs_unlock_and_release_request(req);
1134	return 0;
1135}
1136
1137int nfs_flush_incompatible(struct file *file, struct page *page)
1138{
1139	struct nfs_open_context *ctx = nfs_file_open_context(file);
1140	struct nfs_lock_context *l_ctx;
1141	struct file_lock_context *flctx = file_inode(file)->i_flctx;
1142	struct nfs_page	*req;
1143	int do_flush, status;
1144	/*
1145	 * Look for a request corresponding to this page. If there
1146	 * is one, and it belongs to another file, we flush it out
1147	 * before we try to copy anything into the page. Do this
1148	 * due to the lack of an ACCESS-type call in NFSv2.
1149	 * Also do the same if we find a request from an existing
1150	 * dropped page.
1151	 */
1152	do {
1153		req = nfs_page_find_head_request(page);
1154		if (req == NULL)
1155			return 0;
1156		l_ctx = req->wb_lock_context;
1157		do_flush = req->wb_page != page ||
1158			!nfs_match_open_context(req->wb_context, ctx);
1159		/* for now, flush if more than 1 request in page_group */
1160		do_flush |= req->wb_this_page != req;
1161		if (l_ctx && flctx &&
1162		    !(list_empty_careful(&flctx->flc_posix) &&
1163		      list_empty_careful(&flctx->flc_flock))) {
1164			do_flush |= l_ctx->lockowner.l_owner != current->files
1165				|| l_ctx->lockowner.l_pid != current->tgid;
1166		}
1167		nfs_release_request(req);
1168		if (!do_flush)
1169			return 0;
1170		status = nfs_wb_page(page_file_mapping(page)->host, page);
1171	} while (status == 0);
1172	return status;
1173}
1174
1175/*
1176 * Avoid buffered writes when a open context credential's key would
1177 * expire soon.
1178 *
1179 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
1180 *
1181 * Return 0 and set a credential flag which triggers the inode to flush
1182 * and performs  NFS_FILE_SYNC writes if the key will expired within
1183 * RPC_KEY_EXPIRE_TIMEO.
1184 */
1185int
1186nfs_key_timeout_notify(struct file *filp, struct inode *inode)
1187{
1188	struct nfs_open_context *ctx = nfs_file_open_context(filp);
1189	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
1190
1191	return rpcauth_key_timeout_notify(auth, ctx->cred);
1192}
1193
1194/*
1195 * Test if the open context credential key is marked to expire soon.
1196 */
1197bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx)
1198{
1199	return rpcauth_cred_key_to_expire(ctx->cred);
 
 
1200}
1201
1202/*
1203 * If the page cache is marked as unsafe or invalid, then we can't rely on
1204 * the PageUptodate() flag. In this case, we will need to turn off
1205 * write optimisations that depend on the page contents being correct.
1206 */
1207static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
1208{
1209	struct nfs_inode *nfsi = NFS_I(inode);
1210
1211	if (nfs_have_delegated_attributes(inode))
1212		goto out;
1213	if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
1214		return false;
1215	smp_rmb();
1216	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
1217		return false;
1218out:
1219	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
1220		return false;
1221	return PageUptodate(page) != 0;
1222}
1223
1224static bool
1225is_whole_file_wrlock(struct file_lock *fl)
1226{
1227	return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
1228			fl->fl_type == F_WRLCK;
1229}
1230
1231/* If we know the page is up to date, and we're not using byte range locks (or
1232 * if we have the whole file locked for writing), it may be more efficient to
1233 * extend the write to cover the entire page in order to avoid fragmentation
1234 * inefficiencies.
1235 *
1236 * If the file is opened for synchronous writes then we can just skip the rest
1237 * of the checks.
1238 */
1239static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
1240{
1241	int ret;
1242	struct file_lock_context *flctx = inode->i_flctx;
1243	struct file_lock *fl;
1244
1245	if (file->f_flags & O_DSYNC)
1246		return 0;
1247	if (!nfs_write_pageuptodate(page, inode))
1248		return 0;
1249	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
1250		return 1;
1251	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
1252		       list_empty_careful(&flctx->flc_posix)))
1253		return 1;
1254
1255	/* Check to see if there are whole file write locks */
1256	ret = 0;
1257	spin_lock(&flctx->flc_lock);
1258	if (!list_empty(&flctx->flc_posix)) {
1259		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
1260					fl_list);
1261		if (is_whole_file_wrlock(fl))
1262			ret = 1;
1263	} else if (!list_empty(&flctx->flc_flock)) {
1264		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
1265					fl_list);
1266		if (fl->fl_type == F_WRLCK)
1267			ret = 1;
1268	}
1269	spin_unlock(&flctx->flc_lock);
1270	return ret;
1271}
1272
1273/*
1274 * Update and possibly write a cached page of an NFS file.
1275 *
1276 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
1277 * things with a page scheduled for an RPC call (e.g. invalidate it).
1278 */
1279int nfs_updatepage(struct file *file, struct page *page,
1280		unsigned int offset, unsigned int count)
1281{
1282	struct nfs_open_context *ctx = nfs_file_open_context(file);
1283	struct inode	*inode = page_file_mapping(page)->host;
1284	int		status = 0;
1285
1286	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
1287
1288	dprintk("NFS:       nfs_updatepage(%pD2 %d@%lld)\n",
1289		file, count, (long long)(page_file_offset(page) + offset));
1290
 
 
 
1291	if (nfs_can_extend_write(file, page, inode)) {
1292		count = max(count + offset, nfs_page_length(page));
1293		offset = 0;
1294	}
1295
1296	status = nfs_writepage_setup(ctx, page, offset, count);
1297	if (status < 0)
1298		nfs_set_pageerror(page);
1299	else
1300		__set_page_dirty_nobuffers(page);
1301
1302	dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
1303			status, (long long)i_size_read(inode));
1304	return status;
1305}
1306
1307static int flush_task_priority(int how)
1308{
1309	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
1310		case FLUSH_HIGHPRI:
1311			return RPC_PRIORITY_HIGH;
1312		case FLUSH_LOWPRI:
1313			return RPC_PRIORITY_LOW;
1314	}
1315	return RPC_PRIORITY_NORMAL;
1316}
1317
1318static void nfs_initiate_write(struct nfs_pgio_header *hdr,
1319			       struct rpc_message *msg,
1320			       const struct nfs_rpc_ops *rpc_ops,
1321			       struct rpc_task_setup *task_setup_data, int how)
1322{
1323	int priority = flush_task_priority(how);
1324
1325	task_setup_data->priority = priority;
1326	rpc_ops->write_setup(hdr, msg);
1327
1328	nfs4_state_protect_write(NFS_SERVER(hdr->inode)->nfs_client,
1329				 &task_setup_data->rpc_client, msg, hdr);
1330}
1331
1332/* If a nfs_flush_* function fails, it should remove reqs from @head and
1333 * call this on each, which will prepare them to be retried on next
1334 * writeback using standard nfs.
1335 */
1336static void nfs_redirty_request(struct nfs_page *req)
1337{
1338	nfs_mark_request_dirty(req);
1339	set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
1340	nfs_unlock_request(req);
1341	nfs_end_page_writeback(req);
1342	nfs_release_request(req);
1343}
1344
1345static void nfs_async_write_error(struct list_head *head)
1346{
1347	struct nfs_page	*req;
1348
1349	while (!list_empty(head)) {
1350		req = nfs_list_entry(head->next);
1351		nfs_list_remove_request(req);
1352		nfs_redirty_request(req);
1353	}
1354}
1355
1356static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
1357{
1358	nfs_async_write_error(&hdr->pages);
1359}
1360
1361static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
1362	.error_cleanup = nfs_async_write_error,
1363	.completion = nfs_write_completion,
1364	.reschedule_io = nfs_async_write_reschedule_io,
1365};
1366
1367void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
1368			       struct inode *inode, int ioflags, bool force_mds,
1369			       const struct nfs_pgio_completion_ops *compl_ops)
1370{
1371	struct nfs_server *server = NFS_SERVER(inode);
1372	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;
1373
1374#ifdef CONFIG_NFS_V4_1
1375	if (server->pnfs_curr_ld && !force_mds)
1376		pg_ops = server->pnfs_curr_ld->pg_write_ops;
1377#endif
1378	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
1379			server->wsize, ioflags);
1380}
1381EXPORT_SYMBOL_GPL(nfs_pageio_init_write);
1382
1383void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
1384{
1385	struct nfs_pgio_mirror *mirror;
1386
1387	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
1388		pgio->pg_ops->pg_cleanup(pgio);
1389
1390	pgio->pg_ops = &nfs_pgio_rw_ops;
1391
1392	nfs_pageio_stop_mirroring(pgio);
1393
1394	mirror = &pgio->pg_mirrors[0];
1395	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
1396}
1397EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
1398
1399
1400void nfs_commit_prepare(struct rpc_task *task, void *calldata)
1401{
1402	struct nfs_commit_data *data = calldata;
1403
1404	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
1405}
1406
1407/*
1408 * Special version of should_remove_suid() that ignores capabilities.
1409 */
1410static int nfs_should_remove_suid(const struct inode *inode)
1411{
1412	umode_t mode = inode->i_mode;
1413	int kill = 0;
1414
1415	/* suid always must be killed */
1416	if (unlikely(mode & S_ISUID))
1417		kill = ATTR_KILL_SUID;
1418
1419	/*
1420	 * sgid without any exec bits is just a mandatory locking mark; leave
1421	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
1422	 */
1423	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1424		kill |= ATTR_KILL_SGID;
1425
1426	if (unlikely(kill && S_ISREG(mode)))
1427		return kill;
1428
1429	return 0;
1430}
1431
1432static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
1433		struct nfs_fattr *fattr)
1434{
1435	struct nfs_pgio_args *argp = &hdr->args;
1436	struct nfs_pgio_res *resp = &hdr->res;
1437	u64 size = argp->offset + resp->count;
1438
1439	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
1440		fattr->size = size;
1441	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
1442		fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
1443		return;
1444	}
1445	if (size != fattr->size)
1446		return;
1447	/* Set attribute barrier */
1448	nfs_fattr_set_barrier(fattr);
1449	/* ...and update size */
1450	fattr->valid |= NFS_ATTR_FATTR_SIZE;
1451}
1452
1453void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
1454{
1455	struct nfs_fattr *fattr = &hdr->fattr;
1456	struct inode *inode = hdr->inode;
1457
1458	spin_lock(&inode->i_lock);
1459	nfs_writeback_check_extend(hdr, fattr);
1460	nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
1461	spin_unlock(&inode->i_lock);
1462}
1463EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);
1464
1465/*
1466 * This function is called when the WRITE call is complete.
1467 */
1468static int nfs_writeback_done(struct rpc_task *task,
1469			      struct nfs_pgio_header *hdr,
1470			      struct inode *inode)
1471{
1472	int status;
1473
1474	/*
1475	 * ->write_done will attempt to use post-op attributes to detect
1476	 * conflicting writes by other clients.  A strict interpretation
1477	 * of close-to-open would allow us to continue caching even if
1478	 * another writer had changed the file, but some applications
1479	 * depend on tighter cache coherency when writing.
1480	 */
1481	status = NFS_PROTO(inode)->write_done(task, hdr);
1482	if (status != 0)
1483		return status;
1484	nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
1485
1486	if (hdr->res.verf->committed < hdr->args.stable &&
1487	    task->tk_status >= 0) {
1488		/* We tried a write call, but the server did not
1489		 * commit data to stable storage even though we
1490		 * requested it.
1491		 * Note: There is a known bug in Tru64 < 5.0 in which
1492		 *	 the server reports NFS_DATA_SYNC, but performs
1493		 *	 NFS_FILE_SYNC. We therefore implement this checking
1494		 *	 as a dprintk() in order to avoid filling syslog.
1495		 */
1496		static unsigned long    complain;
1497
1498		/* Note this will print the MDS for a DS write */
1499		if (time_before(complain, jiffies)) {
1500			dprintk("NFS:       faulty NFS server %s:"
1501				" (committed = %d) != (stable = %d)\n",
1502				NFS_SERVER(inode)->nfs_client->cl_hostname,
1503				hdr->res.verf->committed, hdr->args.stable);
1504			complain = jiffies + 300 * HZ;
1505		}
1506	}
1507
1508	/* Deal with the suid/sgid bit corner case */
1509	if (nfs_should_remove_suid(inode))
1510		nfs_mark_for_revalidate(inode);
1511	return 0;
1512}
1513
1514/*
1515 * This function is called when the WRITE call is complete.
1516 */
1517static void nfs_writeback_result(struct rpc_task *task,
1518				 struct nfs_pgio_header *hdr)
1519{
1520	struct nfs_pgio_args	*argp = &hdr->args;
1521	struct nfs_pgio_res	*resp = &hdr->res;
1522
1523	if (resp->count < argp->count) {
1524		static unsigned long    complain;
1525
1526		/* This a short write! */
1527		nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);
1528
1529		/* Has the server at least made some progress? */
1530		if (resp->count == 0) {
1531			if (time_before(complain, jiffies)) {
1532				printk(KERN_WARNING
1533				       "NFS: Server wrote zero bytes, expected %u.\n",
1534				       argp->count);
1535				complain = jiffies + 300 * HZ;
1536			}
1537			nfs_set_pgio_error(hdr, -EIO, argp->offset);
1538			task->tk_status = -EIO;
1539			return;
1540		}
1541
1542		/* For non rpc-based layout drivers, retry-through-MDS */
1543		if (!task->tk_ops) {
1544			hdr->pnfs_error = -EAGAIN;
1545			return;
1546		}
1547
1548		/* Was this an NFSv2 write or an NFSv3 stable write? */
1549		if (resp->verf->committed != NFS_UNSTABLE) {
1550			/* Resend from where the server left off */
1551			hdr->mds_offset += resp->count;
1552			argp->offset += resp->count;
1553			argp->pgbase += resp->count;
1554			argp->count -= resp->count;
1555		} else {
1556			/* Resend as a stable write in order to avoid
1557			 * headaches in the case of a server crash.
1558			 */
1559			argp->stable = NFS_FILE_SYNC;
1560		}
1561		rpc_restart_call_prepare(task);
1562	}
1563}
1564
1565static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
1566{
1567	return wait_on_atomic_t(&cinfo->rpcs_out,
1568			nfs_wait_atomic_killable, TASK_KILLABLE);
1569}
1570
1571static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
1572{
1573	atomic_inc(&cinfo->rpcs_out);
1574}
1575
1576static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
1577{
1578	if (atomic_dec_and_test(&cinfo->rpcs_out))
1579		wake_up_atomic_t(&cinfo->rpcs_out);
1580}
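/*
 * Editor's sketch (hypothetical caller, not from the original source):
 * the three helpers above form a completion gate around in-flight
 * COMMIT RPCs, counted in cinfo->rpcs_out:
 */
#if 0
static void example_commit_gate(struct nfs_mds_commit_info *cinfo)
{
	nfs_commit_begin(cinfo);	/* rpcs_out: 0 -> 1 */
	/* ... launch a COMMIT whose release path calls nfs_commit_end() ... */
	nfs_commit_end(cinfo);		/* rpcs_out: 1 -> 0, wakes any waiter */
	wait_on_commit(cinfo);		/* returns once rpcs_out reaches 0 */
}
#endif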
1581
1582void nfs_commitdata_release(struct nfs_commit_data *data)
1583{
1584	put_nfs_open_context(data->context);
1585	nfs_commit_free(data);
1586}
1587EXPORT_SYMBOL_GPL(nfs_commitdata_release);
1588
1589int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
1590			const struct nfs_rpc_ops *nfs_ops,
1591			const struct rpc_call_ops *call_ops,
1592			int how, int flags)
1593{
1594	struct rpc_task *task;
1595	int priority = flush_task_priority(how);
1596	struct rpc_message msg = {
1597		.rpc_argp = &data->args,
1598		.rpc_resp = &data->res,
1599		.rpc_cred = data->cred,
1600	};
1601	struct rpc_task_setup task_setup_data = {
1602		.task = &data->task,
1603		.rpc_client = clnt,
1604		.rpc_message = &msg,
1605		.callback_ops = call_ops,
1606		.callback_data = data,
1607		.workqueue = nfsiod_workqueue,
1608		.flags = RPC_TASK_ASYNC | flags,
1609		.priority = priority,
1610	};
1611	/* Set up the initial task struct.  */
1612	nfs_ops->commit_setup(data, &msg);
1613
1614	dprintk("NFS: initiated commit call\n");
1615
1616	nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client,
1617		NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg);
1618
1619	task = rpc_run_task(&task_setup_data);
1620	if (IS_ERR(task))
1621		return PTR_ERR(task);
1622	if (how & FLUSH_SYNC)
1623		rpc_wait_for_completion_task(task);
1624	rpc_put_task(task);
1625	return 0;
1626}
1627EXPORT_SYMBOL_GPL(nfs_initiate_commit);
1628
1629static loff_t nfs_get_lwb(struct list_head *head)
1630{
1631	loff_t lwb = 0;
1632	struct nfs_page *req;
1633
1634	list_for_each_entry(req, head, wb_list)
1635		if (lwb < (req_offset(req) + req->wb_bytes))
1636			lwb = req_offset(req) + req->wb_bytes;
1637
1638	return lwb;
1639}
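/*
 * Editor's note (illustrative numbers, not from the original source):
 * nfs_get_lwb() computes the end of the highest-offset request.  For two
 * requests covering bytes 0-4095 and 8192-12287:
 *
 *	req A: req_offset() = 0,    wb_bytes = 4096  ->  end = 4096
 *	req B: req_offset() = 8192, wb_bytes = 4096  ->  end = 12288
 *
 * so lwb = 12288: the last-write-byte boundary that a pNFS layoutcommit
 * reports back to the MDS.
 */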
1640
1641/*
1642 * Set up the argument/result storage required for the RPC call.
1643 */
1644void nfs_init_commit(struct nfs_commit_data *data,
1645		     struct list_head *head,
1646		     struct pnfs_layout_segment *lseg,
1647		     struct nfs_commit_info *cinfo)
1648{
1649	struct nfs_page *first = nfs_list_entry(head->next);
1650	struct inode *inode = d_inode(first->wb_context->dentry);
1651
1652	/* Set up the RPC argument and reply structs
1653	 * NB: take care not to mess about with data->commit et al. */
1654
1655	list_splice_init(head, &data->pages);
1656
1657	data->inode	  = inode;
1658	data->cred	  = first->wb_context->cred;
1659	data->lseg	  = lseg; /* reference transferred */
1660	/* only set lwb for pnfs commit */
1661	if (lseg)
1662		data->lwb = nfs_get_lwb(&data->pages);
1663	data->mds_ops     = &nfs_commit_ops;
1664	data->completion_ops = cinfo->completion_ops;
1665	data->dreq	  = cinfo->dreq;
1666
1667	data->args.fh     = NFS_FH(data->inode);
1668	/* Note: we always request a commit of the entire inode */
1669	data->args.offset = 0;
1670	data->args.count  = 0;
1671	data->context     = get_nfs_open_context(first->wb_context);
1672	data->res.fattr   = &data->fattr;
1673	data->res.verf    = &data->verf;
1674	nfs_fattr_init(&data->fattr);
1675}
1676EXPORT_SYMBOL_GPL(nfs_init_commit);
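/*
 * Editor's note (protocol detail, not from the original source): in the
 * COMMIT arguments, offset = 0 with count = 0 has a defined meaning in
 * RFC 1813: flush all cached data from the offset to the end of the
 * file, i.e. the whole file.  That is why the code above never computes
 * a byte range for the MDS commit.
 */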
1677
1678void nfs_retry_commit(struct list_head *page_list,
1679		      struct pnfs_layout_segment *lseg,
1680		      struct nfs_commit_info *cinfo,
1681		      u32 ds_commit_idx)
1682{
1683	struct nfs_page *req;
1684
1685	while (!list_empty(page_list)) {
1686		req = nfs_list_entry(page_list->next);
1687		nfs_list_remove_request(req);
1688		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
1689		if (!cinfo->dreq)
1690			nfs_clear_page_commit(req->wb_page);
1691		nfs_unlock_and_release_request(req);
1692	}
1693}
1694EXPORT_SYMBOL_GPL(nfs_retry_commit);
1695
1696static void
1697nfs_commit_resched_write(struct nfs_commit_info *cinfo,
1698		struct nfs_page *req)
1699{
1700	__set_page_dirty_nobuffers(req->wb_page);
1701}
1702
1703/*
1704 * Commit dirty pages
1705 */
1706static int
1707nfs_commit_list(struct inode *inode, struct list_head *head, int how,
1708		struct nfs_commit_info *cinfo)
1709{
1710	struct nfs_commit_data	*data;
1711
1712	data = nfs_commitdata_alloc();
1713
1714	if (!data)
1715		goto out_bad;
1716
1717	/* Set up the argument struct */
1718	nfs_init_commit(data, head, NULL, cinfo);
1719	atomic_inc(&cinfo->mds->rpcs_out);
1720	return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
1721				   data->mds_ops, how, 0);
1722 out_bad:
1723	nfs_retry_commit(head, NULL, cinfo, 0);
1724	return -ENOMEM;
1725}
1726
1727/*
1728 * COMMIT call returned
1729 */
1730static void nfs_commit_done(struct rpc_task *task, void *calldata)
1731{
1732	struct nfs_commit_data	*data = calldata;
1733
1734	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1735		task->tk_pid, task->tk_status);
1736
1737	/* Call the NFS version-specific code */
1738	NFS_PROTO(data->inode)->commit_done(task, data);
1739}
1740
1741static void nfs_commit_release_pages(struct nfs_commit_data *data)
1742{
1743	struct nfs_page	*req;
1744	int status = data->task.tk_status;
1745	struct nfs_commit_info cinfo;
1746	struct nfs_server *nfss;
1747
1748	while (!list_empty(&data->pages)) {
1749		req = nfs_list_entry(data->pages.next);
1750		nfs_list_remove_request(req);
1751		nfs_clear_page_commit(req->wb_page);
1752
1753		dprintk("NFS:       commit (%s/%llu %d@%lld)",
1754			req->wb_context->dentry->d_sb->s_id,
1755			(unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)),
1756			req->wb_bytes,
1757			(long long)req_offset(req));
1758		if (status < 0) {
1759			nfs_context_set_write_error(req->wb_context, status);
1760			nfs_inode_remove_request(req);
1761			dprintk(", error = %d\n", status);
1762			goto next;
1763		}
1764
1765		/* Okay, COMMIT succeeded, apparently. Check the verifier
1766		 * returned by the server against all stored verfs. */
1767		if (!memcmp(&req->wb_verf, &data->verf.verifier, sizeof(req->wb_verf))) {
1768			/* We have a match */
1769			nfs_inode_remove_request(req);
1770			dprintk(" OK\n");
1771			goto next;
1772		}
1773		/* We have a mismatch. Write the page again */
1774		dprintk(" mismatch\n");
1775		nfs_mark_request_dirty(req);
1776		set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
1777	next:
1778		nfs_unlock_and_release_request(req);
1779	}
1780	nfss = NFS_SERVER(data->inode);
1781	if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
1782		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
1783
1784	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
1785	nfs_commit_end(cinfo.mds);
1786}
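/*
 * Editor's note (illustrative, not from the original source): the memcmp
 * above implements the NFS write-verifier check.  The server returns an
 * opaque 8-byte verifier with every UNSTABLE WRITE reply (stored in
 * req->wb_verf) and again with the COMMIT reply; it must change the
 * verifier if it reboots and may have lost uncommitted writes.  So:
 *
 *	WRITE  reply verf = 0xAAAA...  ->  saved in req->wb_verf
 *	COMMIT reply verf = 0xAAAA...  ->  match: data is safe, drop req
 *	COMMIT reply verf = 0xBBBB...  ->  mismatch: server may have lost
 *					   the data, redirty and resend
 */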
1787
1788static void nfs_commit_release(void *calldata)
1789{
1790	struct nfs_commit_data *data = calldata;
1791
1792	data->completion_ops->completion(data);
1793	nfs_commitdata_release(calldata);
1794}
1795
1796static const struct rpc_call_ops nfs_commit_ops = {
1797	.rpc_call_prepare = nfs_commit_prepare,
1798	.rpc_call_done = nfs_commit_done,
1799	.rpc_release = nfs_commit_release,
1800};
1801
1802static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
1803	.completion = nfs_commit_release_pages,
1804	.resched_write = nfs_commit_resched_write,
1805};
1806
1807int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
1808			    int how, struct nfs_commit_info *cinfo)
1809{
1810	int status;
1811
1812	status = pnfs_commit_list(inode, head, how, cinfo);
1813	if (status == PNFS_NOT_ATTEMPTED)
1814		status = nfs_commit_list(inode, head, how, cinfo);
1815	return status;
1816}
1817
1818int nfs_commit_inode(struct inode *inode, int how)
1819{
1820	LIST_HEAD(head);
1821	struct nfs_commit_info cinfo;
1822	int may_wait = how & FLUSH_SYNC;
1823	int error = 0;
1824	int res;
1825
1826	nfs_init_cinfo_from_inode(&cinfo, inode);
1827	nfs_commit_begin(cinfo.mds);
1828	res = nfs_scan_commit(inode, &head, &cinfo);
1829	if (res)
1830		error = nfs_generic_commit_list(inode, &head, how, &cinfo);
1831	nfs_commit_end(cinfo.mds);
1832	if (error < 0)
1833		goto out_error;
1834	if (!may_wait)
1835		goto out_mark_dirty;
1836	error = wait_on_commit(cinfo.mds);
1837	if (error < 0)
1838		return error;
1839	return res;
1840out_error:
1841	res = error;
1842	/* Note: If we exit without ensuring that the commit is complete,
1843	 * we must mark the inode as dirty. Otherwise, future calls to
1844	 * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
1845	 * that the data is on the disk.
1846	 */
1847out_mark_dirty:
1848	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1849	return res;
1850}
1851EXPORT_SYMBOL_GPL(nfs_commit_inode);
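/*
 * Editor's sketch (hypothetical callers, not from the original source):
 *
 *	nfs_commit_inode(inode, FLUSH_SYNC);	scan, COMMIT, and wait
 *	nfs_commit_inode(inode, 0);		scan, COMMIT, don't wait
 *
 * Both return the number of requests scanned onto the commit list (or a
 * negative errno); only the FLUSH_SYNC variant blocks in
 * wait_on_commit() until every outstanding COMMIT has completed.
 */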
1852
1853int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1854{
1855	struct nfs_inode *nfsi = NFS_I(inode);
1856	int flags = FLUSH_SYNC;
1857	int ret = 0;
1858
1859	/* no commits means nothing needs to be done */
1860	if (!nfsi->commit_info.ncommit)
1861		return ret;
1862
1863	if (wbc->sync_mode == WB_SYNC_NONE) {
1864		/* Don't commit yet if this is a non-blocking flush and there
1865		 * are a lot of outstanding writes for this mapping.
1866		 */
1867		if (nfsi->commit_info.ncommit <= (nfsi->nrequests >> 1))
1868			goto out_mark_dirty;
1869
1870		/* don't wait for the COMMIT response */
1871		flags = 0;
1872	}
1873
1874	ret = nfs_commit_inode(inode, flags);
1875	if (ret >= 0) {
1876		if (wbc->sync_mode == WB_SYNC_NONE) {
1877			if (ret < wbc->nr_to_write)
1878				wbc->nr_to_write -= ret;
1879			else
1880				wbc->nr_to_write = 0;
1881		}
1882		return 0;
1883	}
1884out_mark_dirty:
1885	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1886	return ret;
1887}
1888EXPORT_SYMBOL_GPL(nfs_write_inode);
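/*
 * Editor's note (illustrative numbers, not from the original source):
 * the WB_SYNC_NONE heuristic above only sends a COMMIT once more than
 * half of the inode's outstanding requests are commit-ready.  With
 * nrequests = 1000, a background flush commits only when ncommit > 500;
 * otherwise the inode is just re-marked dirty and retried later.
 */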
1889
1890/*
1891 * flush the inode to disk.
1892 */
1893int nfs_wb_all(struct inode *inode)
1894{
1895	int ret;
1896
1897	trace_nfs_writeback_inode_enter(inode);
1898
1899	ret = filemap_write_and_wait(inode->i_mapping);
1900	if (ret)
1901		goto out;
1902	ret = nfs_commit_inode(inode, FLUSH_SYNC);
1903	if (ret < 0)
1904		goto out;
1905	pnfs_sync_inode(inode, true);
1906	ret = 0;
1907
1908out:
1909	trace_nfs_writeback_inode_exit(inode, ret);
1910	return ret;
1911}
1912EXPORT_SYMBOL_GPL(nfs_wb_all);
1913
1914int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1915{
1916	struct nfs_page *req;
1917	int ret = 0;
1918
1919	wait_on_page_writeback(page);
1920
1921	/* blocking call to cancel all requests and join to a single (head)
1922	 * request */
1923	req = nfs_lock_and_join_requests(page, false);
1924
1925	if (IS_ERR(req)) {
1926		ret = PTR_ERR(req);
1927	} else if (req) {
1928		/* all requests from this page have been cancelled by
1929		 * nfs_lock_and_join_requests, so just remove the head
1930		 * request from the inode / page_private pointer and
1931		 * release it */
1932		nfs_inode_remove_request(req);
1933		nfs_unlock_and_release_request(req);
1934	}
1935
1936	return ret;
1937}
1938
1939/*
1940 * Write back all requests on one page - we do this before reading it.
1941 */
1942int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
1943{
1944	loff_t range_start = page_file_offset(page);
1945	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
1946	struct writeback_control wbc = {
1947		.sync_mode = WB_SYNC_ALL,
1948		.nr_to_write = 0,
1949		.range_start = range_start,
1950		.range_end = range_end,
1951	};
1952	int ret;
1953
1954	trace_nfs_writeback_page_enter(inode);
1955
1956	for (;;) {
1957		wait_on_page_writeback(page);
1958		if (clear_page_dirty_for_io(page)) {
1959			ret = nfs_writepage_locked(page, &wbc, launder);
1960			if (ret < 0)
1961				goto out_error;
1962			continue;
1963		}
1964		ret = 0;
1965		if (!PagePrivate(page))
1966			break;
1967		ret = nfs_commit_inode(inode, FLUSH_SYNC);
1968		if (ret < 0)
1969			goto out_error;
1970	}
1971out_error:
1972	trace_nfs_writeback_page_exit(inode, ret);
1973	return ret;
1974}
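/*
 * Editor's note (illustrative, not from the original source): the loop
 * above alternates two steps until the page is fully clean:
 *
 *	1. if the page is dirty, WRITE it (clear_page_dirty_for_io() +
 *	   nfs_writepage_locked());
 *	2. if a request is still attached (PagePrivate), the data went
 *	   out as an unstable write, so COMMIT and re-check.
 *
 * Only when the page is neither dirty nor carries a request can the
 * caller safely read or launder it.
 */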
1975
1976#ifdef CONFIG_MIGRATION
1977int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1978		struct page *page, enum migrate_mode mode)
1979{
1980	/*
1981	 * If PagePrivate is set, then the page is currently associated with
1982	 * an in-progress read or write request. Don't try to migrate it.
1983	 *
1984	 * FIXME: we could do this in principle, but we'll need a way to ensure
1985	 *        that we can safely release the inode reference while holding
1986	 *        the page lock.
1987	 */
1988	if (PagePrivate(page))
1989		return -EBUSY;
1990
1991	if (!nfs_fscache_release_page(page, GFP_KERNEL))
1992		return -EBUSY;
1993
1994	return migrate_page(mapping, newpage, page, mode);
1995}
1996#endif
1997
1998int __init nfs_init_writepagecache(void)
1999{
2000	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
2001					     sizeof(struct nfs_pgio_header),
2002					     0, SLAB_HWCACHE_ALIGN,
2003					     NULL);
2004	if (nfs_wdata_cachep == NULL)
2005		return -ENOMEM;
2006
2007	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
2008						     nfs_wdata_cachep);
2009	if (nfs_wdata_mempool == NULL)
2010		goto out_destroy_write_cache;
2011
2012	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
2013					     sizeof(struct nfs_commit_data),
2014					     0, SLAB_HWCACHE_ALIGN,
2015					     NULL);
2016	if (nfs_cdata_cachep == NULL)
2017		goto out_destroy_write_mempool;
2018
2019	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
2020						      nfs_cdata_cachep);
2021	if (nfs_commit_mempool == NULL)
2022		goto out_destroy_commit_cache;
2023
2024	/*
2025	 * NFS congestion size, scale with available memory.
2026	 *
2027	 *  64MB:    8192k
2028	 * 128MB:   11585k
2029	 * 256MB:   16384k
2030	 * 512MB:   23170k
2031	 *   1GB:   32768k
2032	 *   2GB:   46340k
2033	 *   4GB:   65536k
2034	 *   8GB:   92681k
2035	 *  16GB:  131072k
2036	 *
2037	 * This allows larger machines to have larger/more transfers.
2038	 * Limit the default to 256M
2039	 */
2040	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
2041	if (nfs_congestion_kb > 256*1024)
2042		nfs_congestion_kb = 256*1024;
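/*
 * Editor's note (worked example, not from the original source): with 4K
 * pages (PAGE_SHIFT = 12) on a 1GB machine, totalram_pages = 262144, so
 *
 *	int_sqrt(262144)  = 512
 *	16 * 512          = 8192
 *	8192 << (12 - 10) = 32768k
 *
 * matching the "1GB: 32768k" row in the table above.  The 256M cap is
 * only reached at roughly 64GB of RAM.
 */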
2043
2044	return 0;
2045
2046out_destroy_commit_cache:
2047	kmem_cache_destroy(nfs_cdata_cachep);
2048out_destroy_write_mempool:
2049	mempool_destroy(nfs_wdata_mempool);
2050out_destroy_write_cache:
2051	kmem_cache_destroy(nfs_wdata_cachep);
2052	return -ENOMEM;
2053}
2054
2055void nfs_destroy_writepagecache(void)
2056{
2057	mempool_destroy(nfs_commit_mempool);
2058	kmem_cache_destroy(nfs_cdata_cachep);
2059	mempool_destroy(nfs_wdata_mempool);
2060	kmem_cache_destroy(nfs_wdata_cachep);
2061}
2062
2063static const struct nfs_rw_ops nfs_rw_write_ops = {
2064	.rw_mode		= FMODE_WRITE,
2065	.rw_alloc_header	= nfs_writehdr_alloc,
2066	.rw_free_header		= nfs_writehdr_free,
2067	.rw_done		= nfs_writeback_done,
2068	.rw_result		= nfs_writeback_result,
2069	.rw_initiate		= nfs_initiate_write,
2070};