   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/fs/nfs/pagelist.c
   4 *
   5 * A set of helper functions for managing NFS read and write requests.
   6 * The main purpose of these routines is to provide support for the
   7 * coalescing of several requests into a single RPC call.
   8 *
   9 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
  10 *
  11 */
  12
  13#include <linux/slab.h>
  14#include <linux/file.h>
  15#include <linux/sched.h>
  16#include <linux/sunrpc/clnt.h>
  17#include <linux/nfs.h>
  18#include <linux/nfs3.h>
  19#include <linux/nfs4.h>
  20#include <linux/nfs_fs.h>
  21#include <linux/nfs_page.h>
  22#include <linux/nfs_mount.h>
  23#include <linux/export.h>
  24
  25#include "internal.h"
  26#include "pnfs.h"
  27#include "nfstrace.h"
  28
  29#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
  30
  31static struct kmem_cache *nfs_page_cachep;
  32static const struct rpc_call_ops nfs_pgio_common_ops;
  33
  34static struct nfs_pgio_mirror *
  35nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
  36{
  37	if (desc->pg_ops->pg_get_mirror)
  38		return desc->pg_ops->pg_get_mirror(desc, idx);
  39	return &desc->pg_mirrors[0];
  40}
  41
  42struct nfs_pgio_mirror *
  43nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
  44{
  45	return nfs_pgio_get_mirror(desc, desc->pg_mirror_idx);
  46}
  47EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);
  48
  49static u32
  50nfs_pgio_set_current_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
  51{
  52	if (desc->pg_ops->pg_set_mirror)
  53		return desc->pg_ops->pg_set_mirror(desc, idx);
  54	return desc->pg_mirror_idx;
  55}
  56
  57void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
  58		       struct nfs_pgio_header *hdr,
  59		       void (*release)(struct nfs_pgio_header *hdr))
  60{
  61	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
  62
  63
  64	hdr->req = nfs_list_entry(mirror->pg_list.next);
  65	hdr->inode = desc->pg_inode;
  66	hdr->cred = nfs_req_openctx(hdr->req)->cred;
  67	hdr->io_start = req_offset(hdr->req);
  68	hdr->good_bytes = mirror->pg_count;
  69	hdr->io_completion = desc->pg_io_completion;
  70	hdr->dreq = desc->pg_dreq;
  71	hdr->release = release;
  72	hdr->completion_ops = desc->pg_completion_ops;
  73	if (hdr->completion_ops->init_hdr)
  74		hdr->completion_ops->init_hdr(hdr);
  75
  76	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
  77}
  78EXPORT_SYMBOL_GPL(nfs_pgheader_init);
  79
  80void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
  81{
  82	unsigned int new = pos - hdr->io_start;
  83
  84	trace_nfs_pgio_error(hdr, error, pos);
  85	if (hdr->good_bytes > new) {
  86		hdr->good_bytes = new;
  87		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
  88		if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
  89			hdr->error = error;
  90	}
  91}
  92
  93static inline struct nfs_page *nfs_page_alloc(void)
  94{
  95	struct nfs_page *p =
  96		kmem_cache_zalloc(nfs_page_cachep, nfs_io_gfp_mask());
  97	if (p)
  98		INIT_LIST_HEAD(&p->wb_list);
  99	return p;
 100}
 101
 102static inline void
 103nfs_page_free(struct nfs_page *p)
 104{
 105	kmem_cache_free(nfs_page_cachep, p);
 106}
 107
 108/**
 109 * nfs_iocounter_wait - wait for i/o to complete
  110 * @l_ctx: nfs_lock_context with io_count to use
 111 *
 112 * returns -ERESTARTSYS if interrupted by a fatal signal.
 113 * Otherwise returns 0 once the io_count hits 0.
 114 */
 115int
 116nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
 117{
 118	return wait_var_event_killable(&l_ctx->io_count,
 119				       !atomic_read(&l_ctx->io_count));
 120}
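/*
 * Illustrative sketch (editorial, not part of the upstream file): a caller
 * that must drain all outstanding I/O on a lock context before proceeding
 * would propagate a fatal-signal interruption like so:
 *
 *	error = nfs_iocounter_wait(l_ctx);
 *	if (error)
 *		return error;	(error is -ERESTARTSYS here)
 *	(l_ctx->io_count has reached zero; safe to proceed)
 */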
 121
 122/**
  123 * nfs_async_iocounter_wait - wait on an rpc_waitqueue for I/O
 124 * to complete
 125 * @task: the rpc_task that should wait
 126 * @l_ctx: nfs_lock_context with io_counter to check
 127 *
 128 * Returns true if there is outstanding I/O to wait on and the
 129 * task has been put to sleep.
 130 */
 131bool
 132nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
 133{
 134	struct inode *inode = d_inode(l_ctx->open_context->dentry);
 135	bool ret = false;
 136
 137	if (atomic_read(&l_ctx->io_count) > 0) {
 138		rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
 139		ret = true;
 140	}
 141
 142	if (atomic_read(&l_ctx->io_count) == 0) {
 143		rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
 144		ret = false;
 145	}
 146
 147	return ret;
 148}
 149EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
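/*
 * Editorial note on the ordering above: the task is queued on the
 * rpc_waitqueue first and io_count is re-read afterwards, so a wakeup
 * issued by nfs_clear_request() between the two reads cannot be lost;
 * the task is already queued and is simply woken straight back up.
 */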
 150
 151/*
  152 * nfs_page_group_lock_head - lock the head of the page group
 153 * @req: any member of the page group
 154 */
 155struct nfs_page *
 156nfs_page_group_lock_head(struct nfs_page *req)
 157{
 158	struct nfs_page *head = req->wb_head;
 159
 160	while (!nfs_lock_request(head)) {
 161		int ret = nfs_wait_on_request(head);
 162		if (ret < 0)
 163			return ERR_PTR(ret);
 164	}
 165	if (head != req)
 166		kref_get(&head->wb_kref);
 167	return head;
 168}
 169
 170/*
  171 * nfs_unroll_locks - unlock all newly locked requests in the page group
  172 * @head: head request of page group, must be holding head lock
  173 * @req: request at which the unrolling stops (exclusive)
  174 *
  175 * This is a helper function for nfs_lock_and_join_requests that releases
  176 * the subrequest locks taken so far in the current pass.
 177 */
 178static void
 179nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
 180{
 181	struct nfs_page *tmp;
 182
 183	/* relinquish all the locks successfully grabbed this run */
 184	for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
 185		if (!kref_read(&tmp->wb_kref))
 186			continue;
 187		nfs_unlock_and_release_request(tmp);
 188	}
 189}
 190
 191/*
 192 * nfs_page_group_lock_subreq -  try to lock a subrequest
 193 * @head: head request of page group
 194 * @subreq: request to lock
 195 *
 196 * This is a helper function for nfs_lock_and_join_requests which
 197 * must be called with the head request and page group both locked.
 198 * On error, it returns with the page group unlocked.
 199 */
 200static int
 201nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
 202{
 203	int ret;
 204
 205	if (!kref_get_unless_zero(&subreq->wb_kref))
 206		return 0;
 207	while (!nfs_lock_request(subreq)) {
 208		nfs_page_group_unlock(head);
 209		ret = nfs_wait_on_request(subreq);
 210		if (!ret)
 211			ret = nfs_page_group_lock(head);
 212		if (ret < 0) {
 213			nfs_unroll_locks(head, subreq);
 214			nfs_release_request(subreq);
 215			return ret;
 216		}
 217	}
 218	return 0;
 219}
 220
 221/*
 222 * nfs_page_group_lock_subrequests -  try to lock the subrequests
 223 * @head: head request of page group
 224 *
 225 * This is a helper function for nfs_lock_and_join_requests which
 226 * must be called with the head request locked.
 227 */
 228int nfs_page_group_lock_subrequests(struct nfs_page *head)
 229{
 230	struct nfs_page *subreq;
 231	int ret;
 232
 233	ret = nfs_page_group_lock(head);
 234	if (ret < 0)
 235		return ret;
 236	/* lock each request in the page group */
 237	for (subreq = head->wb_this_page; subreq != head;
 238			subreq = subreq->wb_this_page) {
 239		ret = nfs_page_group_lock_subreq(head, subreq);
 240		if (ret < 0)
 241			return ret;
 242	}
 243	nfs_page_group_unlock(head);
 244	return 0;
 245}
 246
 247/*
 248 * nfs_page_set_headlock - set the request PG_HEADLOCK
 249 * @req: request that is to be locked
 250 *
 251 * this lock must be held when modifying req->wb_head
 252 *
 253 * return 0 on success, < 0 on error
 254 */
 255int
 256nfs_page_set_headlock(struct nfs_page *req)
 257{
 258	if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
 259		return 0;
 260
 261	set_bit(PG_CONTENDED1, &req->wb_flags);
 262	smp_mb__after_atomic();
 263	return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
 264				TASK_UNINTERRUPTIBLE);
 265}
 266
 267/*
 268 * nfs_page_clear_headlock - clear the request PG_HEADLOCK
 269 * @req: request that is to be locked
 270 */
 271void
 272nfs_page_clear_headlock(struct nfs_page *req)
 273{
 274	clear_bit_unlock(PG_HEADLOCK, &req->wb_flags);
 275	smp_mb__after_atomic();
 276	if (!test_bit(PG_CONTENDED1, &req->wb_flags))
 277		return;
 278	wake_up_bit(&req->wb_flags, PG_HEADLOCK);
 279}
 280
 281/*
 282 * nfs_page_group_lock - lock the head of the page group
 283 * @req: request in group that is to be locked
 284 *
 285 * this lock must be held when traversing or modifying the page
 286 * group list
 287 *
 288 * return 0 on success, < 0 on error
 289 */
 290int
 291nfs_page_group_lock(struct nfs_page *req)
 292{
 293	int ret;
 294
 295	ret = nfs_page_set_headlock(req);
 296	if (ret || req->wb_head == req)
 297		return ret;
 298	return nfs_page_set_headlock(req->wb_head);
 299}
 300
 301/*
 302 * nfs_page_group_unlock - unlock the head of the page group
 303 * @req: request in group that is to be unlocked
 304 */
 305void
 306nfs_page_group_unlock(struct nfs_page *req)
 307{
 308	if (req != req->wb_head)
 309		nfs_page_clear_headlock(req->wb_head);
 310	nfs_page_clear_headlock(req);
 311}
 312
 313/*
 314 * nfs_page_group_sync_on_bit_locked
 315 *
 316 * must be called with page group lock held
 317 */
 318static bool
 319nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
 320{
 321	struct nfs_page *head = req->wb_head;
 322	struct nfs_page *tmp;
 323
 324	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
 325	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));
 326
 327	tmp = req->wb_this_page;
 328	while (tmp != req) {
 329		if (!test_bit(bit, &tmp->wb_flags))
 330			return false;
 331		tmp = tmp->wb_this_page;
 332	}
 333
 334	/* true! reset all bits */
 335	tmp = req;
 336	do {
 337		clear_bit(bit, &tmp->wb_flags);
 338		tmp = tmp->wb_this_page;
 339	} while (tmp != req);
 340
 341	return true;
 342}
 343
 344/*
 345 * nfs_page_group_sync_on_bit - set bit on current request, but only
 346 *   return true if the bit is set for all requests in page group
 347 * @req - request in page group
 348 * @bit - PG_* bit that is used to sync page group
 349 */
 350bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
 351{
 352	bool ret;
 353
 354	nfs_page_group_lock(req);
 355	ret = nfs_page_group_sync_on_bit_locked(req, bit);
 356	nfs_page_group_unlock(req);
 357
 358	return ret;
 359}
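/*
 * Illustrative sketch of the intended "last one out" usage (the write path
 * in fs/nfs/write.c follows this pattern): only the caller whose bit
 * completes the whole group performs the side effect.
 *
 *	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
 *		SetPageUptodate(req->wb_page);
 */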
 360
 361/*
 362 * nfs_page_group_init - Initialize the page group linkage for @req
 363 * @req - a new nfs request
 364 * @prev - the previous request in page group, or NULL if @req is the first
 365 *         or only request in the group (the head).
 366 */
 367static inline void
 368nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
 369{
 370	struct inode *inode;
 371	WARN_ON_ONCE(prev == req);
 372
 373	if (!prev) {
 374		/* a head request */
 375		req->wb_head = req;
 376		req->wb_this_page = req;
 377	} else {
 378		/* a subrequest */
 379		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
 380		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
 381		req->wb_head = prev->wb_head;
 382		req->wb_this_page = prev->wb_this_page;
 383		prev->wb_this_page = req;
 384
 385		/* All subrequests take a ref on the head request until
 386		 * nfs_page_group_destroy is called */
 387		kref_get(&req->wb_head->wb_kref);
 388
 389		/* grab extra ref and bump the request count if head request
 390		 * has extra ref from the write/commit path to handle handoff
 391		 * between write and commit lists. */
 392		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
 393			inode = page_file_mapping(req->wb_page)->host;
 394			set_bit(PG_INODE_REF, &req->wb_flags);
 395			kref_get(&req->wb_kref);
 396			atomic_long_inc(&NFS_I(inode)->nrequests);
 397		}
 398	}
 399}
 400
 401/*
 402 * nfs_page_group_destroy - sync the destruction of page groups
 403 * @req - request that no longer needs the page group
 404 *
 405 * releases the page group reference from each member once all
 406 * members have called this function.
 407 */
 408static void
 409nfs_page_group_destroy(struct kref *kref)
 410{
 411	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
 412	struct nfs_page *head = req->wb_head;
 413	struct nfs_page *tmp, *next;
 414
 415	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
 416		goto out;
 417
 418	tmp = req;
 419	do {
 420		next = tmp->wb_this_page;
 421		/* unlink and free */
 422		tmp->wb_this_page = tmp;
 423		tmp->wb_head = tmp;
 424		nfs_free_request(tmp);
 425		tmp = next;
 426	} while (tmp != req);
 427out:
 428	/* subrequests must release the ref on the head request */
 429	if (head != req)
 430		nfs_release_request(head);
 431}
 432
 433static struct nfs_page *
 434__nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
 435		   unsigned int pgbase, unsigned int offset,
 436		   unsigned int count)
 437{
 438	struct nfs_page		*req;
 439	struct nfs_open_context *ctx = l_ctx->open_context;
 440
 441	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
 442		return ERR_PTR(-EBADF);
 443	/* try to allocate the request struct */
 444	req = nfs_page_alloc();
 445	if (req == NULL)
 446		return ERR_PTR(-ENOMEM);
 447
 448	req->wb_lock_context = l_ctx;
 449	refcount_inc(&l_ctx->count);
 450	atomic_inc(&l_ctx->io_count);
 451
 452	/* Initialize the request struct. Initially, we assume a
 453	 * long write-back delay. This will be adjusted in
 454	 * update_nfs_request below if the region is not locked. */
 455	req->wb_page    = page;
 456	if (page) {
 457		req->wb_index = page_index(page);
 458		get_page(page);
 459	}
 460	req->wb_offset  = offset;
 461	req->wb_pgbase	= pgbase;
 462	req->wb_bytes   = count;
 463	kref_init(&req->wb_kref);
 464	req->wb_nio = 0;
 465	return req;
 466}
 467
 468/**
 469 * nfs_create_request - Create an NFS read/write request.
 470 * @ctx: open context to use
  471 * @page: page to read/write
  472 * @offset: starting offset within the page for the read/write
 473 * @count: number of bytes to read/write
 474 *
 475 * The page must be locked by the caller. This makes sure we never
 476 * create two different requests for the same page.
 477 * User should ensure it is safe to sleep in this function.
 478 */
 479struct nfs_page *
 480nfs_create_request(struct nfs_open_context *ctx, struct page *page,
 481		   unsigned int offset, unsigned int count)
 482{
 483	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
 484	struct nfs_page *ret;
 485
 486	if (IS_ERR(l_ctx))
 487		return ERR_CAST(l_ctx);
 488	ret = __nfs_create_request(l_ctx, page, offset, offset, count);
 489	if (!IS_ERR(ret))
 490		nfs_page_group_init(ret, NULL);
 491	nfs_put_lock_context(l_ctx);
 492	return ret;
 493}
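/*
 * Illustrative sketch (hypothetical caller): building a request that covers
 * the first count bytes of a page the caller has locked, then dropping it:
 *
 *	req = nfs_create_request(ctx, page, 0, count);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	(... queue the request for I/O ...)
 *	nfs_release_request(req);
 */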
 494
 495static struct nfs_page *
 496nfs_create_subreq(struct nfs_page *req,
 497		  unsigned int pgbase,
 498		  unsigned int offset,
 499		  unsigned int count)
 500{
 501	struct nfs_page *last;
 502	struct nfs_page *ret;
 503
 504	ret = __nfs_create_request(req->wb_lock_context, req->wb_page,
 505			pgbase, offset, count);
 506	if (!IS_ERR(ret)) {
 507		/* find the last request */
 508		for (last = req->wb_head;
 509		     last->wb_this_page != req->wb_head;
 510		     last = last->wb_this_page)
 511			;
 512
 513		nfs_lock_request(ret);
 514		ret->wb_index = req->wb_index;
 515		nfs_page_group_init(ret, last);
 516		ret->wb_nio = req->wb_nio;
 517	}
 518	return ret;
 519}
 520
 521/**
 522 * nfs_unlock_request - Unlock request and wake up sleepers.
 523 * @req: pointer to request
 524 */
 525void nfs_unlock_request(struct nfs_page *req)
 526{
 527	clear_bit_unlock(PG_BUSY, &req->wb_flags);
 528	smp_mb__after_atomic();
 529	if (!test_bit(PG_CONTENDED2, &req->wb_flags))
 530		return;
 531	wake_up_bit(&req->wb_flags, PG_BUSY);
 532}
 533
 534/**
 535 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 536 * @req: pointer to request
 537 */
 538void nfs_unlock_and_release_request(struct nfs_page *req)
 539{
 540	nfs_unlock_request(req);
 541	nfs_release_request(req);
 542}
 543
 544/*
 545 * nfs_clear_request - Free up all resources allocated to the request
  546 * @req: request to clear
 547 *
 548 * Release page and open context resources associated with a read/write
 549 * request after it has completed.
 550 */
 551static void nfs_clear_request(struct nfs_page *req)
 552{
 553	struct page *page = req->wb_page;
 554	struct nfs_lock_context *l_ctx = req->wb_lock_context;
 555	struct nfs_open_context *ctx;
 556
 557	if (page != NULL) {
 558		put_page(page);
 559		req->wb_page = NULL;
 560	}
 561	if (l_ctx != NULL) {
 562		if (atomic_dec_and_test(&l_ctx->io_count)) {
 563			wake_up_var(&l_ctx->io_count);
 564			ctx = l_ctx->open_context;
 565			if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
 566				rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
 567		}
 568		nfs_put_lock_context(l_ctx);
 569		req->wb_lock_context = NULL;
 570	}
 571}
 572
 573/**
 574 * nfs_free_request - Release the count on an NFS read/write request
 575 * @req: request to release
 576 *
 577 * Note: Should never be called with the spinlock held!
 578 */
 579void nfs_free_request(struct nfs_page *req)
 580{
 581	WARN_ON_ONCE(req->wb_this_page != req);
 582
 583	/* extra debug: make sure no sync bits are still set */
 584	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
 585	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
 586	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
 587	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
 588	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));
 589
 590	/* Release struct file and open context */
 591	nfs_clear_request(req);
 592	nfs_page_free(req);
 593}
 594
 595void nfs_release_request(struct nfs_page *req)
 596{
 597	kref_put(&req->wb_kref, nfs_page_group_destroy);
 598}
 599EXPORT_SYMBOL_GPL(nfs_release_request);
 600
 601/**
 602 * nfs_wait_on_request - Wait for a request to complete.
 603 * @req: request to wait upon.
 604 *
 605 * Interruptible by fatal signals only.
 606 * The user is responsible for holding a count on the request.
 607 */
 608int
 609nfs_wait_on_request(struct nfs_page *req)
 610{
 611	if (!test_bit(PG_BUSY, &req->wb_flags))
 612		return 0;
 613	set_bit(PG_CONTENDED2, &req->wb_flags);
 614	smp_mb__after_atomic();
 615	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
 616			      TASK_UNINTERRUPTIBLE);
 617}
 618EXPORT_SYMBOL_GPL(nfs_wait_on_request);
 619
 620/*
 621 * nfs_generic_pg_test - determine if requests can be coalesced
 622 * @desc: pointer to descriptor
 623 * @prev: previous request in desc, or NULL
 624 * @req: this request
 625 *
 626 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 627 * the size of the request.
 628 */
 629size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
 630			   struct nfs_page *prev, struct nfs_page *req)
 631{
 632	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 633
 634
 635	if (mirror->pg_count > mirror->pg_bsize) {
 636		/* should never happen */
 637		WARN_ON_ONCE(1);
 638		return 0;
 639	}
 640
 641	/*
 642	 * Limit the request size so that we can still allocate a page array
 643	 * for it without upsetting the slab allocator.
 644	 */
 645	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
 646			sizeof(struct page *) > PAGE_SIZE)
 647		return 0;
 648
 649	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
 650}
 651EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
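/*
 * Worked example for the slab limit above (editorial): with 4KiB pages and
 * 8-byte pointers, PAGE_SIZE / sizeof(struct page *) = 512 array entries,
 * so a single RPC is capped at 512 pages (2MiB of data) regardless of how
 * large pg_bsize is.
 */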
 652
 653struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
 654{
 655	struct nfs_pgio_header *hdr = ops->rw_alloc_header();
 656
 657	if (hdr) {
 658		INIT_LIST_HEAD(&hdr->pages);
 659		hdr->rw_ops = ops;
 660	}
 661	return hdr;
 662}
 663EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
 664
 665/**
 666 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 667 *
 668 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 669 * be called again.
 670 *
 671 * @hdr: A header that has had nfs_generic_pgio called
 672 */
 673static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
 674{
 675	if (hdr->args.context)
 676		put_nfs_open_context(hdr->args.context);
 677	if (hdr->page_array.pagevec != hdr->page_array.page_array)
 678		kfree(hdr->page_array.pagevec);
 679}
 680
 681/*
 682 * nfs_pgio_header_free - Free a read or write header
 683 * @hdr: The header to free
 684 */
 685void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
 686{
 687	nfs_pgio_data_destroy(hdr);
 688	hdr->rw_ops->rw_free_header(hdr);
 689}
 690EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
 691
 692/**
 693 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 694 * @hdr: The pageio hdr
  695 * @count: Number of bytes to read or write
 696 * @how: How to commit data (writes only)
 697 * @cinfo: Commit information for the call (writes only)
 698 */
 699static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
 700			      unsigned int count,
 701			      int how, struct nfs_commit_info *cinfo)
 702{
 703	struct nfs_page *req = hdr->req;
 704
 705	/* Set up the RPC argument and reply structs
 706	 * NB: take care not to mess about with hdr->commit et al. */
 707
 708	hdr->args.fh     = NFS_FH(hdr->inode);
 709	hdr->args.offset = req_offset(req);
 710	/* pnfs_set_layoutcommit needs this */
 711	hdr->mds_offset = hdr->args.offset;
 712	hdr->args.pgbase = req->wb_pgbase;
 713	hdr->args.pages  = hdr->page_array.pagevec;
 714	hdr->args.count  = count;
 715	hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
 716	hdr->args.lock_context = req->wb_lock_context;
 717	hdr->args.stable  = NFS_UNSTABLE;
 718	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
 719	case 0:
 720		break;
 721	case FLUSH_COND_STABLE:
 722		if (nfs_reqs_to_commit(cinfo))
 723			break;
 724		fallthrough;
 725	default:
 726		hdr->args.stable = NFS_FILE_SYNC;
 727	}
 728
 729	hdr->res.fattr   = &hdr->fattr;
 730	hdr->res.count   = 0;
 731	hdr->res.eof     = 0;
 732	hdr->res.verf    = &hdr->verf;
 733	nfs_fattr_init(&hdr->fattr);
 734}
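/*
 * Editorial note on the switch above: FLUSH_STABLE, or FLUSH_COND_STABLE
 * with nothing queued for commit, upgrades the write to NFS_FILE_SYNC so a
 * separate COMMIT round trip can be avoided; otherwise the write goes out
 * NFS_UNSTABLE and is committed later.
 */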
 735
 736/**
 737 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 738 * @task: The current task
 739 * @calldata: pageio header to prepare
 740 */
 741static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
 742{
 743	struct nfs_pgio_header *hdr = calldata;
 744	int err;
 745	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
 746	if (err)
 747		rpc_exit(task, err);
 748}
 749
 750int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
 751		      const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
 752		      const struct rpc_call_ops *call_ops, int how, int flags)
 753{
 754	struct rpc_task *task;
 755	struct rpc_message msg = {
 756		.rpc_argp = &hdr->args,
 757		.rpc_resp = &hdr->res,
 758		.rpc_cred = cred,
 759	};
 760	struct rpc_task_setup task_setup_data = {
 761		.rpc_client = clnt,
 762		.task = &hdr->task,
 763		.rpc_message = &msg,
 764		.callback_ops = call_ops,
 765		.callback_data = hdr,
 766		.workqueue = nfsiod_workqueue,
 767		.flags = RPC_TASK_ASYNC | flags,
 768	};
 769
 770	if (nfs_server_capable(hdr->inode, NFS_CAP_MOVEABLE))
 771		task_setup_data.flags |= RPC_TASK_MOVEABLE;
 772
 773	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);
 774
 775	dprintk("NFS: initiated pgio call "
 776		"(req %s/%llu, %u bytes @ offset %llu)\n",
 777		hdr->inode->i_sb->s_id,
 778		(unsigned long long)NFS_FILEID(hdr->inode),
 779		hdr->args.count,
 780		(unsigned long long)hdr->args.offset);
 781
 782	task = rpc_run_task(&task_setup_data);
 783	if (IS_ERR(task))
 784		return PTR_ERR(task);
 785	rpc_put_task(task);
 786	return 0;
 787}
 788EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
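/*
 * Editorial note: since the task is RPC_TASK_ASYNC, this helper returns as
 * soon as the task has been launched; completion is reported later through
 * @call_ops (nfs_pgio_result/nfs_pgio_release for the common ops) via the
 * nfsiod workqueue. The rpc_put_task() above only drops this function's
 * reference to the task, it does not wait for the I/O to finish.
 */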
 789
 790/**
 791 * nfs_pgio_error - Clean up from a pageio error
 792 * @hdr: pageio header
 793 */
 794static void nfs_pgio_error(struct nfs_pgio_header *hdr)
 795{
 796	set_bit(NFS_IOHDR_REDO, &hdr->flags);
 797	hdr->completion_ops->completion(hdr);
 798}
 799
 800/**
 801 * nfs_pgio_release - Release pageio data
 802 * @calldata: The pageio header to release
 803 */
 804static void nfs_pgio_release(void *calldata)
 805{
 806	struct nfs_pgio_header *hdr = calldata;
 807	hdr->completion_ops->completion(hdr);
 808}
 809
 810static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
 811				   unsigned int bsize)
 812{
 813	INIT_LIST_HEAD(&mirror->pg_list);
 814	mirror->pg_bytes_written = 0;
 815	mirror->pg_count = 0;
 816	mirror->pg_bsize = bsize;
 817	mirror->pg_base = 0;
 818	mirror->pg_recoalesce = 0;
 819}
 820
 821/**
 822 * nfs_pageio_init - initialise a page io descriptor
 823 * @desc: pointer to descriptor
 824 * @inode: pointer to inode
 825 * @pg_ops: pointer to pageio operations
 826 * @compl_ops: pointer to pageio completion operations
 827 * @rw_ops: pointer to nfs read/write operations
 828 * @bsize: io block size
 829 * @io_flags: extra parameters for the io function
 830 */
 831void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
 832		     struct inode *inode,
 833		     const struct nfs_pageio_ops *pg_ops,
 834		     const struct nfs_pgio_completion_ops *compl_ops,
 835		     const struct nfs_rw_ops *rw_ops,
 836		     size_t bsize,
 837		     int io_flags)
 838{
 839	desc->pg_moreio = 0;
 840	desc->pg_inode = inode;
 841	desc->pg_ops = pg_ops;
 842	desc->pg_completion_ops = compl_ops;
 843	desc->pg_rw_ops = rw_ops;
 844	desc->pg_ioflags = io_flags;
 845	desc->pg_error = 0;
 846	desc->pg_lseg = NULL;
 847	desc->pg_io_completion = NULL;
 848	desc->pg_dreq = NULL;
 849	desc->pg_bsize = bsize;
 850
 851	desc->pg_mirror_count = 1;
 852	desc->pg_mirror_idx = 0;
 853
 854	desc->pg_mirrors_dynamic = NULL;
 855	desc->pg_mirrors = desc->pg_mirrors_static;
 856	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
 857	desc->pg_maxretrans = 0;
 858}
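/*
 * Illustrative sketch of the descriptor lifecycle (hypothetical caller;
 * read.c and write.c wrap this in nfs_pageio_init_read()/_write()):
 *
 *	struct nfs_pageio_descriptor desc;
 *
 *	nfs_pageio_init(&desc, inode, pg_ops, compl_ops, rw_ops, bsize, 0);
 *	list_for_each_entry_safe(req, tmp, &reqs, wb_list)
 *		if (!nfs_pageio_add_request(&desc, req))
 *			break;		(desc.pg_error holds the cause)
 *	nfs_pageio_complete(&desc);	(flush whatever was coalesced)
 */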
 859
 860/**
 861 * nfs_pgio_result - Basic pageio error handling
 862 * @task: The task that ran
 863 * @calldata: Pageio header to check
 864 */
 865static void nfs_pgio_result(struct rpc_task *task, void *calldata)
 866{
 867	struct nfs_pgio_header *hdr = calldata;
 868	struct inode *inode = hdr->inode;
 869
 870	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
 871		return;
 872	if (task->tk_status < 0)
 873		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
 874	else
 875		hdr->rw_ops->rw_result(task, hdr);
 876}
 877
 878/*
 879 * Create an RPC task for the given read or write request and kick it.
 880 * The page must have been locked by the caller.
 881 *
 882 * It may happen that the page we're passed is not marked dirty.
 883 * This is the case if nfs_updatepage detects a conflicting request
 884 * that has been written but not committed.
 885 */
 886int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 887		     struct nfs_pgio_header *hdr)
 888{
 889	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 890
 891	struct nfs_page		*req;
 892	struct page		**pages,
 893				*last_page;
 894	struct list_head *head = &mirror->pg_list;
 895	struct nfs_commit_info cinfo;
 896	struct nfs_page_array *pg_array = &hdr->page_array;
 897	unsigned int pagecount, pageused;
 898	gfp_t gfp_flags = nfs_io_gfp_mask();
 899
 900	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
 901	pg_array->npages = pagecount;
 902
 903	if (pagecount <= ARRAY_SIZE(pg_array->page_array))
 904		pg_array->pagevec = pg_array->page_array;
 905	else {
 906		pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
 907		if (!pg_array->pagevec) {
 908			pg_array->npages = 0;
 909			nfs_pgio_error(hdr);
 910			desc->pg_error = -ENOMEM;
 911			return desc->pg_error;
 912		}
 913	}
 914
 915	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
 916	pages = hdr->page_array.pagevec;
 917	last_page = NULL;
 918	pageused = 0;
 919	while (!list_empty(head)) {
 920		req = nfs_list_entry(head->next);
 921		nfs_list_move_request(req, &hdr->pages);
 922
 923		if (!last_page || last_page != req->wb_page) {
 924			pageused++;
 925			if (pageused > pagecount)
 926				break;
 927			*pages++ = last_page = req->wb_page;
 928		}
 929	}
 930	if (WARN_ON_ONCE(pageused != pagecount)) {
 931		nfs_pgio_error(hdr);
 932		desc->pg_error = -EINVAL;
 933		return desc->pg_error;
 934	}
 935
 936	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
 937	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
 938		desc->pg_ioflags &= ~FLUSH_COND_STABLE;
 939
 940	/* Set up the argument struct */
 941	nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo);
 942	desc->pg_rpc_callops = &nfs_pgio_common_ops;
 943	return 0;
 944}
 945EXPORT_SYMBOL_GPL(nfs_generic_pgio);
 946
 947static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
 948{
 949	struct nfs_pgio_header *hdr;
 950	int ret;
 951	unsigned short task_flags = 0;
 952
 953	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
 954	if (!hdr) {
 955		desc->pg_error = -ENOMEM;
 956		return desc->pg_error;
 957	}
 958	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
 959	ret = nfs_generic_pgio(desc, hdr);
 960	if (ret == 0) {
 961		if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion)
 962			task_flags = RPC_TASK_MOVEABLE;
 963		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
 964					hdr,
 965					hdr->cred,
 966					NFS_PROTO(hdr->inode),
 967					desc->pg_rpc_callops,
 968					desc->pg_ioflags,
 969					RPC_TASK_CRED_NOREF | task_flags);
 970	}
 971	return ret;
 972}
 973
 974static struct nfs_pgio_mirror *
 975nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
 976		unsigned int mirror_count)
 977{
 978	struct nfs_pgio_mirror *ret;
 979	unsigned int i;
 980
 981	kfree(desc->pg_mirrors_dynamic);
 982	desc->pg_mirrors_dynamic = NULL;
 983	if (mirror_count == 1)
 984		return desc->pg_mirrors_static;
 985	ret = kmalloc_array(mirror_count, sizeof(*ret), nfs_io_gfp_mask());
 986	if (ret != NULL) {
 987		for (i = 0; i < mirror_count; i++)
 988			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
 989		desc->pg_mirrors_dynamic = ret;
 990	}
 991	return ret;
 992}
 993
 994/*
 995 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 996 *				by calling the pg_get_mirror_count op
 997 */
 998static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
 999				       struct nfs_page *req)
1000{
1001	unsigned int mirror_count = 1;
1002
1003	if (pgio->pg_ops->pg_get_mirror_count)
1004		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
1005	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
1006		return;
1007
1008	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
1009		pgio->pg_error = -EINVAL;
1010		return;
1011	}
1012
1013	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
1014	if (pgio->pg_mirrors == NULL) {
1015		pgio->pg_error = -ENOMEM;
1016		pgio->pg_mirrors = pgio->pg_mirrors_static;
1017		mirror_count = 1;
1018	}
1019	pgio->pg_mirror_count = mirror_count;
1020}
1021
1022static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
1023{
1024	pgio->pg_mirror_count = 1;
1025	pgio->pg_mirror_idx = 0;
1026	pgio->pg_mirrors = pgio->pg_mirrors_static;
1027	kfree(pgio->pg_mirrors_dynamic);
1028	pgio->pg_mirrors_dynamic = NULL;
1029}
1030
1031static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
1032		const struct nfs_lock_context *l2)
1033{
1034	return l1->lockowner == l2->lockowner;
1035}
1036
1037/**
1038 * nfs_coalesce_size - test two requests for compatibility
1039 * @prev: pointer to nfs_page
1040 * @req: pointer to nfs_page
 1041 * @pgio: pointer to nfs_pageio_descriptor
1042 *
1043 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
1044 * page data area they describe is contiguous, and that their RPC
1045 * credentials, NFSv4 open state, and lockowners are the same.
1046 *
1047 * Returns size of the request that can be coalesced
1048 */
1049static unsigned int nfs_coalesce_size(struct nfs_page *prev,
1050				      struct nfs_page *req,
1051				      struct nfs_pageio_descriptor *pgio)
1052{
1053	struct file_lock_context *flctx;
1054
1055	if (prev) {
1056		if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
1057			return 0;
1058		flctx = locks_inode_context(d_inode(nfs_req_openctx(req)->dentry));
1059		if (flctx != NULL &&
1060		    !(list_empty_careful(&flctx->flc_posix) &&
1061		      list_empty_careful(&flctx->flc_flock)) &&
1062		    !nfs_match_lock_context(req->wb_lock_context,
1063					    prev->wb_lock_context))
1064			return 0;
1065		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
1066			return 0;
1067		if (req->wb_page == prev->wb_page) {
1068			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
1069				return 0;
1070		} else {
1071			if (req->wb_pgbase != 0 ||
1072			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
1073				return 0;
1074		}
1075	}
1076	return pgio->pg_ops->pg_test(pgio, prev, req);
1077}
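/*
 * Example of the contiguity rules above (editorial): within one page, a
 * request at pgbase 0 for 512 bytes coalesces with a following request at
 * pgbase 512; across a page boundary the previous request must end exactly
 * at PAGE_SIZE and the new one must start at pgbase 0. In both cases the
 * file offsets must also be contiguous, i.e.
 * req_offset(prev) + prev->wb_bytes == req_offset(req).
 */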
1078
1079/**
1080 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
1081 * @desc: destination io descriptor
1082 * @req: request
1083 *
1084 * If the request 'req' was successfully coalesced into the existing list
1085 * of pages 'desc', it returns the size of req.
1086 */
1087static unsigned int
1088nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
1089		struct nfs_page *req)
1090{
1091	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1092	struct nfs_page *prev = NULL;
1093	unsigned int size;
1094
1095	if (list_empty(&mirror->pg_list)) {
1096		if (desc->pg_ops->pg_init)
1097			desc->pg_ops->pg_init(desc, req);
1098		if (desc->pg_error < 0)
1099			return 0;
1100		mirror->pg_base = req->wb_pgbase;
1101		mirror->pg_count = 0;
1102		mirror->pg_recoalesce = 0;
1103	} else
1104		prev = nfs_list_entry(mirror->pg_list.prev);
1105
1106	if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
1107		if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
1108			desc->pg_error = -ETIMEDOUT;
1109		else
1110			desc->pg_error = -EIO;
1111		return 0;
1112	}
1113
1114	size = nfs_coalesce_size(prev, req, desc);
1115	if (size < req->wb_bytes)
1116		return size;
1117	nfs_list_move_request(req, &mirror->pg_list);
1118	mirror->pg_count += req->wb_bytes;
1119	return req->wb_bytes;
1120}
1121
1122/*
1123 * Helper for nfs_pageio_add_request and nfs_pageio_complete
1124 */
1125static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
1126{
1127	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1128
1129	if (!list_empty(&mirror->pg_list)) {
1130		int error = desc->pg_ops->pg_doio(desc);
1131		if (error < 0)
1132			desc->pg_error = error;
1133		if (list_empty(&mirror->pg_list))
1134			mirror->pg_bytes_written += mirror->pg_count;
1135	}
1136}
1137
1138static void
1139nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
1140		struct nfs_page *req)
1141{
1142	LIST_HEAD(head);
1143
1144	nfs_list_move_request(req, &head);
1145	desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
1146}
1147
1148/**
1149 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
1150 * @desc: destination io descriptor
1151 * @req: request
1152 *
1153 * This may split a request into subrequests which are all part of the
1154 * same page group. If so, it will submit @req as the last one, to ensure
1155 * the pointer to @req is still valid in case of failure.
1156 *
1157 * Returns true if the request 'req' was successfully coalesced into the
1158 * existing list of pages 'desc'.
1159 */
1160static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
1161			   struct nfs_page *req)
1162{
1163	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1164	struct nfs_page *subreq;
1165	unsigned int size, subreq_size;
1166
1167	nfs_page_group_lock(req);
1168
1169	subreq = req;
1170	subreq_size = subreq->wb_bytes;
1171	for(;;) {
1172		size = nfs_pageio_do_add_request(desc, subreq);
1173		if (size == subreq_size) {
1174			/* We successfully submitted a request */
1175			if (subreq == req)
1176				break;
1177			req->wb_pgbase += size;
1178			req->wb_bytes -= size;
1179			req->wb_offset += size;
1180			subreq_size = req->wb_bytes;
1181			subreq = req;
1182			continue;
1183		}
1184		if (WARN_ON_ONCE(subreq != req)) {
1185			nfs_page_group_unlock(req);
1186			nfs_pageio_cleanup_request(desc, subreq);
1187			subreq = req;
1188			subreq_size = req->wb_bytes;
1189			nfs_page_group_lock(req);
1190		}
1191		if (!size) {
1192			/* Can't coalesce any more, so do I/O */
1193			nfs_page_group_unlock(req);
1194			desc->pg_moreio = 1;
1195			nfs_pageio_doio(desc);
1196			if (desc->pg_error < 0 || mirror->pg_recoalesce)
1197				return 0;
1198			/* retry add_request for this subreq */
1199			nfs_page_group_lock(req);
1200			continue;
1201		}
1202		subreq = nfs_create_subreq(req, req->wb_pgbase,
1203				req->wb_offset, size);
1204		if (IS_ERR(subreq))
1205			goto err_ptr;
1206		subreq_size = size;
1207	}
1208
1209	nfs_page_group_unlock(req);
1210	return 1;
1211err_ptr:
1212	desc->pg_error = PTR_ERR(subreq);
1213	nfs_page_group_unlock(req);
1214	return 0;
1215}
1216
1217static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
1218{
1219	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1220	LIST_HEAD(head);
1221
1222	do {
1223		list_splice_init(&mirror->pg_list, &head);
1224		mirror->pg_recoalesce = 0;
1225
1226		while (!list_empty(&head)) {
1227			struct nfs_page *req;
1228
1229			req = list_first_entry(&head, struct nfs_page, wb_list);
1230			if (__nfs_pageio_add_request(desc, req))
1231				continue;
1232			if (desc->pg_error < 0) {
1233				list_splice_tail(&head, &mirror->pg_list);
1234				mirror->pg_recoalesce = 1;
1235				return 0;
1236			}
1237			break;
1238		}
1239	} while (mirror->pg_recoalesce);
1240	return 1;
1241}
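/*
 * Editorial note on the loop above: splicing pg_list onto a private list
 * and re-adding every request lets the I/O be coalesced afresh against the
 * current descriptor state; on error the remainder is spliced back and
 * pg_recoalesce is raised again so the caller can decide whether to retry.
 */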
1242
1243static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
1244		struct nfs_page *req)
1245{
1246	int ret;
1247
1248	do {
1249		ret = __nfs_pageio_add_request(desc, req);
1250		if (ret)
1251			break;
1252		if (desc->pg_error < 0)
1253			break;
1254		ret = nfs_do_recoalesce(desc);
1255	} while (ret);
1256
1257	return ret;
1258}
1259
1260static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
1261{
1262	u32 midx;
1263	struct nfs_pgio_mirror *mirror;
1264
1265	if (!desc->pg_error)
1266		return;
1267
1268	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
1269		mirror = nfs_pgio_get_mirror(desc, midx);
1270		desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
1271				desc->pg_error);
1272	}
1273}
1274
1275int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
1276			   struct nfs_page *req)
1277{
1278	u32 midx;
1279	unsigned int pgbase, offset, bytes;
1280	struct nfs_page *dupreq;
1281
1282	pgbase = req->wb_pgbase;
1283	offset = req->wb_offset;
1284	bytes = req->wb_bytes;
1285
1286	nfs_pageio_setup_mirroring(desc, req);
1287	if (desc->pg_error < 0)
1288		goto out_failed;
1289
1290	/* Create the mirror instances first, and fire them off */
1291	for (midx = 1; midx < desc->pg_mirror_count; midx++) {
1292		nfs_page_group_lock(req);
1293
1294		dupreq = nfs_create_subreq(req,
1295				pgbase, offset, bytes);
1296
1297		nfs_page_group_unlock(req);
1298		if (IS_ERR(dupreq)) {
1299			desc->pg_error = PTR_ERR(dupreq);
1300			goto out_failed;
1301		}
1302
1303		nfs_pgio_set_current_mirror(desc, midx);
1304		if (!nfs_pageio_add_request_mirror(desc, dupreq))
1305			goto out_cleanup_subreq;
1306	}
1307
1308	nfs_pgio_set_current_mirror(desc, 0);
1309	if (!nfs_pageio_add_request_mirror(desc, req))
1310		goto out_failed;
1311
1312	return 1;
1313
1314out_cleanup_subreq:
1315	nfs_pageio_cleanup_request(desc, dupreq);
1316out_failed:
1317	nfs_pageio_error_cleanup(desc);
1318	return 0;
1319}
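/*
 * Editorial note: for mirrored pNFS writes the loop above queues one
 * duplicate subrequest per extra mirror (indices 1..pg_mirror_count-1) and
 * only then queues the original request on mirror 0, so every mirror's
 * pg_list receives its own copy of the same byte range.
 */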
1320
1321/*
1322 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
1323 *				nfs_pageio_descriptor
1324 * @desc: pointer to io descriptor
 1325 * @mirror_idx: index of the mirror to complete
1326 */
1327static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
1328				       u32 mirror_idx)
1329{
1330	struct nfs_pgio_mirror *mirror;
1331	u32 restore_idx;
1332
1333	restore_idx = nfs_pgio_set_current_mirror(desc, mirror_idx);
1334	mirror = nfs_pgio_current_mirror(desc);
1335
1336	for (;;) {
1337		nfs_pageio_doio(desc);
1338		if (desc->pg_error < 0 || !mirror->pg_recoalesce)
1339			break;
1340		if (!nfs_do_recoalesce(desc))
1341			break;
1342	}
1343	nfs_pgio_set_current_mirror(desc, restore_idx);
1344}
1345
1346/*
1347 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 1348 * @desc: the pageio descriptor to add requests to
 1349 * @hdr: the pgio header to move requests from
1350 *
1351 * Try to move each request (nfs_page) from @hdr to @desc then attempt
1352 * to send them.
1353 *
1354 * Returns 0 on success and < 0 on error.
1355 */
1356int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
1357		      struct nfs_pgio_header *hdr)
1358{
1359	LIST_HEAD(pages);
1360
1361	desc->pg_io_completion = hdr->io_completion;
1362	desc->pg_dreq = hdr->dreq;
1363	list_splice_init(&hdr->pages, &pages);
1364	while (!list_empty(&pages)) {
1365		struct nfs_page *req = nfs_list_entry(pages.next);
1366
1367		if (!nfs_pageio_add_request(desc, req))
1368			break;
1369	}
1370	nfs_pageio_complete(desc);
1371	if (!list_empty(&pages)) {
1372		int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
1373		hdr->completion_ops->error_cleanup(&pages, err);
1374		nfs_set_pgio_error(hdr, err, hdr->io_start);
1375		return err;
1376	}
1377	return 0;
1378}
1379EXPORT_SYMBOL_GPL(nfs_pageio_resend);
1380
1381/**
1382 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
1383 * @desc: pointer to io descriptor
1384 */
1385void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
1386{
1387	u32 midx;
1388
1389	for (midx = 0; midx < desc->pg_mirror_count; midx++)
1390		nfs_pageio_complete_mirror(desc, midx);
1391
1392	if (desc->pg_error < 0)
1393		nfs_pageio_error_cleanup(desc);
1394	if (desc->pg_ops->pg_cleanup)
1395		desc->pg_ops->pg_cleanup(desc);
1396	nfs_pageio_cleanup_mirroring(desc);
1397}
1398
1399/**
1400 * nfs_pageio_cond_complete - Conditional I/O completion
1401 * @desc: pointer to io descriptor
1402 * @index: page index
1403 *
1404 * It is important to ensure that processes don't try to take locks
1405 * on non-contiguous ranges of pages as that might deadlock. This
1406 * function should be called before attempting to wait on a locked
1407 * nfs_page. It will complete the I/O if the page index 'index'
1408 * is not contiguous with the existing list of pages in 'desc'.
1409 */
1410void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
1411{
1412	struct nfs_pgio_mirror *mirror;
1413	struct nfs_page *prev;
1414	u32 midx;
1415
1416	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
1417		mirror = nfs_pgio_get_mirror(desc, midx);
1418		if (!list_empty(&mirror->pg_list)) {
1419			prev = nfs_list_entry(mirror->pg_list.prev);
1420			if (index != prev->wb_index + 1) {
1421				nfs_pageio_complete(desc);
1422				break;
1423			}
1424		}
1425	}
1426}
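/*
 * Illustrative use (hypothetical caller): a writeback loop that walks pages
 * in index order calls this before locking each page, so a discontiguity
 * flushes the descriptor instead of risking a deadlock on a page held by a
 * still-queued request:
 *
 *	nfs_pageio_cond_complete(&desc, page->index);
 *	lock_page(page);
 *	(... build req and call nfs_pageio_add_request(&desc, req) ...)
 */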
1427
1428/*
1429 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
1430 */
1431void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
1432{
1433	nfs_pageio_complete(pgio);
1434}
1435
1436int __init nfs_init_nfspagecache(void)
1437{
1438	nfs_page_cachep = kmem_cache_create("nfs_page",
1439					    sizeof(struct nfs_page),
1440					    0, SLAB_HWCACHE_ALIGN,
1441					    NULL);
1442	if (nfs_page_cachep == NULL)
1443		return -ENOMEM;
1444
1445	return 0;
1446}
1447
1448void nfs_destroy_nfspagecache(void)
1449{
1450	kmem_cache_destroy(nfs_page_cachep);
1451}
1452
1453static const struct rpc_call_ops nfs_pgio_common_ops = {
1454	.rpc_call_prepare = nfs_pgio_prepare,
1455	.rpc_call_done = nfs_pgio_result,
1456	.rpc_release = nfs_pgio_release,
1457};
1458
1459const struct nfs_pageio_ops nfs_pgio_rw_ops = {
1460	.pg_test = nfs_generic_pg_test,
1461	.pg_doio = nfs_generic_pg_pgios,
1462};
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/fs/nfs/pagelist.c
   4 *
   5 * A set of helper functions for managing NFS read and write requests.
   6 * The main purpose of these routines is to provide support for the
   7 * coalescing of several requests into a single RPC call.
   8 *
   9 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
  10 *
  11 */
  12
  13#include <linux/slab.h>
  14#include <linux/file.h>
  15#include <linux/sched.h>
  16#include <linux/sunrpc/clnt.h>
  17#include <linux/nfs.h>
  18#include <linux/nfs3.h>
  19#include <linux/nfs4.h>
  20#include <linux/nfs_fs.h>
  21#include <linux/nfs_page.h>
  22#include <linux/nfs_mount.h>
  23#include <linux/export.h>
  24
  25#include "internal.h"
  26#include "pnfs.h"
 
  27
  28#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
  29
  30static struct kmem_cache *nfs_page_cachep;
  31static const struct rpc_call_ops nfs_pgio_common_ops;
  32
 
 
 
 
 
 
 
 
  33struct nfs_pgio_mirror *
  34nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
  35{
  36	return nfs_pgio_has_mirroring(desc) ?
  37		&desc->pg_mirrors[desc->pg_mirror_idx] :
  38		&desc->pg_mirrors[0];
  39}
  40EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);
  41
 
 
 
 
 
 
 
 
  42void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
  43		       struct nfs_pgio_header *hdr,
  44		       void (*release)(struct nfs_pgio_header *hdr))
  45{
  46	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
  47
  48
  49	hdr->req = nfs_list_entry(mirror->pg_list.next);
  50	hdr->inode = desc->pg_inode;
  51	hdr->cred = nfs_req_openctx(hdr->req)->cred;
  52	hdr->io_start = req_offset(hdr->req);
  53	hdr->good_bytes = mirror->pg_count;
  54	hdr->io_completion = desc->pg_io_completion;
  55	hdr->dreq = desc->pg_dreq;
  56	hdr->release = release;
  57	hdr->completion_ops = desc->pg_completion_ops;
  58	if (hdr->completion_ops->init_hdr)
  59		hdr->completion_ops->init_hdr(hdr);
  60
  61	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
  62}
  63EXPORT_SYMBOL_GPL(nfs_pgheader_init);
  64
  65void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
  66{
  67	unsigned int new = pos - hdr->io_start;
  68
 
  69	if (hdr->good_bytes > new) {
  70		hdr->good_bytes = new;
  71		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
  72		if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
  73			hdr->error = error;
  74	}
  75}
  76
  77static inline struct nfs_page *
  78nfs_page_alloc(void)
  79{
  80	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
 
  81	if (p)
  82		INIT_LIST_HEAD(&p->wb_list);
  83	return p;
  84}
  85
  86static inline void
  87nfs_page_free(struct nfs_page *p)
  88{
  89	kmem_cache_free(nfs_page_cachep, p);
  90}
  91
  92/**
  93 * nfs_iocounter_wait - wait for i/o to complete
  94 * @l_ctx: nfs_lock_context with io_counter to use
  95 *
  96 * returns -ERESTARTSYS if interrupted by a fatal signal.
  97 * Otherwise returns 0 once the io_count hits 0.
  98 */
  99int
 100nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
 101{
 102	return wait_var_event_killable(&l_ctx->io_count,
 103				       !atomic_read(&l_ctx->io_count));
 104}
 105
 106/**
 107 * nfs_async_iocounter_wait - wait on a rpc_waitqueue for I/O
 108 * to complete
 109 * @task: the rpc_task that should wait
 110 * @l_ctx: nfs_lock_context with io_counter to check
 111 *
 112 * Returns true if there is outstanding I/O to wait on and the
 113 * task has been put to sleep.
 114 */
 115bool
 116nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
 117{
 118	struct inode *inode = d_inode(l_ctx->open_context->dentry);
 119	bool ret = false;
 120
 121	if (atomic_read(&l_ctx->io_count) > 0) {
 122		rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
 123		ret = true;
 124	}
 125
 126	if (atomic_read(&l_ctx->io_count) == 0) {
 127		rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
 128		ret = false;
 129	}
 130
 131	return ret;
 132}
 133EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
 134
 135/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 136 * nfs_page_group_lock - lock the head of the page group
 137 * @req - request in group that is to be locked
 138 *
 139 * this lock must be held when traversing or modifying the page
 140 * group list
 141 *
 142 * return 0 on success, < 0 on error
 143 */
 144int
 145nfs_page_group_lock(struct nfs_page *req)
 146{
 147	struct nfs_page *head = req->wb_head;
 148
 149	WARN_ON_ONCE(head != head->wb_head);
 150
 151	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
 152		return 0;
 153
 154	set_bit(PG_CONTENDED1, &head->wb_flags);
 155	smp_mb__after_atomic();
 156	return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
 157				TASK_UNINTERRUPTIBLE);
 158}
 159
 160/*
 161 * nfs_page_group_unlock - unlock the head of the page group
 162 * @req - request in group that is to be unlocked
 163 */
 164void
 165nfs_page_group_unlock(struct nfs_page *req)
 166{
 167	struct nfs_page *head = req->wb_head;
 168
 169	WARN_ON_ONCE(head != head->wb_head);
 170
 171	smp_mb__before_atomic();
 172	clear_bit(PG_HEADLOCK, &head->wb_flags);
 173	smp_mb__after_atomic();
 174	if (!test_bit(PG_CONTENDED1, &head->wb_flags))
 175		return;
 176	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
 177}
 178
 179/*
 180 * nfs_page_group_sync_on_bit_locked
 181 *
 182 * must be called with page group lock held
 183 */
 184static bool
 185nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
 186{
 187	struct nfs_page *head = req->wb_head;
 188	struct nfs_page *tmp;
 189
 190	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
 191	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));
 192
 193	tmp = req->wb_this_page;
 194	while (tmp != req) {
 195		if (!test_bit(bit, &tmp->wb_flags))
 196			return false;
 197		tmp = tmp->wb_this_page;
 198	}
 199
 200	/* true! reset all bits */
 201	tmp = req;
 202	do {
 203		clear_bit(bit, &tmp->wb_flags);
 204		tmp = tmp->wb_this_page;
 205	} while (tmp != req);
 206
 207	return true;
 208}
 209
 210/*
 211 * nfs_page_group_sync_on_bit - set bit on current request, but only
 212 *   return true if the bit is set for all requests in page group
 213 * @req - request in page group
 214 * @bit - PG_* bit that is used to sync page group
 215 */
 216bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
 217{
 218	bool ret;
 219
 220	nfs_page_group_lock(req);
 221	ret = nfs_page_group_sync_on_bit_locked(req, bit);
 222	nfs_page_group_unlock(req);
 223
 224	return ret;
 225}
 226
 227/*
 228 * nfs_page_group_init - Initialize the page group linkage for @req
 229 * @req - a new nfs request
 230 * @prev - the previous request in page group, or NULL if @req is the first
 231 *         or only request in the group (the head).
 232 */
 233static inline void
 234nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
 235{
 236	struct inode *inode;
 237	WARN_ON_ONCE(prev == req);
 238
 239	if (!prev) {
 240		/* a head request */
 241		req->wb_head = req;
 242		req->wb_this_page = req;
 243	} else {
 244		/* a subrequest */
 245		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
 246		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
 247		req->wb_head = prev->wb_head;
 248		req->wb_this_page = prev->wb_this_page;
 249		prev->wb_this_page = req;
 250
 251		/* All subrequests take a ref on the head request until
 252		 * nfs_page_group_destroy is called */
 253		kref_get(&req->wb_head->wb_kref);
 254
 255		/* grab extra ref and bump the request count if head request
 256		 * has extra ref from the write/commit path to handle handoff
 257		 * between write and commit lists. */
 258		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
 259			inode = page_file_mapping(req->wb_page)->host;
 260			set_bit(PG_INODE_REF, &req->wb_flags);
 261			kref_get(&req->wb_kref);
 262			atomic_long_inc(&NFS_I(inode)->nrequests);
 263		}
 264	}
 265}
 266
 267/*
 268 * nfs_page_group_destroy - sync the destruction of page groups
 269 * @req - request that no longer needs the page group
 270 *
 271 * releases the page group reference from each member once all
 272 * members have called this function.
 273 */
 274static void
 275nfs_page_group_destroy(struct kref *kref)
 276{
 277	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
 278	struct nfs_page *head = req->wb_head;
 279	struct nfs_page *tmp, *next;
 280
 281	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
 282		goto out;
 283
 284	tmp = req;
 285	do {
 286		next = tmp->wb_this_page;
 287		/* unlink and free */
 288		tmp->wb_this_page = tmp;
 289		tmp->wb_head = tmp;
 290		nfs_free_request(tmp);
 291		tmp = next;
 292	} while (tmp != req);
 293out:
 294	/* subrequests must release the ref on the head request */
 295	if (head != req)
 296		nfs_release_request(head);
 297}
 298
 299static struct nfs_page *
 300__nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
 301		   unsigned int pgbase, unsigned int offset,
 302		   unsigned int count)
 303{
 304	struct nfs_page		*req;
 305	struct nfs_open_context *ctx = l_ctx->open_context;
 306
 307	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
 308		return ERR_PTR(-EBADF);
 309	/* try to allocate the request struct */
 310	req = nfs_page_alloc();
 311	if (req == NULL)
 312		return ERR_PTR(-ENOMEM);
 313
 314	req->wb_lock_context = l_ctx;
 315	refcount_inc(&l_ctx->count);
 316	atomic_inc(&l_ctx->io_count);
 317
 318	/* Initialize the request struct. Initially, we assume a
 319	 * long write-back delay. This will be adjusted in
 320	 * update_nfs_request below if the region is not locked. */
 321	req->wb_page    = page;
 322	if (page) {
 323		req->wb_index = page_index(page);
 324		get_page(page);
 325	}
 326	req->wb_offset  = offset;
 327	req->wb_pgbase	= pgbase;
 328	req->wb_bytes   = count;
 329	kref_init(&req->wb_kref);
 330	req->wb_nio = 0;
 331	return req;
 332}
 333
 334/**
 335 * nfs_create_request - Create an NFS read/write request.
 336 * @ctx: open context to use
 337 * @page: page to write
 338 * @offset: starting offset within the page for the write
 339 * @count: number of bytes to read/write
 340 *
 341 * The page must be locked by the caller. This makes sure we never
 342 * create two different requests for the same page.
 343 * User should ensure it is safe to sleep in this function.
 344 */
 345struct nfs_page *
 346nfs_create_request(struct nfs_open_context *ctx, struct page *page,
 347		   unsigned int offset, unsigned int count)
 348{
 349	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
 350	struct nfs_page *ret;
 351
 352	if (IS_ERR(l_ctx))
 353		return ERR_CAST(l_ctx);
 354	ret = __nfs_create_request(l_ctx, page, offset, offset, count);
 355	if (!IS_ERR(ret))
 356		nfs_page_group_init(ret, NULL);
 357	nfs_put_lock_context(l_ctx);
 358	return ret;
 359}
 360
 361static struct nfs_page *
 362nfs_create_subreq(struct nfs_page *req, struct nfs_page *last,
 363		  unsigned int pgbase, unsigned int offset,
 
 364		  unsigned int count)
 365{
 
 366	struct nfs_page *ret;
 367
 368	ret = __nfs_create_request(req->wb_lock_context, req->wb_page,
 369			pgbase, offset, count);
 370	if (!IS_ERR(ret)) {
 
 
 
 
 
 
 371		nfs_lock_request(ret);
 372		ret->wb_index = req->wb_index;
 373		nfs_page_group_init(ret, last);
 374		ret->wb_nio = req->wb_nio;
 375	}
 376	return ret;
 377}
 378
 379/**
 380 * nfs_unlock_request - Unlock request and wake up sleepers.
 381 * @req: pointer to request
 382 */
 383void nfs_unlock_request(struct nfs_page *req)
 384{
 385	if (!NFS_WBACK_BUSY(req)) {
 386		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
 387		BUG();
 388	}
 389	smp_mb__before_atomic();
 390	clear_bit(PG_BUSY, &req->wb_flags);
 391	smp_mb__after_atomic();
 392	if (!test_bit(PG_CONTENDED2, &req->wb_flags))
 393		return;
 394	wake_up_bit(&req->wb_flags, PG_BUSY);
 395}
 396
 397/**
 398 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 399 * @req: pointer to request
 400 */
 401void nfs_unlock_and_release_request(struct nfs_page *req)
 402{
 403	nfs_unlock_request(req);
 404	nfs_release_request(req);
 405}
 406
 407/*
 408 * nfs_clear_request - Free up all resources allocated to the request
 409 * @req:
 410 *
 411 * Release page and open context resources associated with a read/write
 412 * request after it has completed.
 413 */
 414static void nfs_clear_request(struct nfs_page *req)
 415{
 416	struct page *page = req->wb_page;
 417	struct nfs_lock_context *l_ctx = req->wb_lock_context;
 418	struct nfs_open_context *ctx;
 419
 420	if (page != NULL) {
 421		put_page(page);
 422		req->wb_page = NULL;
 423	}
 424	if (l_ctx != NULL) {
 425		if (atomic_dec_and_test(&l_ctx->io_count)) {
 426			wake_up_var(&l_ctx->io_count);
 427			ctx = l_ctx->open_context;
 428			if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
 429				rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
 430		}
 431		nfs_put_lock_context(l_ctx);
 432		req->wb_lock_context = NULL;
 433	}
 434}
 435
 436/**
 437 * nfs_free_request - Free an NFS read/write request
 438 * @req: request to free
 439 *
 440 * Note: Should never be called with the spinlock held!
 441 */
 442void nfs_free_request(struct nfs_page *req)
 443{
 444	WARN_ON_ONCE(req->wb_this_page != req);
 445
 446	/* extra debug: make sure no sync bits are still set */
 447	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
 448	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
 449	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
 450	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
 451	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));
 452
 453	/* Release struct file and open context */
 454	nfs_clear_request(req);
 455	nfs_page_free(req);
 456}
 457
 458void nfs_release_request(struct nfs_page *req)
 459{
 460	kref_put(&req->wb_kref, nfs_page_group_destroy);
 461}
 462EXPORT_SYMBOL_GPL(nfs_release_request);
 463
 464/**
 465 * nfs_wait_on_request - Wait for a request to complete.
 466 * @req: request to wait upon.
 467 *
 468 * Waits uninterruptibly until the request is no longer busy.
 469 * The user is responsible for holding a count on the request.
 470 */
 471int
 472nfs_wait_on_request(struct nfs_page *req)
 473{
 474	if (!test_bit(PG_BUSY, &req->wb_flags))
 475		return 0;
 476	set_bit(PG_CONTENDED2, &req->wb_flags);
 477	smp_mb__after_atomic();
 478	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
 479			      TASK_UNINTERRUPTIBLE);
 480}
 481EXPORT_SYMBOL_GPL(nfs_wait_on_request);
 482
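    /*
     * A typical lock-or-wait pattern (a sketch of what callers such as
     * the writeback path do; not a verbatim copy of any caller):
     *
     *	while (!nfs_lock_request(req)) {
     *		err = nfs_wait_on_request(req);
     *		if (err)
     *			return err;
     *		... re-check the request state before retrying ...
     *	}
     */
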
 483/*
 484 * nfs_generic_pg_test - determine if requests can be coalesced
 485 * @desc: pointer to descriptor
 486 * @prev: previous request in desc, or NULL
 487 * @req: this request
 488 *
 489 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 490 * the number of bytes that can safely be coalesced.
 491 */
 492size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
 493			   struct nfs_page *prev, struct nfs_page *req)
 494{
 495	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 496
 497
 498	if (mirror->pg_count > mirror->pg_bsize) {
 499		/* should never happen */
 500		WARN_ON_ONCE(1);
 501		return 0;
 502	}
 503
 504	/*
 505	 * Limit the request size so that we can still allocate a page array
 506	 * for it without upsetting the slab allocator.
 507	 */
 508	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
 509			sizeof(struct page *) > PAGE_SIZE)
 510		return 0;
 511
 512	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
 513}
 514EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
 515
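    /*
     * Worked example of the limit above, assuming PAGE_SIZE == 4096 and
     * sizeof(struct page *) == 8: the request is rejected once the
     * coalesced byte count spans more than PAGE_SIZE / 8 == 512 pages,
     * i.e. 512 * 4096 bytes == 2 MiB, which keeps the pagevec allocated
     * in nfs_generic_pgio() within a single page.
     */
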
 516struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
 517{
 518	struct nfs_pgio_header *hdr = ops->rw_alloc_header();
 519
 520	if (hdr) {
 521		INIT_LIST_HEAD(&hdr->pages);
 522		hdr->rw_ops = ops;
 523	}
 524	return hdr;
 525}
 526EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
 527
 528/**
 529 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 530 * @hdr: A header that has had nfs_generic_pgio called
 531 *
 532 * Frees memory and releases the references taken by nfs_generic_pgio,
 533 * so that nfs_generic_pgio may safely be called again with the same
 534 * header.
 535 */
 536static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
 537{
 538	if (hdr->args.context)
 539		put_nfs_open_context(hdr->args.context);
 540	if (hdr->page_array.pagevec != hdr->page_array.page_array)
 541		kfree(hdr->page_array.pagevec);
 542}
 543
 544/*
 545 * nfs_pgio_header_free - Free a read or write header
 546 * @hdr: The header to free
 547 */
 548void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
 549{
 550	nfs_pgio_data_destroy(hdr);
 551	hdr->rw_ops->rw_free_header(hdr);
 552}
 553EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
 554
 555/**
 556 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 557 * @hdr: The pageio hdr
 558 * @count: Number of bytes to read or write
 559 * @how: How to commit data (writes only)
 560 * @cinfo: Commit information for the call (writes only)
 561 */
 562static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
 563			      unsigned int count,
 564			      int how, struct nfs_commit_info *cinfo)
 565{
 566	struct nfs_page *req = hdr->req;
 567
 568	/* Set up the RPC argument and reply structs
 569	 * NB: take care not to mess about with hdr->commit et al. */
 570
 571	hdr->args.fh     = NFS_FH(hdr->inode);
 572	hdr->args.offset = req_offset(req);
 573	/* pnfs_set_layoutcommit needs this */
 574	hdr->mds_offset = hdr->args.offset;
 575	hdr->args.pgbase = req->wb_pgbase;
 576	hdr->args.pages  = hdr->page_array.pagevec;
 577	hdr->args.count  = count;
 578	hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
 579	hdr->args.lock_context = req->wb_lock_context;
 580	hdr->args.stable  = NFS_UNSTABLE;
 581	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
 582	case 0:
 583		break;
 584	case FLUSH_COND_STABLE:
 585		if (nfs_reqs_to_commit(cinfo))
 586			break;
 587		/* fall through */
 588	default:
 589		hdr->args.stable = NFS_FILE_SYNC;
 590	}
 591
 592	hdr->res.fattr   = &hdr->fattr;
 593	hdr->res.count   = 0;
 594	hdr->res.eof     = 0;
 595	hdr->res.verf    = &hdr->verf;
 596	nfs_fattr_init(&hdr->fattr);
 597}
 598
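    /*
     * The stable-write selection above reduces to the following mapping
     * (writes only; reads ignore it):
     *
     *	how == 0				-> NFS_UNSTABLE
     *	how has FLUSH_COND_STABLE set and
     *	  commits are already pending		-> NFS_UNSTABLE
     *	how has FLUSH_COND_STABLE set and
     *	  nothing is waiting to be committed	-> NFS_FILE_SYNC
     *	how has FLUSH_STABLE set		-> NFS_FILE_SYNC
     */
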
 599/**
 600 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 601 * @task: The current task
 602 * @calldata: pageio header to prepare
 603 */
 604static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
 605{
 606	struct nfs_pgio_header *hdr = calldata;
 607	int err;
 608	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
 609	if (err)
 610		rpc_exit(task, err);
 611}
 612
 613int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
 614		      const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
 615		      const struct rpc_call_ops *call_ops, int how, int flags)
 616{
 617	struct rpc_task *task;
 618	struct rpc_message msg = {
 619		.rpc_argp = &hdr->args,
 620		.rpc_resp = &hdr->res,
 621		.rpc_cred = cred,
 622	};
 623	struct rpc_task_setup task_setup_data = {
 624		.rpc_client = clnt,
 625		.task = &hdr->task,
 626		.rpc_message = &msg,
 627		.callback_ops = call_ops,
 628		.callback_data = hdr,
 629		.workqueue = nfsiod_workqueue,
 630		.flags = RPC_TASK_ASYNC | flags,
 631	};
 632	int ret = 0;
 633
 634	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);
 635
 636	dprintk("NFS: initiated pgio call "
 637		"(req %s/%llu, %u bytes @ offset %llu)\n",
 638		hdr->inode->i_sb->s_id,
 639		(unsigned long long)NFS_FILEID(hdr->inode),
 640		hdr->args.count,
 641		(unsigned long long)hdr->args.offset);
 642
 643	task = rpc_run_task(&task_setup_data);
 644	if (IS_ERR(task)) {
 645		ret = PTR_ERR(task);
 646		goto out;
 647	}
 648	if (how & FLUSH_SYNC) {
 649		ret = rpc_wait_for_completion_task(task);
 650		if (ret == 0)
 651			ret = task->tk_status;
 652	}
 653	rpc_put_task(task);
 654out:
 655	return ret;
 656}
 657EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
 658
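    /*
     * A minimal synchronous-caller sketch (hypothetical; real callers
     * such as nfs_generic_pg_pgios() pass their own call_ops and flags):
     *
     *	ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode), hdr, hdr->cred,
     *				NFS_PROTO(hdr->inode), call_ops,
     *				FLUSH_SYNC, 0);
     *
     * With FLUSH_SYNC set, the call waits for the RPC task to complete
     * and returns its tk_status; otherwise it returns as soon as the
     * task has been queued.
     */
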
 659/**
 660 * nfs_pgio_error - Clean up from a pageio error
 661 * @hdr: pageio header
 662 */
 663static void nfs_pgio_error(struct nfs_pgio_header *hdr)
 664{
 665	set_bit(NFS_IOHDR_REDO, &hdr->flags);
 666	hdr->completion_ops->completion(hdr);
 667}
 668
 669/**
 670 * nfs_pgio_release - Release pageio data
 671 * @calldata: The pageio header to release
 672 */
 673static void nfs_pgio_release(void *calldata)
 674{
 675	struct nfs_pgio_header *hdr = calldata;
 676	hdr->completion_ops->completion(hdr);
 677}
 678
 679static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
 680				   unsigned int bsize)
 681{
 682	INIT_LIST_HEAD(&mirror->pg_list);
 683	mirror->pg_bytes_written = 0;
 684	mirror->pg_count = 0;
 685	mirror->pg_bsize = bsize;
 686	mirror->pg_base = 0;
 687	mirror->pg_recoalesce = 0;
 688}
 689
 690/**
 691 * nfs_pageio_init - initialise a page io descriptor
 692 * @desc: pointer to descriptor
 693 * @inode: pointer to inode
 694 * @pg_ops: pointer to pageio operations
 695 * @compl_ops: pointer to pageio completion operations
 696 * @rw_ops: pointer to nfs read/write operations
 697 * @bsize: io block size
 698 * @io_flags: extra parameters for the io function
 699 */
 700void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
 701		     struct inode *inode,
 702		     const struct nfs_pageio_ops *pg_ops,
 703		     const struct nfs_pgio_completion_ops *compl_ops,
 704		     const struct nfs_rw_ops *rw_ops,
 705		     size_t bsize,
 706		     int io_flags)
 707{
 708	desc->pg_moreio = 0;
 709	desc->pg_inode = inode;
 710	desc->pg_ops = pg_ops;
 711	desc->pg_completion_ops = compl_ops;
 712	desc->pg_rw_ops = rw_ops;
 713	desc->pg_ioflags = io_flags;
 714	desc->pg_error = 0;
 715	desc->pg_lseg = NULL;
 716	desc->pg_io_completion = NULL;
 717	desc->pg_dreq = NULL;
 718	desc->pg_bsize = bsize;
 719
 720	desc->pg_mirror_count = 1;
 721	desc->pg_mirror_idx = 0;
 722
 723	desc->pg_mirrors_dynamic = NULL;
 724	desc->pg_mirrors = desc->pg_mirrors_static;
 725	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
 726	desc->pg_maxretrans = 0;
 727}
 728
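    /*
     * An initialization sketch modelled on the read path (the completion
     * and rw ops named my_* are placeholders, not symbols defined here):
     *
     *	struct nfs_pageio_descriptor pgio;
     *
     *	nfs_pageio_init(&pgio, inode, &nfs_pgio_rw_ops,
     *			&my_completion_ops, &my_rw_ops,
     *			NFS_SERVER(inode)->rsize, 0);
     *	... nfs_pageio_add_request(&pgio, req) for each request ...
     *	nfs_pageio_complete(&pgio);
     */
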
 729/**
 730 * nfs_pgio_result - Basic pageio error handling
 731 * @task: The task that ran
 732 * @calldata: Pageio header to check
 733 */
 734static void nfs_pgio_result(struct rpc_task *task, void *calldata)
 735{
 736	struct nfs_pgio_header *hdr = calldata;
 737	struct inode *inode = hdr->inode;
 738
 739	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
 740		task->tk_pid, task->tk_status);
 741
 742	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
 743		return;
 744	if (task->tk_status < 0)
 745		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
 746	else
 747		hdr->rw_ops->rw_result(task, hdr);
 748}
 749
 750/*
 751 * Set up a read or write RPC call for the requests queued on the
 752 * descriptor; the pages must have been locked by the caller.
 753 *
 754 * It may happen that a page we're passed is not marked dirty.
 755 * This is the case if nfs_updatepage detects a conflicting request
 756 * that has been written but not committed.
 757 */
 758int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 759		     struct nfs_pgio_header *hdr)
 760{
 761	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 762
 763	struct nfs_page		*req;
 764	struct page		**pages,
 765				*last_page;
 766	struct list_head *head = &mirror->pg_list;
 767	struct nfs_commit_info cinfo;
 768	struct nfs_page_array *pg_array = &hdr->page_array;
 769	unsigned int pagecount, pageused;
 770	gfp_t gfp_flags = GFP_KERNEL;
 771
 772	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
 773	pg_array->npages = pagecount;
 774
 775	if (pagecount <= ARRAY_SIZE(pg_array->page_array))
 776		pg_array->pagevec = pg_array->page_array;
 777	else {
 778		pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
 779		if (!pg_array->pagevec) {
 780			pg_array->npages = 0;
 781			nfs_pgio_error(hdr);
 782			desc->pg_error = -ENOMEM;
 783			return desc->pg_error;
 784		}
 785	}
 786
 787	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
 788	pages = hdr->page_array.pagevec;
 789	last_page = NULL;
 790	pageused = 0;
 791	while (!list_empty(head)) {
 792		req = nfs_list_entry(head->next);
 793		nfs_list_move_request(req, &hdr->pages);
 794
 795		if (!last_page || last_page != req->wb_page) {
 796			pageused++;
 797			if (pageused > pagecount)
 798				break;
 799			*pages++ = last_page = req->wb_page;
 800		}
 801	}
 802	if (WARN_ON_ONCE(pageused != pagecount)) {
 803		nfs_pgio_error(hdr);
 804		desc->pg_error = -EINVAL;
 805		return desc->pg_error;
 806	}
 807
 808	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
 809	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
 810		desc->pg_ioflags &= ~FLUSH_COND_STABLE;
 811
 812	/* Set up the argument struct */
 813	nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo);
 814	desc->pg_rpc_callops = &nfs_pgio_common_ops;
 815	return 0;
 816}
 817EXPORT_SYMBOL_GPL(nfs_generic_pgio);
 818
 819static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
 820{
 821	struct nfs_pgio_header *hdr;
 822	int ret;
 823
 824	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
 825	if (!hdr) {
 826		desc->pg_error = -ENOMEM;
 827		return desc->pg_error;
 828	}
 829	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
 830	ret = nfs_generic_pgio(desc, hdr);
 831	if (ret == 0)
 832		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
 833					hdr,
 834					hdr->cred,
 835					NFS_PROTO(hdr->inode),
 836					desc->pg_rpc_callops,
 837					desc->pg_ioflags, 0);
 838	return ret;
 839}
 840
 841static struct nfs_pgio_mirror *
 842nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
 843		unsigned int mirror_count)
 844{
 845	struct nfs_pgio_mirror *ret;
 846	unsigned int i;
 847
 848	kfree(desc->pg_mirrors_dynamic);
 849	desc->pg_mirrors_dynamic = NULL;
 850	if (mirror_count == 1)
 851		return desc->pg_mirrors_static;
 852	ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_KERNEL);
 853	if (ret != NULL) {
 854		for (i = 0; i < mirror_count; i++)
 855			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
 856		desc->pg_mirrors_dynamic = ret;
 857	}
 858	return ret;
 859}
 860
 861/*
 862 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 863 *				by calling the pg_get_mirror_count op
 864 */
 865static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
 866				       struct nfs_page *req)
 867{
 868	unsigned int mirror_count = 1;
 869
 870	if (pgio->pg_ops->pg_get_mirror_count)
 871		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
 872	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
 873		return;
 874
 875	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
 876		pgio->pg_error = -EINVAL;
 877		return;
 878	}
 879
 880	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
 881	if (pgio->pg_mirrors == NULL) {
 882		pgio->pg_error = -ENOMEM;
 883		pgio->pg_mirrors = pgio->pg_mirrors_static;
 884		mirror_count = 1;
 885	}
 886	pgio->pg_mirror_count = mirror_count;
 887}
 888
 889/*
 890 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 891 */
 892void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
 893{
 894	pgio->pg_mirror_count = 1;
 895	pgio->pg_mirror_idx = 0;
 896}
 897
 898static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
 899{
 900	pgio->pg_mirror_count = 1;
 901	pgio->pg_mirror_idx = 0;
 902	pgio->pg_mirrors = pgio->pg_mirrors_static;
 903	kfree(pgio->pg_mirrors_dynamic);
 904	pgio->pg_mirrors_dynamic = NULL;
 905}
 906
 907static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
 908		const struct nfs_lock_context *l2)
 909{
 910	return l1->lockowner == l2->lockowner;
 911}
 912
 913/**
 914 * nfs_can_coalesce_requests - test two requests for compatibility
 915 * @prev: pointer to nfs_page
 916 * @req: pointer to nfs_page
 917 * @pgio: pointer to nfs_pageio_descriptor
 918 *
 919 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 920 * page data area they describe is contiguous, and that their RPC
 921 * credentials, NFSv4 open state, and lockowners are the same.
 922 *
 923 * Return 'true' if this is the case, else return 'false'.
 924 */
 925static bool nfs_can_coalesce_requests(struct nfs_page *prev,
 926				      struct nfs_page *req,
 927				      struct nfs_pageio_descriptor *pgio)
 928{
 929	size_t size;
 930	struct file_lock_context *flctx;
 931
 932	if (prev) {
 933		if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
 934			return false;
 935		flctx = d_inode(nfs_req_openctx(req)->dentry)->i_flctx;
 936		if (flctx != NULL &&
 937		    !(list_empty_careful(&flctx->flc_posix) &&
 938		      list_empty_careful(&flctx->flc_flock)) &&
 939		    !nfs_match_lock_context(req->wb_lock_context,
 940					    prev->wb_lock_context))
 941			return false;
 942		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
 943			return false;
 944		if (req->wb_page == prev->wb_page) {
 945			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
 946				return false;
 947		} else {
 948			if (req->wb_pgbase != 0 ||
 949			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
 950				return false;
 951		}
 952	}
 953	size = pgio->pg_ops->pg_test(pgio, prev, req);
 954	WARN_ON_ONCE(size > req->wb_bytes);
 955	if (size && size < req->wb_bytes)
 956		req->wb_bytes = size;
 957	return size > 0;
 958}
 959
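    /*
     * Contiguity example for the checks above (illustrative numbers):
     * if @prev covers all 4096 bytes of page index 7 (wb_pgbase == 0,
     * wb_bytes == 4096), then @req coalesces only if it starts exactly
     * at file offset 8 * 4096 with wb_pgbase == 0 on page index 8; any
     * gap or overlap fails the req_offset() test and the descriptor is
     * flushed before @req is retried.
     */
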
 960/**
 961 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 962 * @desc: destination io descriptor
 963 * @req: request
 964 *
 965 * Returns true if the request 'req' was successfully coalesced into the
 966 * existing list of pages 'desc'.
 967 */
 968static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
 969				     struct nfs_page *req)
 970{
 971	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 972
 973	struct nfs_page *prev = NULL;
 974
 975	if (mirror->pg_count != 0) {
 976		prev = nfs_list_entry(mirror->pg_list.prev);
 977	} else {
 978		if (desc->pg_ops->pg_init)
 979			desc->pg_ops->pg_init(desc, req);
 980		if (desc->pg_error < 0)
 981			return 0;
 982		mirror->pg_base = req->wb_pgbase;
 983	}
 984
 985	if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
 986		if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
 987			desc->pg_error = -ETIMEDOUT;
 988		else
 989			desc->pg_error = -EIO;
 990		return 0;
 991	}
 992
 993	if (!nfs_can_coalesce_requests(prev, req, desc))
 994		return 0;
 995	nfs_list_move_request(req, &mirror->pg_list);
 996	mirror->pg_count += req->wb_bytes;
 997	return 1;
 998}
 999
1000/*
1001 * Helper for nfs_pageio_add_request and nfs_pageio_complete
1002 */
1003static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
1004{
1005	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1006
1007
1008	if (!list_empty(&mirror->pg_list)) {
1009		int error = desc->pg_ops->pg_doio(desc);
1010		if (error < 0)
1011			desc->pg_error = error;
1012		else
1013			mirror->pg_bytes_written += mirror->pg_count;
1014	}
1015	if (list_empty(&mirror->pg_list)) {
1016		mirror->pg_count = 0;
1017		mirror->pg_base = 0;
1018	}
1019}
1020
1021static void
1022nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
1023		struct nfs_page *req)
1024{
1025	LIST_HEAD(head);
1026
1027	nfs_list_move_request(req, &head);
1028	desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
1029}
1030
1031/**
1032 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
1033 * @desc: destination io descriptor
1034 * @req: request
1035 *
1036 * This may split a request into subrequests which are all part of the
1037 * same page group.
1038 *
1039 * Returns true if the request 'req' was successfully coalesced into the
1040 * existing list of pages 'desc'.
1041 */
1042static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
1043			   struct nfs_page *req)
1044{
1045	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1046
1047	struct nfs_page *subreq;
1048	unsigned int bytes_left = 0;
1049	unsigned int offset, pgbase;
1050
1051	nfs_page_group_lock(req);
1052
1053	subreq = req;
1054	bytes_left = subreq->wb_bytes;
1055	offset = subreq->wb_offset;
1056	pgbase = subreq->wb_pgbase;
1057
1058	do {
1059		if (!nfs_pageio_do_add_request(desc, subreq)) {
1060			/* make sure pg_test call(s) did nothing */
1061			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
1062			WARN_ON_ONCE(subreq->wb_offset != offset);
1063			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);
1064
1065			nfs_page_group_unlock(req);
1066			desc->pg_moreio = 1;
1067			nfs_pageio_doio(desc);
1068			if (desc->pg_error < 0 || mirror->pg_recoalesce)
1069				goto out_cleanup_subreq;
1070			/* retry add_request for this subreq */
1071			nfs_page_group_lock(req);
1072			continue;
1073		}
1074
1075		/* check for buggy pg_test call(s) */
1076		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
1077		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
1078		WARN_ON_ONCE(subreq->wb_bytes == 0);
1079
1080		bytes_left -= subreq->wb_bytes;
1081		offset += subreq->wb_bytes;
1082		pgbase += subreq->wb_bytes;
1083
1084		if (bytes_left) {
1085			subreq = nfs_create_subreq(req, subreq, pgbase,
1086					offset, bytes_left);
1087			if (IS_ERR(subreq))
1088				goto err_ptr;
1089		}
1090	} while (bytes_left > 0);
1091
1092	nfs_page_group_unlock(req);
1093	return 1;
1094err_ptr:
1095	desc->pg_error = PTR_ERR(subreq);
1096	nfs_page_group_unlock(req);
1097	return 0;
1098out_cleanup_subreq:
1099	if (req != subreq)
1100		nfs_pageio_cleanup_request(desc, subreq);
1101	return 0;
1102}
1103
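    /*
     * Splitting example for the loop above (illustrative): if pg_test
     * trims a 4096-byte request to 1024 bytes (say, at a pNFS layout
     * boundary), the first 1024 bytes are queued and nfs_create_subreq()
     * carves a subrequest for the remaining 3072 bytes; the loop repeats
     * until bytes_left reaches zero, so a single nfs_page may be sent as
     * several subrequests in the same page group.
     */
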
1104static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
1105{
1106	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1107	LIST_HEAD(head);
1108
1109	do {
1110		list_splice_init(&mirror->pg_list, &head);
1111		mirror->pg_bytes_written -= mirror->pg_count;
1112		mirror->pg_count = 0;
1113		mirror->pg_base = 0;
1114		mirror->pg_recoalesce = 0;
1115
1116		while (!list_empty(&head)) {
1117			struct nfs_page *req;
1118
1119			req = list_first_entry(&head, struct nfs_page, wb_list);
1120			if (__nfs_pageio_add_request(desc, req))
1121				continue;
1122			if (desc->pg_error < 0) {
1123				list_splice_tail(&head, &mirror->pg_list);
1124				mirror->pg_recoalesce = 1;
1125				return 0;
1126			}
1127			break;
1128		}
1129	} while (mirror->pg_recoalesce);
1130	return 1;
1131}
1132
1133static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
1134		struct nfs_page *req)
1135{
1136	int ret;
1137
1138	do {
1139		ret = __nfs_pageio_add_request(desc, req);
1140		if (ret)
1141			break;
1142		if (desc->pg_error < 0)
1143			break;
1144		ret = nfs_do_recoalesce(desc);
1145	} while (ret);
1146
1147	return ret;
1148}
1149
1150static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
1151{
1152	u32 midx;
1153	struct nfs_pgio_mirror *mirror;
1154
1155	if (!desc->pg_error)
1156		return;
1157
1158	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
1159		mirror = &desc->pg_mirrors[midx];
1160		desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
1161				desc->pg_error);
1162	}
1163}
1164
1165int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
1166			   struct nfs_page *req)
1167{
1168	u32 midx;
1169	unsigned int pgbase, offset, bytes;
1170	struct nfs_page *dupreq, *lastreq;
1171
1172	pgbase = req->wb_pgbase;
1173	offset = req->wb_offset;
1174	bytes = req->wb_bytes;
1175
1176	nfs_pageio_setup_mirroring(desc, req);
1177	if (desc->pg_error < 0)
1178		goto out_failed;
1179
1180	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
1181		if (midx) {
1182			nfs_page_group_lock(req);
1183
1184			/* find the last request */
1185			for (lastreq = req->wb_head;
1186			     lastreq->wb_this_page != req->wb_head;
1187			     lastreq = lastreq->wb_this_page)
1188				;
1189
1190			dupreq = nfs_create_subreq(req, lastreq,
1191					pgbase, offset, bytes);
1192
1193			nfs_page_group_unlock(req);
1194			if (IS_ERR(dupreq)) {
1195				desc->pg_error = PTR_ERR(dupreq);
1196				goto out_failed;
1197			}
1198		} else
1199			dupreq = req;
1200
1201		if (nfs_pgio_has_mirroring(desc))
1202			desc->pg_mirror_idx = midx;
1203		if (!nfs_pageio_add_request_mirror(desc, dupreq))
1204			goto out_cleanup_subreq;
1205	}
1206
1207	return 1;
1208
1209out_cleanup_subreq:
1210	if (req != dupreq)
1211		nfs_pageio_cleanup_request(desc, dupreq);
1212out_failed:
1213	nfs_pageio_error_cleanup(desc);
1214	return 0;
1215}
1216
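    /*
     * Mirroring example for the loop above (illustrative): with
     * pg_mirror_count == 2, mirror 0 queues @req itself while mirror 1
     * receives a duplicate created by nfs_create_subreq() covering the
     * same byte range, so a flexfiles-style layout can send the data to
     * two data servers.
     */
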
1217/*
1218 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
1219 *				nfs_pageio_descriptor
1220 * @desc: pointer to io descriptor
1221 * @mirror_idx: mirror index to complete
1222 */
1223static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
1224				       u32 mirror_idx)
1225{
1226	struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
1227	u32 restore_idx = desc->pg_mirror_idx;
1228
1229	if (nfs_pgio_has_mirroring(desc))
1230		desc->pg_mirror_idx = mirror_idx;
1231	for (;;) {
1232		nfs_pageio_doio(desc);
1233		if (desc->pg_error < 0 || !mirror->pg_recoalesce)
1234			break;
1235		if (!nfs_do_recoalesce(desc))
1236			break;
1237	}
1238	desc->pg_mirror_idx = restore_idx;
1239}
1240
1241/*
1242 * nfs_pageio_resend - Transfer requests to new descriptor and resend
1243 * @hdr: the pgio header to move requests from
1244 * @desc: the pageio descriptor to add requests to
1245 *
1246 * Try to move each request (nfs_page) from @hdr to @desc then attempt
1247 * to send them.
1248 *
1249 * Returns 0 on success and < 0 on error.
1250 */
1251int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
1252		      struct nfs_pgio_header *hdr)
1253{
1254	LIST_HEAD(pages);
1255
1256	desc->pg_io_completion = hdr->io_completion;
1257	desc->pg_dreq = hdr->dreq;
1258	list_splice_init(&hdr->pages, &pages);
1259	while (!list_empty(&pages)) {
1260		struct nfs_page *req = nfs_list_entry(pages.next);
1261
1262		if (!nfs_pageio_add_request(desc, req))
1263			break;
1264	}
1265	nfs_pageio_complete(desc);
1266	if (!list_empty(&pages)) {
1267		int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
1268		hdr->completion_ops->error_cleanup(&pages, err);
1269		nfs_set_pgio_error(hdr, err, hdr->io_start);
1270		return err;
1271	}
1272	return 0;
1273}
1274EXPORT_SYMBOL_GPL(nfs_pageio_resend);
1275
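    /*
     * A resend sketch (a hypothetical caller; the pNFS read path does
     * something very similar when falling back from a layout to the MDS):
     *
     *	struct nfs_pageio_descriptor pgio;
     *
     *	nfs_pageio_init_read(&pgio, hdr->inode, false,
     *			     hdr->completion_ops);
     *	return nfs_pageio_resend(&pgio, hdr);
     */
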
1276/**
1277 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
1278 * @desc: pointer to io descriptor
1279 */
1280void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
1281{
1282	u32 midx;
1283
1284	for (midx = 0; midx < desc->pg_mirror_count; midx++)
1285		nfs_pageio_complete_mirror(desc, midx);
1286
1287	if (desc->pg_error < 0)
1288		nfs_pageio_error_cleanup(desc);
1289	if (desc->pg_ops->pg_cleanup)
1290		desc->pg_ops->pg_cleanup(desc);
1291	nfs_pageio_cleanup_mirroring(desc);
1292}
1293
1294/**
1295 * nfs_pageio_cond_complete - Conditional I/O completion
1296 * @desc: pointer to io descriptor
1297 * @index: page index
1298 *
1299 * It is important to ensure that processes don't try to take locks
1300 * on non-contiguous ranges of pages as that might deadlock. This
1301 * function should be called before attempting to wait on a locked
1302 * nfs_page. It will complete the I/O if the page index 'index'
1303 * is not contiguous with the existing list of pages in 'desc'.
1304 */
1305void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
1306{
1307	struct nfs_pgio_mirror *mirror;
1308	struct nfs_page *prev;
1309	u32 midx;
1310
1311	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
1312		mirror = &desc->pg_mirrors[midx];
1313		if (!list_empty(&mirror->pg_list)) {
1314			prev = nfs_list_entry(mirror->pg_list.prev);
1315			if (index != prev->wb_index + 1) {
1316				nfs_pageio_complete(desc);
1317				break;
1318			}
1319		}
1320	}
1321}
1322
1323int __init nfs_init_nfspagecache(void)
1324{
1325	nfs_page_cachep = kmem_cache_create("nfs_page",
1326					    sizeof(struct nfs_page),
1327					    0, SLAB_HWCACHE_ALIGN,
1328					    NULL);
1329	if (nfs_page_cachep == NULL)
1330		return -ENOMEM;
1331
1332	return 0;
1333}
1334
1335void nfs_destroy_nfspagecache(void)
1336{
1337	kmem_cache_destroy(nfs_page_cachep);
1338}
1339
1340static const struct rpc_call_ops nfs_pgio_common_ops = {
1341	.rpc_call_prepare = nfs_pgio_prepare,
1342	.rpc_call_done = nfs_pgio_result,
1343	.rpc_release = nfs_pgio_release,
1344};
1345
1346const struct nfs_pageio_ops nfs_pgio_rw_ops = {
1347	.pg_test = nfs_generic_pg_test,
1348	.pg_doio = nfs_generic_pg_pgios,
1349};