/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
	return nfs_pgio_has_mirroring(desc) ?
		&desc->pg_mirrors[desc->pg_mirror_idx] :
		&desc->pg_mirrors[0];
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	hdr->req = nfs_list_entry(mirror->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = mirror->pg_count;
	hdr->io_completion = desc->pg_io_completion;
	hdr->dreq = desc->pg_dreq;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);

	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
	    || pos < hdr->io_start + hdr->good_bytes) {
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @l_ctx: nfs_lock_context with io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
{
	return wait_var_event_killable(&l_ctx->io_count,
				       !atomic_read(&l_ctx->io_count));
}
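
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a typical caller pattern for draining outstanding I/O on a lock context.
 * example_drain_io() is hypothetical; nfs_get_lock_context(),
 * nfs_put_lock_context() and nfs_iocounter_wait() are the real APIs used.
 */
static int example_drain_io(struct nfs_open_context *ctx)
{
	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
	int ret;

	if (IS_ERR(l_ctx))
		return PTR_ERR(l_ctx);
	/* blocks until io_count reaches zero; only fatal signals interrupt */
	ret = nfs_iocounter_wait(l_ctx);
	nfs_put_lock_context(l_ctx);
	return ret;
}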

/**
 * nfs_async_iocounter_wait - wait on a rpc_waitqueue for I/O
 * to complete
 * @task: the rpc_task that should wait
 * @l_ctx: nfs_lock_context with io_counter to check
 *
 * Returns true if there is outstanding I/O to wait on and the
 * task has been put to sleep.
 */
bool
nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
{
	struct inode *inode = d_inode(l_ctx->open_context->dentry);
	bool ret = false;

	if (atomic_read(&l_ctx->io_count) > 0) {
		rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
		ret = true;
	}

	if (atomic_read(&l_ctx->io_count) == 0) {
		/* recheck: the last I/O may have completed while queueing */
		rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
		ret = false;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);

/*
 * nfs_page_group_lock - lock the head of the page group
 * @req - request in group that is to be locked
 *
 * this lock must be held when traversing or modifying the page
 * group list
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_group_lock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
		return 0;

	set_bit(PG_CONTENDED1, &head->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
				TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req - request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	smp_mb__before_atomic();
	clear_bit(PG_HEADLOCK, &head->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED1, &head->wb_flags))
		return;
	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
}
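
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the locking discipline for walking a page group. Any traversal of the
 * wb_this_page ring must hold the group head lock, exactly as
 * nfs_page_group_sync_on_bit_locked() below does. example_walk_group() is
 * hypothetical.
 */
static void example_walk_group(struct nfs_page *req)
{
	struct nfs_page *tmp = req;

	nfs_page_group_lock(req);
	do {
		/* inspect tmp->wb_flags, tmp->wb_bytes, ... here */
		tmp = tmp->wb_this_page;
	} while (tmp != req);
	nfs_page_group_unlock(req);
}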

/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}
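
/*
 * Editorial note (not part of the original file): this is how the write
 * path performs once-per-group actions. For example, nfs_page_end_writeback()
 * in write.c calls nfs_page_group_sync_on_bit(req, PG_WB_END) so that only
 * the request which sets the last PG_WB_END bit in the group actually ends
 * writeback on the page.
 */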

/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *         or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	struct inode *inode;
	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref and bump the request count if head request
		 * has extra ref from the write/commit path to handle handoff
		 * between write and commit lists. */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			inode = page_file_mapping(req->wb_page)->host;
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
			atomic_long_inc(&NFS_I(inode)->nrequests);
		}
	}
}

/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp, *next;

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		goto out;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
out:
	/* subrequests must release the ref on the head request */
	if (head != req)
		nfs_release_request(head);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @last: last nfs request created for this page group or NULL if head
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   struct nfs_page *last, unsigned int offset,
		   unsigned int count)
{
	struct nfs_page		*req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	atomic_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	if (page) {
		req->wb_index = page_index(page);
		get_page(page);
	}
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	nfs_page_group_init(req, last);
	return req;
}
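
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the usual lifecycle of a head request. The caller holds the page lock,
 * creates the request, queues it for I/O, and eventually drops its
 * reference with nfs_release_request(). example_make_req() is hypothetical.
 */
static struct nfs_page *example_make_req(struct nfs_open_context *ctx,
					 struct page *page)
{
	struct nfs_page *req;

	/* head request (last == NULL) covering the whole page */
	req = nfs_create_request(ctx, page, NULL, 0, PAGE_SIZE);
	if (IS_ERR(req))
		return req;
	/* ... hand off to a pageio descriptor, then later:
	 *	nfs_release_request(req);
	 */
	return req;
}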

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_atomic();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED2, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		put_page(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		if (atomic_dec_and_test(&l_ctx->io_count)) {
			wake_up_var(&l_ctx->io_count);
			if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
				rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
		}
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/**
 * nfs_free_request - Free an NFS read/write request
 * @req: request to free
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}
EXPORT_SYMBOL_GPL(nfs_release_request);

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	if (!test_bit(PG_BUSY, &req->wb_flags))
		return 0;
	set_bit(PG_CONTENDED2, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(nfs_wait_on_request);
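
/*
 * Editorial note (not part of the original file): PG_CONTENDED1 and
 * PG_CONTENDED2 are pure optimizations. Waiters set them before sleeping
 * (as nfs_wait_on_request() does above) so that the unlock paths can skip
 * the relatively expensive wake_up_bit() whenever the lock was never
 * contended.
 */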

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc; otherwise it
 * returns the number of bytes of @req that can be coalesced.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (mirror->pg_count > mirror->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	/*
	 * Limit the request size so that we can still allocate a page array
	 * for it without upsetting the slab allocator.
	 */
	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
			sizeof(struct page *) > PAGE_SIZE)
		return 0;

	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
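
/*
 * Worked example (editorial note, not part of the original file): with 4K
 * pages and 8-byte pointers, the page-array check above rejects coalescing
 * once more than PAGE_SIZE / sizeof(struct page *) = 512 pages (2MB) would
 * be needed, so the pagevec allocated in nfs_generic_pgio() always fits in
 * a single page.
 */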

struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		spin_lock_init(&hdr->lock);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 *
 * @hdr: A header that has had nfs_generic_pgio called
 */
static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	if (hdr->args.context)
		put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}

/*
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	nfs_pgio_data_destroy(hdr);
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @count: Number of bytes to read
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
			      unsigned int count,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh     = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req);
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = req->wb_pgbase;
	hdr->args.pages  = hdr->page_array.pagevec;
	hdr->args.count  = count;
	hdr->args.context = get_nfs_open_context(req->wb_context);
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable  = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
		/* fall through: nothing to commit, so go fully stable */
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr   = &hdr->fattr;
	hdr->res.count   = count;
	hdr->res.eof     = 0;
	hdr->res.verf    = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}

/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;

	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}

int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};
	int ret = 0;

	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);

	dprintk("NFS: initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @hdr: pageio header
 */
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;

	hdr->completion_ops->completion(hdr);
}

static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
				   unsigned int bsize)
{
	INIT_LIST_HEAD(&mirror->pg_list);
	mirror->pg_bytes_written = 0;
	mirror->pg_count = 0;
	mirror->pg_bsize = bsize;
	mirror->pg_base = 0;
	mirror->pg_recoalesce = 0;
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	desc->pg_moreio = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_io_completion = NULL;
	desc->pg_dreq = NULL;
	desc->pg_bsize = bsize;

	desc->pg_mirror_count = 1;
	desc->pg_mirror_idx = 0;

	desc->pg_mirrors_dynamic = NULL;
	desc->pg_mirrors = desc->pg_mirrors_static;
	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
}
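
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how callers drive a descriptor — initialize, feed requests, flush.
 * example_issue_io(), my_compl_ops and my_rw_ops are hypothetical stand-ins
 * for the real read/write implementations, and real callers pass the
 * server rsize/wsize as @bsize; nfs_pgio_rw_ops is defined at the bottom
 * of this file.
 */
static void example_issue_io(struct inode *inode, struct list_head *reqs)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req;

	nfs_pageio_init(&desc, inode, &nfs_pgio_rw_ops,
			&my_compl_ops, &my_rw_ops, PAGE_SIZE, 0);
	while (!list_empty(reqs)) {
		req = nfs_list_entry(reqs->next);
		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&desc, req))
			break;	/* desc.pg_error holds the failure */
	}
	nfs_pageio_complete(&desc);	/* send any remaining coalesced I/O */
}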
 
/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
		task->tk_pid, task->tk_status);

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}

/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page		*req;
	struct page		**pages,
				*last_page;
	struct list_head *head = &mirror->pg_list;
	struct nfs_commit_info cinfo;
	struct nfs_page_array *pg_array = &hdr->page_array;
	unsigned int pagecount, pageused;
	gfp_t gfp_flags = GFP_KERNEL;

	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
	pg_array->npages = pagecount;

	if (pagecount <= ARRAY_SIZE(pg_array->page_array))
		pg_array->pagevec = pg_array->page_array;
	else {
		if (hdr->rw_mode == FMODE_WRITE)
			gfp_flags = GFP_NOIO;
		pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
		if (!pg_array->pagevec) {
			pg_array->npages = 0;
			nfs_pgio_error(hdr);
			desc->pg_error = -ENOMEM;
			return desc->pg_error;
		}
	}

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &hdr->pages);

		if (!last_page || last_page != req->wb_page) {
			pageused++;
			if (pageused > pagecount)
				break;
			*pages++ = last_page = req->wb_page;
		}
	}
	if (WARN_ON_ONCE(pageused != pagecount)) {
		nfs_pgio_error(hdr);
		desc->pg_error = -EINVAL;
		return desc->pg_error;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);

static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0)
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr,
					hdr->cred,
					NFS_PROTO(hdr->inode),
					desc->pg_rpc_callops,
					desc->pg_ioflags, 0);
	return ret;
}

static struct nfs_pgio_mirror *
nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
		unsigned int mirror_count)
{
	struct nfs_pgio_mirror *ret;
	unsigned int i;

	kfree(desc->pg_mirrors_dynamic);
	desc->pg_mirrors_dynamic = NULL;
	if (mirror_count == 1)
		return desc->pg_mirrors_static;
	ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_NOFS);
	if (ret != NULL) {
		for (i = 0; i < mirror_count; i++)
			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
		desc->pg_mirrors_dynamic = ret;
	}
	return ret;
}

/*
 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 *				by calling the pg_get_mirror_count op
 */
static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
				       struct nfs_page *req)
{
	unsigned int mirror_count = 1;

	if (pgio->pg_ops->pg_get_mirror_count)
		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
		return;

	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
		pgio->pg_error = -EINVAL;
		return;
	}

	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
	if (pgio->pg_mirrors == NULL) {
		pgio->pg_error = -ENOMEM;
		pgio->pg_mirrors = pgio->pg_mirrors_static;
		mirror_count = 1;
	}
	pgio->pg_mirror_count = mirror_count;
}

/*
 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 */
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
}

static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
	pgio->pg_mirrors = pgio->pg_mirrors_static;
	kfree(pgio->pg_mirrors_dynamic);
	pgio->pg_mirrors_dynamic = NULL;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
		const struct nfs_lock_context *l2)
{
	return l1->lockowner == l2->lockowner;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	size_t size;
	struct file_lock_context *flctx;

	if (prev) {
		if (!nfs_match_open_context(req->wb_context, prev->wb_context))
			return false;
		flctx = d_inode(req->wb_context->dentry)->i_flctx;
		if (flctx != NULL &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock)) &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return false;
		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
			return false;
		if (req->wb_page == prev->wb_page) {
			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
				return false;
		} else {
			if (req->wb_pgbase != 0 ||
			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
				return false;
		}
	}
	size = pgio->pg_ops->pg_test(pgio, prev, req);
	WARN_ON_ONCE(size > req->wb_bytes);
	if (size && size < req->wb_bytes)
		req->wb_bytes = size;
	return size > 0;
}
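
/*
 * Example (editorial note, not part of the original file): if @prev covers
 * bytes 0-511 of page index 3, a @req on the same page coalesces only when
 * its wb_pgbase is 512 and it continues the file offset; a @req on page
 * index 4 coalesces only when @prev ended at the page boundary
 * (wb_pgbase + wb_bytes == PAGE_SIZE) and @req starts at wb_pgbase 0.
 */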

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *prev = NULL;

	if (mirror->pg_count != 0) {
		prev = nfs_list_entry(mirror->pg_list.prev);
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		if (desc->pg_error < 0)
			return 0;
		mirror->pg_base = req->wb_pgbase;
	}
	if (!nfs_can_coalesce_requests(prev, req, desc))
		return 0;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &mirror->pg_list);
	mirror->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!list_empty(&mirror->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			mirror->pg_bytes_written += mirror->pg_count;
	}
	if (list_empty(&mirror->pg_list)) {
		mirror->pg_count = 0;
		mirror->pg_base = 0;
	}
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *subreq;
	unsigned int bytes_left = 0;
	unsigned int offset, pgbase;

	nfs_page_group_lock(req);

	subreq = req;
	bytes_left = subreq->wb_bytes;
	offset = subreq->wb_offset;
	pgbase = subreq->wb_pgbase;

	do {
		if (!nfs_pageio_do_add_request(desc, subreq)) {
			/* make sure pg_test call(s) did nothing */
			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
			WARN_ON_ONCE(subreq->wb_offset != offset);
			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);

			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0)
				return 0;
			if (mirror->pg_recoalesce)
				return 0;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req);
			continue;
		}

		/* check for buggy pg_test call(s) */
		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
		WARN_ON_ONCE(subreq->wb_bytes == 0);

		bytes_left -= subreq->wb_bytes;
		offset += subreq->wb_bytes;
		pgbase += subreq->wb_bytes;

		if (bytes_left) {
			subreq = nfs_create_request(req->wb_context,
					req->wb_page,
					subreq, pgbase, bytes_left);
			if (IS_ERR(subreq))
				goto err_ptr;
			nfs_lock_request(subreq);
			subreq->wb_offset  = offset;
			subreq->wb_index = req->wb_index;
		}
	} while (bytes_left > 0);

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	LIST_HEAD(head);

	do {
		list_splice_init(&mirror->pg_list, &head);
		mirror->pg_bytes_written -= mirror->pg_count;
		mirror->pg_count = 0;
		mirror->pg_base = 0;
		mirror->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0) {
				list_splice_tail(&head, &mirror->pg_list);
				mirror->pg_recoalesce = 1;
				return 0;
			}
			break;
		}
	} while (mirror->pg_recoalesce);
	return 1;
}

static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);

	return ret;
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	u32 midx;
	unsigned int pgbase, offset, bytes;
	struct nfs_page *dupreq, *lastreq;

	pgbase = req->wb_pgbase;
	offset = req->wb_offset;
	bytes = req->wb_bytes;

	nfs_pageio_setup_mirroring(desc, req);
	if (desc->pg_error < 0)
		goto out_failed;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		if (midx) {
			nfs_page_group_lock(req);

			/* find the last request */
			for (lastreq = req->wb_head;
			     lastreq->wb_this_page != req->wb_head;
			     lastreq = lastreq->wb_this_page)
				;

			dupreq = nfs_create_request(req->wb_context,
					req->wb_page, lastreq, pgbase, bytes);

			if (IS_ERR(dupreq)) {
				nfs_page_group_unlock(req);
				desc->pg_error = PTR_ERR(dupreq);
				goto out_failed;
			}

			nfs_lock_request(dupreq);
			nfs_page_group_unlock(req);
			dupreq->wb_offset = offset;
			dupreq->wb_index = req->wb_index;
		} else
			dupreq = req;

		if (nfs_pgio_has_mirroring(desc))
			desc->pg_mirror_idx = midx;
		if (!nfs_pageio_add_request_mirror(desc, dupreq))
			goto out_failed;
	}

	return 1;

out_failed:
	/*
	 * We might have failed before sending any reqs over wire.
	 * Clean up rest of the reqs in mirror pg_list.
	 */
	if (desc->pg_error) {
		struct nfs_pgio_mirror *mirror;
		void (*func)(struct list_head *);

		/* remember fatal errors */
		if (nfs_error_is_fatal(desc->pg_error))
			nfs_context_set_write_error(req->wb_context,
						    desc->pg_error);

		func = desc->pg_completion_ops->error_cleanup;
		for (midx = 0; midx < desc->pg_mirror_count; midx++) {
			mirror = &desc->pg_mirrors[midx];
			func(&mirror->pg_list);
		}
	}
	return 0;
}

/*
 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
 *				nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 * @mirror_idx: index of the mirror to complete
 */
static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
				       u32 mirror_idx)
{
	struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
	u32 restore_idx = desc->pg_mirror_idx;

	if (nfs_pgio_has_mirroring(desc))
		desc->pg_mirror_idx = mirror_idx;
	for (;;) {
		nfs_pageio_doio(desc);
		if (!mirror->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
	desc->pg_mirror_idx = restore_idx;
}

/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr - the pgio header to move request from
 * @desc - the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(failed);

	desc->pg_io_completion = hdr->io_completion;
	desc->pg_dreq = hdr->dreq;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(desc, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&failed)) {
		list_move(&failed, &hdr->pages);
		return desc->pg_error < 0 ? desc->pg_error : -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);

/**
 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++)
		nfs_pageio_complete_mirror(desc, midx);

	if (desc->pg_ops->pg_cleanup)
		desc->pg_ops->pg_cleanup(desc);
	nfs_pageio_cleanup_mirroring(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	struct nfs_pgio_mirror *mirror;
	struct nfs_page *prev;
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = &desc->pg_mirrors[midx];
		if (!list_empty(&mirror->pg_list)) {
			prev = nfs_list_entry(mirror->pg_list.prev);
			if (index != prev->wb_index + 1) {
				nfs_pageio_complete(desc);
				break;
			}
		}
	}
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};
v4.6
   1/*
   2 * linux/fs/nfs/pagelist.c
   3 *
   4 * A set of helper functions for managing NFS read and write requests.
   5 * The main purpose of these routines is to provide support for the
   6 * coalescing of several requests into a single RPC call.
   7 *
   8 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
   9 *
  10 */
  11
  12#include <linux/slab.h>
  13#include <linux/file.h>
  14#include <linux/sched.h>
  15#include <linux/sunrpc/clnt.h>
  16#include <linux/nfs.h>
  17#include <linux/nfs3.h>
  18#include <linux/nfs4.h>
  19#include <linux/nfs_page.h>
  20#include <linux/nfs_fs.h>
  21#include <linux/nfs_mount.h>
  22#include <linux/export.h>
  23
  24#include "internal.h"
  25#include "pnfs.h"
  26
  27#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
  28
  29static struct kmem_cache *nfs_page_cachep;
  30static const struct rpc_call_ops nfs_pgio_common_ops;
  31
  32static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
  33{
  34	p->npages = pagecount;
  35	if (pagecount <= ARRAY_SIZE(p->page_array))
  36		p->pagevec = p->page_array;
  37	else {
  38		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
  39		if (!p->pagevec)
  40			p->npages = 0;
  41	}
  42	return p->pagevec != NULL;
  43}
  44
  45struct nfs_pgio_mirror *
  46nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
  47{
  48	return nfs_pgio_has_mirroring(desc) ?
  49		&desc->pg_mirrors[desc->pg_mirror_idx] :
  50		&desc->pg_mirrors[0];
  51}
  52EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);
  53
  54void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
  55		       struct nfs_pgio_header *hdr,
  56		       void (*release)(struct nfs_pgio_header *hdr))
  57{
  58	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
  59
  60
  61	hdr->req = nfs_list_entry(mirror->pg_list.next);
  62	hdr->inode = desc->pg_inode;
  63	hdr->cred = hdr->req->wb_context->cred;
  64	hdr->io_start = req_offset(hdr->req);
  65	hdr->good_bytes = mirror->pg_count;
 
  66	hdr->dreq = desc->pg_dreq;
  67	hdr->layout_private = desc->pg_layout_private;
  68	hdr->release = release;
  69	hdr->completion_ops = desc->pg_completion_ops;
  70	if (hdr->completion_ops->init_hdr)
  71		hdr->completion_ops->init_hdr(hdr);
  72
  73	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
  74}
  75EXPORT_SYMBOL_GPL(nfs_pgheader_init);
  76
  77void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
  78{
  79	spin_lock(&hdr->lock);
  80	if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
  81	    || pos < hdr->io_start + hdr->good_bytes) {
  82		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
  83		hdr->good_bytes = pos - hdr->io_start;
  84		hdr->error = error;
  85	}
  86	spin_unlock(&hdr->lock);
  87}
  88
  89static inline struct nfs_page *
  90nfs_page_alloc(void)
  91{
  92	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
  93	if (p)
  94		INIT_LIST_HEAD(&p->wb_list);
  95	return p;
  96}
  97
  98static inline void
  99nfs_page_free(struct nfs_page *p)
 100{
 101	kmem_cache_free(nfs_page_cachep, p);
 102}
 103
 104/**
 105 * nfs_iocounter_wait - wait for i/o to complete
 106 * @l_ctx: nfs_lock_context with io_counter to use
 107 *
 108 * returns -ERESTARTSYS if interrupted by a fatal signal.
 109 * Otherwise returns 0 once the io_count hits 0.
 110 */
 111int
 112nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
 113{
 114	return wait_on_atomic_t(&l_ctx->io_count, nfs_wait_atomic_killable,
 115			TASK_KILLABLE);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 116}
 
 117
 118/*
 119 * nfs_page_group_lock - lock the head of the page group
 120 * @req - request in group that is to be locked
 121 * @nonblock - if true don't block waiting for lock
 122 *
 123 * this lock must be held if modifying the page group list
 124 *
 125 * return 0 on success, < 0 on error: -EDELAY if nonblocking or the
 126 * result from wait_on_bit_lock
 127 *
 128 * NOTE: calling with nonblock=false should always have set the
 129 *       lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
 130 *       with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
 131 */
 132int
 133nfs_page_group_lock(struct nfs_page *req, bool nonblock)
 134{
 135	struct nfs_page *head = req->wb_head;
 136
 137	WARN_ON_ONCE(head != head->wb_head);
 138
 139	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
 140		return 0;
 141
 142	if (!nonblock)
 143		return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
 
 144				TASK_UNINTERRUPTIBLE);
 145
 146	return -EAGAIN;
 147}
 148
 149/*
 150 * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
 151 * @req - a request in the group
 152 *
 153 * This is a blocking call to wait for the group lock to be cleared.
 154 */
 155void
 156nfs_page_group_lock_wait(struct nfs_page *req)
 157{
 158	struct nfs_page *head = req->wb_head;
 159
 160	WARN_ON_ONCE(head != head->wb_head);
 161
 162	wait_on_bit(&head->wb_flags, PG_HEADLOCK,
 163		TASK_UNINTERRUPTIBLE);
 164}
 165
 166/*
 167 * nfs_page_group_unlock - unlock the head of the page group
 168 * @req - request in group that is to be unlocked
 169 */
 170void
 171nfs_page_group_unlock(struct nfs_page *req)
 172{
 173	struct nfs_page *head = req->wb_head;
 174
 175	WARN_ON_ONCE(head != head->wb_head);
 176
 177	smp_mb__before_atomic();
 178	clear_bit(PG_HEADLOCK, &head->wb_flags);
 179	smp_mb__after_atomic();
 
 
 180	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
 181}
 182
 183/*
 184 * nfs_page_group_sync_on_bit_locked
 185 *
 186 * must be called with page group lock held
 187 */
 188static bool
 189nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
 190{
 191	struct nfs_page *head = req->wb_head;
 192	struct nfs_page *tmp;
 193
 194	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
 195	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));
 196
 197	tmp = req->wb_this_page;
 198	while (tmp != req) {
 199		if (!test_bit(bit, &tmp->wb_flags))
 200			return false;
 201		tmp = tmp->wb_this_page;
 202	}
 203
 204	/* true! reset all bits */
 205	tmp = req;
 206	do {
 207		clear_bit(bit, &tmp->wb_flags);
 208		tmp = tmp->wb_this_page;
 209	} while (tmp != req);
 210
 211	return true;
 212}
 213
 214/*
 215 * nfs_page_group_sync_on_bit - set bit on current request, but only
 216 *   return true if the bit is set for all requests in page group
 217 * @req - request in page group
 218 * @bit - PG_* bit that is used to sync page group
 219 */
 220bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
 221{
 222	bool ret;
 223
 224	nfs_page_group_lock(req, false);
 225	ret = nfs_page_group_sync_on_bit_locked(req, bit);
 226	nfs_page_group_unlock(req);
 227
 228	return ret;
 229}
 230
 231/*
 232 * nfs_page_group_init - Initialize the page group linkage for @req
 233 * @req - a new nfs request
 234 * @prev - the previous request in page group, or NULL if @req is the first
 235 *         or only request in the group (the head).
 236 */
 237static inline void
 238nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
 239{
 240	struct inode *inode;
 241	WARN_ON_ONCE(prev == req);
 242
 243	if (!prev) {
 244		/* a head request */
 245		req->wb_head = req;
 246		req->wb_this_page = req;
 247	} else {
 248		/* a subrequest */
 249		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
 250		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
 251		req->wb_head = prev->wb_head;
 252		req->wb_this_page = prev->wb_this_page;
 253		prev->wb_this_page = req;
 254
 255		/* All subrequests take a ref on the head request until
 256		 * nfs_page_group_destroy is called */
 257		kref_get(&req->wb_head->wb_kref);
 258
 259		/* grab extra ref and bump the request count if head request
 260		 * has extra ref from the write/commit path to handle handoff
 261		 * between write and commit lists. */
 262		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
 263			inode = page_file_mapping(req->wb_page)->host;
 264			set_bit(PG_INODE_REF, &req->wb_flags);
 265			kref_get(&req->wb_kref);
 266			spin_lock(&inode->i_lock);
 267			NFS_I(inode)->nrequests++;
 268			spin_unlock(&inode->i_lock);
 269		}
 270	}
 271}
 272
 273/*
 274 * nfs_page_group_destroy - sync the destruction of page groups
 275 * @req - request that no longer needs the page group
 276 *
 277 * releases the page group reference from each member once all
 278 * members have called this function.
 279 */
 280static void
 281nfs_page_group_destroy(struct kref *kref)
 282{
 283	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
 
 284	struct nfs_page *tmp, *next;
 285
 286	/* subrequests must release the ref on the head request */
 287	if (req->wb_head != req)
 288		nfs_release_request(req->wb_head);
 289
 290	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
 291		return;
 292
 293	tmp = req;
 294	do {
 295		next = tmp->wb_this_page;
 296		/* unlink and free */
 297		tmp->wb_this_page = tmp;
 298		tmp->wb_head = tmp;
 299		nfs_free_request(tmp);
 300		tmp = next;
 301	} while (tmp != req);
 
 
 
 
 302}
 303
 304/**
 305 * nfs_create_request - Create an NFS read/write request.
 306 * @ctx: open context to use
 307 * @page: page to write
 308 * @last: last nfs request created for this page group or NULL if head
 309 * @offset: starting offset within the page for the write
 310 * @count: number of bytes to read/write
 311 *
 312 * The page must be locked by the caller. This makes sure we never
 313 * create two different requests for the same page.
 314 * User should ensure it is safe to sleep in this function.
 315 */
 316struct nfs_page *
 317nfs_create_request(struct nfs_open_context *ctx, struct page *page,
 318		   struct nfs_page *last, unsigned int offset,
 319		   unsigned int count)
 320{
 321	struct nfs_page		*req;
 322	struct nfs_lock_context *l_ctx;
 323
 324	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
 325		return ERR_PTR(-EBADF);
 326	/* try to allocate the request struct */
 327	req = nfs_page_alloc();
 328	if (req == NULL)
 329		return ERR_PTR(-ENOMEM);
 330
 331	/* get lock context early so we can deal with alloc failures */
 332	l_ctx = nfs_get_lock_context(ctx);
 333	if (IS_ERR(l_ctx)) {
 334		nfs_page_free(req);
 335		return ERR_CAST(l_ctx);
 336	}
 337	req->wb_lock_context = l_ctx;
 338	atomic_inc(&l_ctx->io_count);
 339
 340	/* Initialize the request struct. Initially, we assume a
 341	 * long write-back delay. This will be adjusted in
 342	 * update_nfs_request below if the region is not locked. */
 343	req->wb_page    = page;
 344	req->wb_index	= page_file_index(page);
 345	get_page(page);
 
 
 346	req->wb_offset  = offset;
 347	req->wb_pgbase	= offset;
 348	req->wb_bytes   = count;
 349	req->wb_context = get_nfs_open_context(ctx);
 350	kref_init(&req->wb_kref);
 351	nfs_page_group_init(req, last);
 352	return req;
 353}
 354
 355/**
 356 * nfs_unlock_request - Unlock request and wake up sleepers.
 357 * @req:
 358 */
 359void nfs_unlock_request(struct nfs_page *req)
 360{
 361	if (!NFS_WBACK_BUSY(req)) {
 362		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
 363		BUG();
 364	}
 365	smp_mb__before_atomic();
 366	clear_bit(PG_BUSY, &req->wb_flags);
 367	smp_mb__after_atomic();
 
 
 368	wake_up_bit(&req->wb_flags, PG_BUSY);
 369}
 370
 371/**
 372 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 373 * @req:
 374 */
 375void nfs_unlock_and_release_request(struct nfs_page *req)
 376{
 377	nfs_unlock_request(req);
 378	nfs_release_request(req);
 379}
 380
 381/*
 382 * nfs_clear_request - Free up all resources allocated to the request
 383 * @req:
 384 *
 385 * Release page and open context resources associated with a read/write
 386 * request after it has completed.
 387 */
 388static void nfs_clear_request(struct nfs_page *req)
 389{
 390	struct page *page = req->wb_page;
 391	struct nfs_open_context *ctx = req->wb_context;
 392	struct nfs_lock_context *l_ctx = req->wb_lock_context;
 393
 394	if (page != NULL) {
 395		put_page(page);
 396		req->wb_page = NULL;
 397	}
 398	if (l_ctx != NULL) {
 399		if (atomic_dec_and_test(&l_ctx->io_count))
 400			wake_up_atomic_t(&l_ctx->io_count);
 
 
 
 401		nfs_put_lock_context(l_ctx);
 402		req->wb_lock_context = NULL;
 403	}
 404	if (ctx != NULL) {
 405		put_nfs_open_context(ctx);
 406		req->wb_context = NULL;
 407	}
 408}
 409
 410/**
 411 * nfs_release_request - Release the count on an NFS read/write request
 412 * @req: request to release
 413 *
 414 * Note: Should never be called with the spinlock held!
 415 */
 416void nfs_free_request(struct nfs_page *req)
 417{
 418	WARN_ON_ONCE(req->wb_this_page != req);
 419
 420	/* extra debug: make sure no sync bits are still set */
 421	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
 422	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
 423	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
 424	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
 425	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));
 426
 427	/* Release struct file and open context */
 428	nfs_clear_request(req);
 429	nfs_page_free(req);
 430}
 431
 432void nfs_release_request(struct nfs_page *req)
 433{
 434	kref_put(&req->wb_kref, nfs_page_group_destroy);
 435}
 
 436
 437/**
 438 * nfs_wait_on_request - Wait for a request to complete.
 439 * @req: request to wait upon.
 440 *
 441 * Interruptible by fatal signals only.
 442 * The user is responsible for holding a count on the request.
 443 */
 444int
 445nfs_wait_on_request(struct nfs_page *req)
 446{
 
 
 
 
 447	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
 448			      TASK_UNINTERRUPTIBLE);
 449}
 
 450
 451/*
 452 * nfs_generic_pg_test - determine if requests can be coalesced
 453 * @desc: pointer to descriptor
 454 * @prev: previous request in desc, or NULL
 455 * @req: this request
 456 *
 457 * Returns zero if @req can be coalesced into @desc, otherwise it returns
 458 * the size of the request.
 459 */
 460size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
 461			   struct nfs_page *prev, struct nfs_page *req)
 462{
 463	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 464
 465
 466	if (mirror->pg_count > mirror->pg_bsize) {
 467		/* should never happen */
 468		WARN_ON_ONCE(1);
 469		return 0;
 470	}
 471
 472	/*
 473	 * Limit the request size so that we can still allocate a page array
 474	 * for it without upsetting the slab allocator.
 475	 */
 476	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
 477			sizeof(struct page *) > PAGE_SIZE)
 478		return 0;
 479
 480	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
 481}
 482EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
 483
 484struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
 485{
 486	struct nfs_pgio_header *hdr = ops->rw_alloc_header();
 487
 488	if (hdr) {
 489		INIT_LIST_HEAD(&hdr->pages);
 490		spin_lock_init(&hdr->lock);
 491		hdr->rw_ops = ops;
 492	}
 493	return hdr;
 494}
 495EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
 496
 497/*
 498 * nfs_pgio_header_free - Free a read or write header
 499 * @hdr: The header to free
 500 */
 501void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
 502{
 503	hdr->rw_ops->rw_free_header(hdr);
 504}
 505EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
 506
 507/**
 508 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 509 *
 510 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 511 * be called again.
 512 *
 513 * @hdr: A header that has had nfs_generic_pgio called
 514 */
 515void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
 516{
 517	if (hdr->args.context)
 518		put_nfs_open_context(hdr->args.context);
 519	if (hdr->page_array.pagevec != hdr->page_array.page_array)
 520		kfree(hdr->page_array.pagevec);
 521}
 522EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
 
 
 
 
 
 
 
 
 
 
 523
 524/**
 525 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 526 * @hdr: The pageio hdr
 527 * @count: Number of bytes to read
 528 * @offset: Initial offset
 529 * @how: How to commit data (writes only)
 530 * @cinfo: Commit information for the call (writes only)
 531 */
 532static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
 533			      unsigned int count, unsigned int offset,
 534			      int how, struct nfs_commit_info *cinfo)
 535{
 536	struct nfs_page *req = hdr->req;
 537
 538	/* Set up the RPC argument and reply structs
 539	 * NB: take care not to mess about with hdr->commit et al. */
 540
 541	hdr->args.fh     = NFS_FH(hdr->inode);
 542	hdr->args.offset = req_offset(req) + offset;
 543	/* pnfs_set_layoutcommit needs this */
 544	hdr->mds_offset = hdr->args.offset;
 545	hdr->args.pgbase = req->wb_pgbase + offset;
 546	hdr->args.pages  = hdr->page_array.pagevec;
 547	hdr->args.count  = count;
 548	hdr->args.context = get_nfs_open_context(req->wb_context);
 549	hdr->args.lock_context = req->wb_lock_context;
 550	hdr->args.stable  = NFS_UNSTABLE;
 551	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
 552	case 0:
 553		break;
 554	case FLUSH_COND_STABLE:
 555		if (nfs_reqs_to_commit(cinfo))
 556			break;
 557	default:	/* FLUSH_STABLE, or FLUSH_COND_STABLE with nothing to commit */
 558		hdr->args.stable = NFS_FILE_SYNC;
 559	}
 560
 561	hdr->res.fattr   = &hdr->fattr;
 562	hdr->res.count   = count;
 563	hdr->res.eof     = 0;
 564	hdr->res.verf    = &hdr->verf;
 565	nfs_fattr_init(&hdr->fattr);
 566}
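/*
 * Summary of the stable-storage selection above (a reading aid, not new
 * behaviour): how == 0 leaves NFS_UNSTABLE; FLUSH_COND_STABLE stays
 * unstable only while @cinfo still has requests waiting for a commit and
 * otherwise falls through; FLUSH_STABLE always yields NFS_FILE_SYNC.
 */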
 567
 568/**
 569 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 570 * @task: The current task
 571 * @calldata: pageio header to prepare
 572 */
 573static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
 574{
 575	struct nfs_pgio_header *hdr = calldata;
 576	int err;
 577	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
 578	if (err)
 579		rpc_exit(task, err);
 580}
 581
 582int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
 583		      struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
 584		      const struct rpc_call_ops *call_ops, int how, int flags)
 585{
 586	struct rpc_task *task;
 587	struct rpc_message msg = {
 588		.rpc_argp = &hdr->args,
 589		.rpc_resp = &hdr->res,
 590		.rpc_cred = cred,
 591	};
 592	struct rpc_task_setup task_setup_data = {
 593		.rpc_client = clnt,
 594		.task = &hdr->task,
 595		.rpc_message = &msg,
 596		.callback_ops = call_ops,
 597		.callback_data = hdr,
 598		.workqueue = nfsiod_workqueue,
 599		.flags = RPC_TASK_ASYNC | flags,
 600	};
 601	int ret = 0;
 602
 603	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);
 604
 605	dprintk("NFS: initiated pgio call "
 606		"(req %s/%llu, %u bytes @ offset %llu)\n",
 607		hdr->inode->i_sb->s_id,
 608		(unsigned long long)NFS_FILEID(hdr->inode),
 609		hdr->args.count,
 610		(unsigned long long)hdr->args.offset);
 611
 612	task = rpc_run_task(&task_setup_data);
 613	if (IS_ERR(task)) {
 614		ret = PTR_ERR(task);
 615		goto out;
 616	}
 617	if (how & FLUSH_SYNC) {
 618		ret = rpc_wait_for_completion_task(task);
 619		if (ret == 0)
 620			ret = task->tk_status;
 621	}
 622	rpc_put_task(task);
 623out:
 624	return ret;
 625}
 626EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
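/*
 * Illustrative caller pattern (a sketch mirroring nfs_generic_pg_pgios()
 * below). The task always runs async on nfsiod_workqueue; FLUSH_SYNC in
 * @how only makes this function wait and return the task's tk_status:
 *
 *	ret = nfs_initiate_pgio(NFS_CLIENT(inode), hdr, hdr->cred,
 *				NFS_PROTO(inode), &nfs_pgio_common_ops,
 *				FLUSH_SYNC, 0);
 */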
 627
 628/**
 629 * nfs_pgio_error - Clean up from a pageio error
 630 * @hdr: pageio header to clean up and release
 631 */
 633static void nfs_pgio_error(struct nfs_pgio_header *hdr)
 634{
 635	set_bit(NFS_IOHDR_REDO, &hdr->flags);
 636	nfs_pgio_data_destroy(hdr);
 637	hdr->completion_ops->completion(hdr);
 638}
 639
 640/**
 641 * nfs_pgio_release - Release pageio data
 642 * @calldata: The pageio header to release
 643 */
 644static void nfs_pgio_release(void *calldata)
 645{
 646	struct nfs_pgio_header *hdr = calldata;
 647	nfs_pgio_data_destroy(hdr);
 648	hdr->completion_ops->completion(hdr);
 649}
 650
 651static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
 652				   unsigned int bsize)
 653{
 654	INIT_LIST_HEAD(&mirror->pg_list);
 655	mirror->pg_bytes_written = 0;
 656	mirror->pg_count = 0;
 657	mirror->pg_bsize = bsize;
 658	mirror->pg_base = 0;
 659	mirror->pg_recoalesce = 0;
 660}
 661
 662/**
 663 * nfs_pageio_init - initialise a page io descriptor
 664 * @desc: pointer to descriptor
 665 * @inode: pointer to inode
 666 * @pg_ops: pointer to pageio operations
 667 * @compl_ops: pointer to pageio completion operations
 668 * @rw_ops: pointer to nfs read/write operations
 669 * @bsize: io block size
 670 * @io_flags: extra parameters for the io function
 671 */
 672void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
 673		     struct inode *inode,
 674		     const struct nfs_pageio_ops *pg_ops,
 675		     const struct nfs_pgio_completion_ops *compl_ops,
 676		     const struct nfs_rw_ops *rw_ops,
 677		     size_t bsize,
 678		     int io_flags)
 679{
 680	struct nfs_pgio_mirror *new;
 681	int i;
 682
 683	desc->pg_moreio = 0;
 684	desc->pg_inode = inode;
 685	desc->pg_ops = pg_ops;
 686	desc->pg_completion_ops = compl_ops;
 687	desc->pg_rw_ops = rw_ops;
 688	desc->pg_ioflags = io_flags;
 689	desc->pg_error = 0;
 690	desc->pg_lseg = NULL;
 691	desc->pg_dreq = NULL;
 692	desc->pg_layout_private = NULL;
 693	desc->pg_bsize = bsize;
 694
 695	desc->pg_mirror_count = 1;
 696	desc->pg_mirror_idx = 0;
 697
 698	if (pg_ops->pg_get_mirror_count) {
 699		/* until we have a request, we don't have an lseg and have no
 700		 * idea how many mirrors there will be */
 701		new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
 702			      sizeof(struct nfs_pgio_mirror), GFP_KERNEL);
 703		desc->pg_mirrors_dynamic = new;
 704		desc->pg_mirrors = new;
		if (!new) {
			/* kcalloc() may fail: fall back to the static mirror
			 * so that later error paths still find a valid
			 * pg_list, and let pg_error report the failure */
			desc->pg_error = -ENOMEM;
			desc->pg_mirrors = desc->pg_mirrors_static;
			nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
			return;
		}
 705
 706		for (i = 0; i < NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX; i++)
 707			nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize);
 708	} else {
 709		desc->pg_mirrors_dynamic = NULL;
 710		desc->pg_mirrors = desc->pg_mirrors_static;
 711		nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
 712	}
 713}
 714EXPORT_SYMBOL_GPL(nfs_pageio_init);
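/*
 * Illustrative sketch of driving a descriptor (hypothetical caller; the
 * real setup paths live in read.c and write.c, and next_request() below
 * is a made-up helper):
 *
 *	struct nfs_pageio_descriptor desc;
 *
 *	nfs_pageio_init(&desc, inode, &nfs_pgio_rw_ops, compl_ops,
 *			rw_ops, NFS_SERVER(inode)->rsize, 0);
 *	while ((req = next_request()) != NULL)
 *		if (!nfs_pageio_add_request(&desc, req))
 *			break;			// inspect desc.pg_error
 *	nfs_pageio_complete(&desc);
 */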
 715
 716/**
 717 * nfs_pgio_result - Basic pageio error handling
 718 * @task: The task that ran
 719 * @calldata: Pageio header to check
 720 */
 721static void nfs_pgio_result(struct rpc_task *task, void *calldata)
 722{
 723	struct nfs_pgio_header *hdr = calldata;
 724	struct inode *inode = hdr->inode;
 725
 726	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
 727		task->tk_pid, task->tk_status);
 728
 729	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
 730		return;
 731	if (task->tk_status < 0)
 732		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
 733	else
 734		hdr->rw_ops->rw_result(task, hdr);
 735}
 736
 737/*
 738 * Populate the I/O header and page array for the requests queued on the
 739 * current mirror of @desc. Each page must have been locked by the caller.
 740 *
 741 * It may happen that the page we're passed is not marked dirty.
 742 * This is the case if nfs_updatepage detects a conflicting request
 743 * that has been written but not committed.
 744 */
 745int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 746		     struct nfs_pgio_header *hdr)
 747{
 748	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 749
 750	struct nfs_page		*req;
 751	struct page		**pages,
 752				*last_page;
 753	struct list_head *head = &mirror->pg_list;
 754	struct nfs_commit_info cinfo;
 755	unsigned int pagecount, pageused;
 756
 757	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
 758	if (!nfs_pgarray_set(&hdr->page_array, pagecount)) {
 759		nfs_pgio_error(hdr);
 760		desc->pg_error = -ENOMEM;
 761		return desc->pg_error;
 762	}
 763
 764	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
 765	pages = hdr->page_array.pagevec;
 766	last_page = NULL;
 767	pageused = 0;
 768	while (!list_empty(head)) {
 769		req = nfs_list_entry(head->next);
 770		nfs_list_remove_request(req);
 771		nfs_list_add_request(req, &hdr->pages);
 772
 773		if (!last_page || last_page != req->wb_page) {
 774			pageused++;
 775			if (pageused > pagecount)
 776				break;
 777			*pages++ = last_page = req->wb_page;
 778		}
 779	}
 780	if (WARN_ON_ONCE(pageused != pagecount)) {
 781		nfs_pgio_error(hdr);
 782		desc->pg_error = -EINVAL;
 783		return desc->pg_error;
 784	}
 785
 786	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
 787	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
 788		desc->pg_ioflags &= ~FLUSH_COND_STABLE;
 789
 790	/* Set up the argument struct */
 791	nfs_pgio_rpcsetup(hdr, mirror->pg_count, 0, desc->pg_ioflags, &cinfo);
 792	desc->pg_rpc_callops = &nfs_pgio_common_ops;
 793	return 0;
 794}
 795EXPORT_SYMBOL_GPL(nfs_generic_pgio);
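/*
 * Page-count example (illustrative): nfs_page_array_len() rounds the
 * span up to whole pages, so with PAGE_SIZE == 4096 a mirror with
 * pg_base 512 and pg_count 8192 needs (512 + 8192 + 4095) >> 12 == 3
 * page pointers, even though 8192 bytes alone would fit in two pages.
 */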
 796
 797static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
 798{
 799	struct nfs_pgio_header *hdr;
 800	int ret;
 801
 802	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
 803	if (!hdr) {
 804		desc->pg_error = -ENOMEM;
 805		return desc->pg_error;
 806	}
 807	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
 808	ret = nfs_generic_pgio(desc, hdr);
 809	if (ret == 0)
 810		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
 811					hdr,
 812					hdr->cred,
 813					NFS_PROTO(hdr->inode),
 814					desc->pg_rpc_callops,
 815					desc->pg_ioflags, 0);
 816	return ret;
 817}
 818
 819/*
 820 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 821 *				by calling the pg_get_mirror_count op
 822 */
 823static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
 824				       struct nfs_page *req)
 825{
 826	int mirror_count = 1;
 827
 828	if (!pgio->pg_ops->pg_get_mirror_count)
 829		return 0;
 830
 831	mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
 832
 833	if (pgio->pg_error < 0)
 834		return pgio->pg_error;
 835
 836	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX)
 837		return -EINVAL;
 838
 839	if (WARN_ON_ONCE(!pgio->pg_mirrors_dynamic))
 840		return -EINVAL;
 841
 842	pgio->pg_mirror_count = mirror_count;
 843
 844	return 0;
 845}
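/*
 * A minimal pg_get_mirror_count sketch (illustrative; the flexfiles
 * layout driver provides the real implementation, and the helper named
 * here is hypothetical):
 *
 *	static u32 example_pg_get_mirror_count(struct nfs_pageio_descriptor *pgio,
 *					       struct nfs_page *req)
 *	{
 *		return my_lseg_mirror_count(pgio, req);	// 1..MIRROR_MAX
 *	}
 */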
 846
 847/*
 848 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 849 */
 850void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
 851{
 852	pgio->pg_mirror_count = 1;
 853	pgio->pg_mirror_idx = 0;
 854}
 855
 856static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
 857{
 858	pgio->pg_mirror_count = 1;
 859	pgio->pg_mirror_idx = 0;
 860	pgio->pg_mirrors = pgio->pg_mirrors_static;
 861	kfree(pgio->pg_mirrors_dynamic);
 862	pgio->pg_mirrors_dynamic = NULL;
 863}
 864
 865static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
 866		const struct nfs_lock_context *l2)
 867{
 868	return l1->lockowner.l_owner == l2->lockowner.l_owner
 869		&& l1->lockowner.l_pid == l2->lockowner.l_pid;
 870}
 871
 872/**
 873 * nfs_can_coalesce_requests - test two requests for compatibility
 874 * @prev: pointer to the previous nfs_page, or NULL
 875 * @req: pointer to the nfs_page being tested
 * @pgio: pointer to the nfs_pageio_descriptor doing the coalescing
 876 *
 877 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 878 * page data area they describe is contiguous, and that their RPC
 879 * credentials, NFSv4 open state, and lockowners are the same.
 880 *
 881 * Return 'true' if this is the case, else return 'false'.
 882 */
 883static bool nfs_can_coalesce_requests(struct nfs_page *prev,
 884				      struct nfs_page *req,
 885				      struct nfs_pageio_descriptor *pgio)
 886{
 887	size_t size;
 888	struct file_lock_context *flctx;
 889
 890	if (prev) {
 891		if (!nfs_match_open_context(req->wb_context, prev->wb_context))
 892			return false;
 893		flctx = d_inode(req->wb_context->dentry)->i_flctx;
 894		if (flctx != NULL &&
 895		    !(list_empty_careful(&flctx->flc_posix) &&
 896		      list_empty_careful(&flctx->flc_flock)) &&
 897		    !nfs_match_lock_context(req->wb_lock_context,
 898					    prev->wb_lock_context))
 899			return false;
 900		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
 901			return false;
 902		if (req->wb_page == prev->wb_page) {
 903			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
 904				return false;
 905		} else {
 906			if (req->wb_pgbase != 0 ||
 907			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
 908				return false;
 909		}
 910	}
 911	size = pgio->pg_ops->pg_test(pgio, prev, req);
 912	WARN_ON_ONCE(size > req->wb_bytes);
 913	if (size && size < req->wb_bytes)
 914		req->wb_bytes = size;
 915	return size > 0;
 916}
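/*
 * Worked example (illustrative, PAGE_SIZE == 4096): with @prev covering
 * bytes 0-2047 of its page (wb_pgbase 0, wb_bytes 2048), a @req on the
 * same page coalesces only at wb_pgbase 2048 and the matching file
 * offset; a @req on the next page can never coalesce here, because
 * @prev stops short of PAGE_SIZE (2048 != 4096).
 */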
 917
 918/**
 919 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 920 * @desc: destination io descriptor
 921 * @req: request
 922 *
 923 * Returns true if the request 'req' was successfully coalesced into the
 924 * existing list of pages 'desc'.
 925 */
 926static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
 927				     struct nfs_page *req)
 928{
 929	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 930
 931	struct nfs_page *prev = NULL;
 932
 933	if (mirror->pg_count != 0) {
 934		prev = nfs_list_entry(mirror->pg_list.prev);
 935	} else {
 936		if (desc->pg_ops->pg_init)
 937			desc->pg_ops->pg_init(desc, req);
 938		if (desc->pg_error < 0)
 939			return 0;
 940		mirror->pg_base = req->wb_pgbase;
 941	}
 942	if (!nfs_can_coalesce_requests(prev, req, desc))
 943		return 0;
 944	nfs_list_remove_request(req);
 945	nfs_list_add_request(req, &mirror->pg_list);
 946	mirror->pg_count += req->wb_bytes;
 947	return 1;
 948}
 949
 950/*
 951 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 952 */
 953static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
 954{
 955	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 956
 957
 958	if (!list_empty(&mirror->pg_list)) {
 959		int error = desc->pg_ops->pg_doio(desc);
 960		if (error < 0)
 961			desc->pg_error = error;
 962		else
 963			mirror->pg_bytes_written += mirror->pg_count;
 964	}
 965	if (list_empty(&mirror->pg_list)) {
 966		mirror->pg_count = 0;
 967		mirror->pg_base = 0;
 968	}
 969}
 970
 971/**
 972 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 973 * @desc: destination io descriptor
 974 * @req: request
 975 *
 976 * This may split a request into subrequests which are all part of the
 977 * same page group.
 978 *
 979 * Returns true if the request 'req' was successfully coalesced into the
 980 * existing list of pages 'desc'.
 981 */
 982static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 983			   struct nfs_page *req)
 984{
 985	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 986
 987	struct nfs_page *subreq;
 988	unsigned int bytes_left = 0;
 989	unsigned int offset, pgbase;
 990
 991	nfs_page_group_lock(req, false);
 992
 993	subreq = req;
 994	bytes_left = subreq->wb_bytes;
 995	offset = subreq->wb_offset;
 996	pgbase = subreq->wb_pgbase;
 997
 998	do {
 999		if (!nfs_pageio_do_add_request(desc, subreq)) {
1000			/* make sure pg_test call(s) did nothing */
1001			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
1002			WARN_ON_ONCE(subreq->wb_offset != offset);
1003			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);
1004
1005			nfs_page_group_unlock(req);
1006			desc->pg_moreio = 1;
1007			nfs_pageio_doio(desc);
1008			if (desc->pg_error < 0)
1009				return 0;
1010			if (mirror->pg_recoalesce)
1011				return 0;
1012			/* retry add_request for this subreq */
1013			nfs_page_group_lock(req, false);
1014			continue;
1015		}
1016
1017		/* check for buggy pg_test call(s) */
1018		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
1019		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
1020		WARN_ON_ONCE(subreq->wb_bytes == 0);
1021
1022		bytes_left -= subreq->wb_bytes;
1023		offset += subreq->wb_bytes;
1024		pgbase += subreq->wb_bytes;
1025
1026		if (bytes_left) {
1027			subreq = nfs_create_request(req->wb_context,
1028					req->wb_page,
1029					subreq, pgbase, bytes_left);
1030			if (IS_ERR(subreq))
1031				goto err_ptr;
1032			nfs_lock_request(subreq);
1033			subreq->wb_offset  = offset;
1034			subreq->wb_index = req->wb_index;
1035		}
1036	} while (bytes_left > 0);
1037
1038	nfs_page_group_unlock(req);
1039	return 1;
1040err_ptr:
1041	desc->pg_error = PTR_ERR(subreq);
1042	nfs_page_group_unlock(req);
1043	return 0;
1044}
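/*
 * Splitting example (illustrative): if pg_test trims a 4096-byte request
 * to 1024 bytes (say only 1024 bytes remain below pg_bsize), the loop
 * above queues a 1024-byte subrequest, creates a new subrequest for the
 * remaining 3072 bytes of the same page via nfs_create_request(), and
 * repeats, so one nfs_page can fan out into several wire-sized
 * subrequests within its page group.
 */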
1045
1046static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
1047{
1048	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1049	LIST_HEAD(head);
1050
1051	do {
1052		list_splice_init(&mirror->pg_list, &head);
1053		mirror->pg_bytes_written -= mirror->pg_count;
1054		mirror->pg_count = 0;
1055		mirror->pg_base = 0;
1056		mirror->pg_recoalesce = 0;
1057
1058		while (!list_empty(&head)) {
1059			struct nfs_page *req;
1060
1061			req = list_first_entry(&head, struct nfs_page, wb_list);
1062			nfs_list_remove_request(req);
1063			if (__nfs_pageio_add_request(desc, req))
1064				continue;
1065			if (desc->pg_error < 0) {
1066				list_splice_tail(&head, &mirror->pg_list);
1067				mirror->pg_recoalesce = 1;
1068				return 0;
1069			}
1070			break;
1071		}
1072	} while (mirror->pg_recoalesce);
1073	return 1;
1074}
1075
1076static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
1077		struct nfs_page *req)
1078{
1079	int ret;
1080
1081	do {
1082		ret = __nfs_pageio_add_request(desc, req);
1083		if (ret)
1084			break;
1085		if (desc->pg_error < 0)
1086			break;
1087		ret = nfs_do_recoalesce(desc);
1088	} while (ret);
1089
1090	return ret;
1091}
1092
1093int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
1094			   struct nfs_page *req)
1095{
1096	u32 midx;
1097	unsigned int pgbase, offset, bytes;
1098	struct nfs_page *dupreq, *lastreq;
1099
1100	pgbase = req->wb_pgbase;
1101	offset = req->wb_offset;
1102	bytes = req->wb_bytes;
1103
1104	nfs_pageio_setup_mirroring(desc, req);
1105	if (desc->pg_error < 0)
1106		goto out_failed;
1107
1108	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
1109		if (midx) {
1110			nfs_page_group_lock(req, false);
1111
1112			/* find the last request */
1113			for (lastreq = req->wb_head;
1114			     lastreq->wb_this_page != req->wb_head;
1115			     lastreq = lastreq->wb_this_page)
1116				;
1117
1118			dupreq = nfs_create_request(req->wb_context,
1119					req->wb_page, lastreq, pgbase, bytes);
1120
1121			if (IS_ERR(dupreq)) {
1122				nfs_page_group_unlock(req);
1123				desc->pg_error = PTR_ERR(dupreq);
1124				goto out_failed;
1125			}
1126
1127			nfs_lock_request(dupreq);
1128			nfs_page_group_unlock(req);
1129			dupreq->wb_offset = offset;
1130			dupreq->wb_index = req->wb_index;
1131		} else
1132			dupreq = req;
1133
1134		if (nfs_pgio_has_mirroring(desc))
1135			desc->pg_mirror_idx = midx;
1136		if (!nfs_pageio_add_request_mirror(desc, dupreq))
1137			goto out_failed;
1138	}
1139
1140	return 1;
1141
1142out_failed:
1143	/*
1144	 * We might have failed before sending any reqs over wire.
1145	 * Clean up rest of the reqs in mirror pg_list.
1146	 */
1147	if (desc->pg_error) {
1148		struct nfs_pgio_mirror *mirror;
1149		void (*func)(struct list_head *);
1150
1151		/* remember fatal errors */
1152		if (nfs_error_is_fatal(desc->pg_error))
1153			mapping_set_error(desc->pg_inode->i_mapping,
1154					  desc->pg_error);
1155
1156		func = desc->pg_completion_ops->error_cleanup;
1157		for (midx = 0; midx < desc->pg_mirror_count; midx++) {
1158			mirror = &desc->pg_mirrors[midx];
1159			func(&mirror->pg_list);
1160		}
1161	}
1162	return 0;
1163}
1164
1165/*
1166 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
1167 *				nfs_pageio_descriptor
1168 * @desc: pointer to io descriptor
 1169 * @mirror_idx: index of the mirror to complete
1170 */
1171static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
1172				       u32 mirror_idx)
1173{
1174	struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
1175	u32 restore_idx = desc->pg_mirror_idx;
1176
1177	if (nfs_pgio_has_mirroring(desc))
1178		desc->pg_mirror_idx = mirror_idx;
1179	for (;;) {
1180		nfs_pageio_doio(desc);
1181		if (!mirror->pg_recoalesce)
1182			break;
1183		if (!nfs_do_recoalesce(desc))
1184			break;
1185	}
1186	desc->pg_mirror_idx = restore_idx;
1187}
1188
1189/*
1190 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 1191 * @desc: the pageio descriptor to add requests to
 1192 * @hdr: the pgio header to move requests from
1193 *
1194 * Try to move each request (nfs_page) from @hdr to @desc then attempt
1195 * to send them.
1196 *
1197 * Returns 0 on success and < 0 on error.
1198 */
1199int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
1200		      struct nfs_pgio_header *hdr)
1201{
1202	LIST_HEAD(failed);
1203
1204	desc->pg_dreq = hdr->dreq;
1205	while (!list_empty(&hdr->pages)) {
1206		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
1207
1208		nfs_list_remove_request(req);
1209		if (!nfs_pageio_add_request(desc, req))
1210			nfs_list_add_request(req, &failed);
1211	}
1212	nfs_pageio_complete(desc);
1213	if (!list_empty(&failed)) {
 1214		list_splice(&failed, &hdr->pages);	/* list_move() would move only the local list head */
1215		return desc->pg_error < 0 ? desc->pg_error : -EIO;
1216	}
1217	return 0;
1218}
1219EXPORT_SYMBOL_GPL(nfs_pageio_resend);
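/*
 * Typical use (an illustrative sketch; the pNFS layout drivers do this
 * when falling back from a data server to the MDS):
 *
 *	nfs_pageio_init_read(&desc, hdr->inode, true, hdr->completion_ops);
 *	return nfs_pageio_resend(&desc, hdr);
 */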
1220
1221/**
1222 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
1223 * @desc: pointer to io descriptor
1224 */
1225void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
1226{
1227	u32 midx;
1228
1229	for (midx = 0; midx < desc->pg_mirror_count; midx++)
1230		nfs_pageio_complete_mirror(desc, midx);
1231
1232	if (desc->pg_ops->pg_cleanup)
1233		desc->pg_ops->pg_cleanup(desc);
1234	nfs_pageio_cleanup_mirroring(desc);
1235}
1236
1237/**
1238 * nfs_pageio_cond_complete - Conditional I/O completion
1239 * @desc: pointer to io descriptor
1240 * @index: page index
1241 *
1242 * It is important to ensure that processes don't try to take locks
1243 * on non-contiguous ranges of pages as that might deadlock. This
1244 * function should be called before attempting to wait on a locked
1245 * nfs_page. It will complete the I/O if the page index 'index'
1246 * is not contiguous with the existing list of pages in 'desc'.
1247 */
1248void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
1249{
1250	struct nfs_pgio_mirror *mirror;
1251	struct nfs_page *prev;
1252	u32 midx;
1253
1254	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
1255		mirror = &desc->pg_mirrors[midx];
1256		if (!list_empty(&mirror->pg_list)) {
1257			prev = nfs_list_entry(mirror->pg_list.prev);
1258			if (index != prev->wb_index + 1)
1259				nfs_pageio_complete_mirror(desc, midx);
1260		}
1261	}
1262}
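/*
 * Example (illustrative): if a mirror's last queued request covers page
 * index 7, a caller about to wait on a locked page at index 9 flushes
 * first (9 != 7 + 1), while index 8 would keep coalescing.
 */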
1263
1264int __init nfs_init_nfspagecache(void)
1265{
1266	nfs_page_cachep = kmem_cache_create("nfs_page",
1267					    sizeof(struct nfs_page),
1268					    0, SLAB_HWCACHE_ALIGN,
1269					    NULL);
1270	if (nfs_page_cachep == NULL)
1271		return -ENOMEM;
1272
1273	return 0;
1274}
1275
1276void nfs_destroy_nfspagecache(void)
1277{
1278	kmem_cache_destroy(nfs_page_cachep);
1279}
1280
1281static const struct rpc_call_ops nfs_pgio_common_ops = {
1282	.rpc_call_prepare = nfs_pgio_prepare,
1283	.rpc_call_done = nfs_pgio_result,
1284	.rpc_release = nfs_pgio_release,
1285};
1286
1287const struct nfs_pageio_ops nfs_pgio_rw_ops = {
1288	.pg_test = nfs_generic_pg_test,
1289	.pg_doio = nfs_generic_pg_pgios,
1290};