v4.6
   1/*
   2 * linux/fs/nfs/pagelist.c
   3 *
   4 * A set of helper functions for managing NFS read and write requests.
   5 * The main purpose of these routines is to provide support for the
   6 * coalescing of several requests into a single RPC call.
   7 *
   8 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
   9 *
  10 */
  11
  12#include <linux/slab.h>
  13#include <linux/file.h>
  14#include <linux/sched.h>
  15#include <linux/sunrpc/clnt.h>
  16#include <linux/nfs.h>
  17#include <linux/nfs3.h>
  18#include <linux/nfs4.h>
  19#include <linux/nfs_page.h>
  20#include <linux/nfs_fs.h>
  21#include <linux/nfs_mount.h>
  22#include <linux/export.h>
  23
  24#include "internal.h"
  25#include "pnfs.h"
  26
  27#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
  28
  29static struct kmem_cache *nfs_page_cachep;
  30static const struct rpc_call_ops nfs_pgio_common_ops;
  31
  32static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
  33{
  34	p->npages = pagecount;
  35	if (pagecount <= ARRAY_SIZE(p->page_array))
  36		p->pagevec = p->page_array;
  37	else {
  38		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
  39		if (!p->pagevec)
  40			p->npages = 0;
  41	}
  42	return p->pagevec != NULL;
  43}
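/*
 * Editorial note (not part of the file): nfs_pgarray_set() avoids a
 * heap allocation for small I/Os by pointing pagevec at the embedded
 * page_array[] whenever pagecount fits. The matching teardown is in
 * nfs_pgio_data_destroy() below, which only kfree()s the vector when
 * it does not point at the embedded array:
 *
 *	if (hdr->page_array.pagevec != hdr->page_array.page_array)
 *		kfree(hdr->page_array.pagevec);
 */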
  44
  45struct nfs_pgio_mirror *
  46nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
  47{
  48	return nfs_pgio_has_mirroring(desc) ?
  49		&desc->pg_mirrors[desc->pg_mirror_idx] :
  50		&desc->pg_mirrors[0];
  51}
  52EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);
  53
  54void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
  55		       struct nfs_pgio_header *hdr,
  56		       void (*release)(struct nfs_pgio_header *hdr))
  57{
  58	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
  59
  60
  61	hdr->req = nfs_list_entry(mirror->pg_list.next);
  62	hdr->inode = desc->pg_inode;
  63	hdr->cred = hdr->req->wb_context->cred;
  64	hdr->io_start = req_offset(hdr->req);
  65	hdr->good_bytes = mirror->pg_count;
  66	hdr->dreq = desc->pg_dreq;
  67	hdr->layout_private = desc->pg_layout_private;
  68	hdr->release = release;
  69	hdr->completion_ops = desc->pg_completion_ops;
  70	if (hdr->completion_ops->init_hdr)
  71		hdr->completion_ops->init_hdr(hdr);
  72
  73	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
  74}
  75EXPORT_SYMBOL_GPL(nfs_pgheader_init);
  76
  77void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
  78{
  79	spin_lock(&hdr->lock);
  80	if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
  81	    || pos < hdr->io_start + hdr->good_bytes) {
  82		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
  83		hdr->good_bytes = pos - hdr->io_start;
  84		hdr->error = error;
  85	}
  86	spin_unlock(&hdr->lock);
  87}
  88
  89static inline struct nfs_page *
  90nfs_page_alloc(void)
  91{
  92	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
  93	if (p)
  94		INIT_LIST_HEAD(&p->wb_list);
  95	return p;
  96}
  97
  98static inline void
  99nfs_page_free(struct nfs_page *p)
 100{
 101	kmem_cache_free(nfs_page_cachep, p);
 102}
 103
 104/**
 105 * nfs_iocounter_wait - wait for i/o to complete
 106 * @l_ctx: nfs_lock_context with io_counter to use
 107 *
 108 * returns -ERESTARTSYS if interrupted by a fatal signal.
 109 * Otherwise returns 0 once the io_count hits 0.
 110 */
 111int
 112nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
 113{
 114	return wait_on_atomic_t(&l_ctx->io_count, nfs_wait_atomic_killable,
 115			TASK_KILLABLE);
 116}
 117
 118/*
 119 * nfs_page_group_lock - lock the head of the page group
 120 * @req - request in group that is to be locked
 121 * @nonblock - if true don't block waiting for lock
 122 *
 123 * this lock must be held if modifying the page group list
 124 *
  125 * return 0 on success, < 0 on error: -EAGAIN if nonblocking or the
 126 * result from wait_on_bit_lock
 127 *
 128 * NOTE: calling with nonblock=false should always have set the
 129 *       lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
 130 *       with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
 131 */
 132int
 133nfs_page_group_lock(struct nfs_page *req, bool nonblock)
 134{
 135	struct nfs_page *head = req->wb_head;
 136
 137	WARN_ON_ONCE(head != head->wb_head);
 138
 139	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
 140		return 0;
 141
 142	if (!nonblock)
 143		return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
 144				TASK_UNINTERRUPTIBLE);
 145
 146	return -EAGAIN;
 147}
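/*
 * Editorial sketch (not part of the file): the canonical pattern for
 * traversing the wb_this_page ring under the group lock. With
 * nonblock=false the lock is always acquired (see the NOTE above), so
 * the return value may be ignored.
 */
static inline void
example_walk_page_group(struct nfs_page *req)
{
	struct nfs_page *tmp = req;

	nfs_page_group_lock(req, false);
	do {
		/* visit each request in the group, head included */
		tmp = tmp->wb_this_page;
	} while (tmp != req);
	nfs_page_group_unlock(req);
}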
 148
 149/*
 150 * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
 151 * @req - a request in the group
 152 *
 153 * This is a blocking call to wait for the group lock to be cleared.
 154 */
 155void
 156nfs_page_group_lock_wait(struct nfs_page *req)
 157{
 158	struct nfs_page *head = req->wb_head;
 159
 160	WARN_ON_ONCE(head != head->wb_head);
 161
 162	wait_on_bit(&head->wb_flags, PG_HEADLOCK,
 163		TASK_UNINTERRUPTIBLE);
 164}
 165
 166/*
 167 * nfs_page_group_unlock - unlock the head of the page group
 168 * @req - request in group that is to be unlocked
 169 */
 170void
 171nfs_page_group_unlock(struct nfs_page *req)
 172{
 173	struct nfs_page *head = req->wb_head;
 174
 175	WARN_ON_ONCE(head != head->wb_head);
 176
 177	smp_mb__before_atomic();
 178	clear_bit(PG_HEADLOCK, &head->wb_flags);
 179	smp_mb__after_atomic();
 180	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
 181}
 182
 183/*
 184 * nfs_page_group_sync_on_bit_locked
 185 *
 186 * must be called with page group lock held
 187 */
 188static bool
 189nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
 190{
 191	struct nfs_page *head = req->wb_head;
 192	struct nfs_page *tmp;
 193
 194	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
 195	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));
 196
 197	tmp = req->wb_this_page;
 198	while (tmp != req) {
 199		if (!test_bit(bit, &tmp->wb_flags))
 200			return false;
 201		tmp = tmp->wb_this_page;
 202	}
 203
 204	/* true! reset all bits */
 205	tmp = req;
 206	do {
 207		clear_bit(bit, &tmp->wb_flags);
 208		tmp = tmp->wb_this_page;
 209	} while (tmp != req);
 210
 211	return true;
 212}
 213
 214/*
 215 * nfs_page_group_sync_on_bit - set bit on current request, but only
 216 *   return true if the bit is set for all requests in page group
 217 * @req - request in page group
 218 * @bit - PG_* bit that is used to sync page group
 219 */
 220bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
 221{
 222	bool ret;
 223
 224	nfs_page_group_lock(req, false);
 225	ret = nfs_page_group_sync_on_bit_locked(req, bit);
 226	nfs_page_group_unlock(req);
 227
 228	return ret;
 229}
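/*
 * Editorial example: for a page group of three subrequests A -> B -> C,
 * nfs_page_group_sync_on_bit(x, PG_UNLOCKPAGE) returns false for the
 * first two callers (each only sets its own bit) and true for the last
 * caller, at which point all three bits are cleared again. This gives
 * "last one out" semantics: an action such as unlocking the page runs
 * exactly once per group.
 */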
 230
 231/*
 232 * nfs_page_group_init - Initialize the page group linkage for @req
 233 * @req - a new nfs request
 234 * @prev - the previous request in page group, or NULL if @req is the first
 235 *         or only request in the group (the head).
 236 */
 237static inline void
 238nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
 239{
 240	struct inode *inode;
 241	WARN_ON_ONCE(prev == req);
 242
 243	if (!prev) {
 244		/* a head request */
 245		req->wb_head = req;
 246		req->wb_this_page = req;
 247	} else {
 248		/* a subrequest */
 249		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
 250		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
 251		req->wb_head = prev->wb_head;
 252		req->wb_this_page = prev->wb_this_page;
 253		prev->wb_this_page = req;
 254
 255		/* All subrequests take a ref on the head request until
 256		 * nfs_page_group_destroy is called */
 257		kref_get(&req->wb_head->wb_kref);
 258
 259		/* grab extra ref and bump the request count if head request
 260		 * has extra ref from the write/commit path to handle handoff
 261		 * between write and commit lists. */
 262		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
 263			inode = page_file_mapping(req->wb_page)->host;
 264			set_bit(PG_INODE_REF, &req->wb_flags);
 265			kref_get(&req->wb_kref);
 266			spin_lock(&inode->i_lock);
 267			NFS_I(inode)->nrequests++;
 268			spin_unlock(&inode->i_lock);
 269		}
 270	}
 271}
 272
 273/*
 274 * nfs_page_group_destroy - sync the destruction of page groups
 275 * @req - request that no longer needs the page group
 276 *
 277 * releases the page group reference from each member once all
 278 * members have called this function.
 279 */
 280static void
 281nfs_page_group_destroy(struct kref *kref)
 282{
 283	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
 284	struct nfs_page *tmp, *next;
 285
 286	/* subrequests must release the ref on the head request */
 287	if (req->wb_head != req)
 288		nfs_release_request(req->wb_head);
 289
 290	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
 291		return;
 292
 293	tmp = req;
 294	do {
 295		next = tmp->wb_this_page;
 296		/* unlink and free */
 297		tmp->wb_this_page = tmp;
 298		tmp->wb_head = tmp;
 299		nfs_free_request(tmp);
 300		tmp = next;
 301	} while (tmp != req);
 302}
 303
 304/**
 305 * nfs_create_request - Create an NFS read/write request.
 306 * @ctx: open context to use
 307 * @page: page to write
 308 * @last: last nfs request created for this page group or NULL if head
 309 * @offset: starting offset within the page for the write
 310 * @count: number of bytes to read/write
 311 *
 312 * The page must be locked by the caller. This makes sure we never
 313 * create two different requests for the same page.
  314 * The caller must ensure it is safe to sleep in this function.
 315 */
 316struct nfs_page *
 317nfs_create_request(struct nfs_open_context *ctx, struct page *page,
 318		   struct nfs_page *last, unsigned int offset,
 319		   unsigned int count)
 320{
 321	struct nfs_page		*req;
 322	struct nfs_lock_context *l_ctx;
 323
 324	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
 325		return ERR_PTR(-EBADF);
 326	/* try to allocate the request struct */
 327	req = nfs_page_alloc();
 328	if (req == NULL)
 329		return ERR_PTR(-ENOMEM);
 330
 331	/* get lock context early so we can deal with alloc failures */
 332	l_ctx = nfs_get_lock_context(ctx);
 333	if (IS_ERR(l_ctx)) {
 334		nfs_page_free(req);
 335		return ERR_CAST(l_ctx);
 336	}
 337	req->wb_lock_context = l_ctx;
 338	atomic_inc(&l_ctx->io_count);
 339
 340	/* Initialize the request struct. Initially, we assume a
 341	 * long write-back delay. This will be adjusted in
 342	 * update_nfs_request below if the region is not locked. */
 343	req->wb_page    = page;
 344	req->wb_index	= page_file_index(page);
 345	get_page(page);
 346	req->wb_offset  = offset;
 347	req->wb_pgbase	= offset;
 348	req->wb_bytes   = count;
 349	req->wb_context = get_nfs_open_context(ctx);
 350	kref_init(&req->wb_kref);
 351	nfs_page_group_init(req, last);
 352	return req;
 353}
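/*
 * Editorial sketch (not part of the file; a hypothetical caller
 * modelled on the write path): creating and locking a head request
 * for a page that the caller already holds locked.
 */
static inline struct nfs_page *
example_create_head_request(struct nfs_open_context *ctx, struct page *page,
			    unsigned int offset, unsigned int count)
{
	/* last == NULL makes the new request its own page-group head */
	struct nfs_page *req = nfs_create_request(ctx, page, NULL,
						  offset, count);

	if (!IS_ERR(req))
		nfs_lock_request(req);
	return req;
}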
 354
 355/**
 356 * nfs_unlock_request - Unlock request and wake up sleepers.
  357 * @req: pointer to request
 358 */
 359void nfs_unlock_request(struct nfs_page *req)
 360{
 361	if (!NFS_WBACK_BUSY(req)) {
 362		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
 363		BUG();
 364	}
 365	smp_mb__before_atomic();
 366	clear_bit(PG_BUSY, &req->wb_flags);
 367	smp_mb__after_atomic();
 368	wake_up_bit(&req->wb_flags, PG_BUSY);
 369}
 370
 371/**
 372 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
  373 * @req: pointer to request
 374 */
 375void nfs_unlock_and_release_request(struct nfs_page *req)
 376{
 377	nfs_unlock_request(req);
 378	nfs_release_request(req);
 379}
 380
 381/*
 382 * nfs_clear_request - Free up all resources allocated to the request
  383 * @req: pointer to request
 384 *
 385 * Release page and open context resources associated with a read/write
 386 * request after it has completed.
 387 */
 388static void nfs_clear_request(struct nfs_page *req)
 389{
 390	struct page *page = req->wb_page;
 391	struct nfs_open_context *ctx = req->wb_context;
 392	struct nfs_lock_context *l_ctx = req->wb_lock_context;
 393
 394	if (page != NULL) {
 395		put_page(page);
 396		req->wb_page = NULL;
 397	}
 398	if (l_ctx != NULL) {
 399		if (atomic_dec_and_test(&l_ctx->io_count))
 400			wake_up_atomic_t(&l_ctx->io_count);
 401		nfs_put_lock_context(l_ctx);
 402		req->wb_lock_context = NULL;
 403	}
 404	if (ctx != NULL) {
 405		put_nfs_open_context(ctx);
 406		req->wb_context = NULL;
 407	}
 408}
 409
 410/**
  411 * nfs_free_request - Release the count on an NFS read/write request
 412 * @req: request to release
 413 *
 414 * Note: Should never be called with the spinlock held!
 415 */
 416void nfs_free_request(struct nfs_page *req)
 417{
 418	WARN_ON_ONCE(req->wb_this_page != req);
 419
 420	/* extra debug: make sure no sync bits are still set */
 421	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
 422	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
 423	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
 424	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
 425	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));
 426
 427	/* Release struct file and open context */
 428	nfs_clear_request(req);
 429	nfs_page_free(req);
 430}
 431
 432void nfs_release_request(struct nfs_page *req)
 433{
 434	kref_put(&req->wb_kref, nfs_page_group_destroy);
 435}
 436
 437/**
 438 * nfs_wait_on_request - Wait for a request to complete.
 439 * @req: request to wait upon.
 440 *
 441 * Interruptible by fatal signals only.
 442 * The user is responsible for holding a count on the request.
 443 */
 444int
 445nfs_wait_on_request(struct nfs_page *req)
 446{
 447	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
 448			      TASK_UNINTERRUPTIBLE);
 449}
 450
 451/*
 452 * nfs_generic_pg_test - determine if requests can be coalesced
 453 * @desc: pointer to descriptor
 454 * @prev: previous request in desc, or NULL
 455 * @req: this request
 456 *
  457 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 458 * the size of the request.
 459 */
 460size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
 461			   struct nfs_page *prev, struct nfs_page *req)
 462{
 463	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 464
 465
 466	if (mirror->pg_count > mirror->pg_bsize) {
 467		/* should never happen */
 468		WARN_ON_ONCE(1);
 469		return 0;
 470	}
 471
 472	/*
 473	 * Limit the request size so that we can still allocate a page array
 474	 * for it without upsetting the slab allocator.
 475	 */
 476	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
 477			sizeof(struct page *) > PAGE_SIZE)
 478		return 0;
 479
 480	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
 481}
 482EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
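/*
 * Editorial note: with 4 KiB pages and 8-byte pointers the slab guard
 * above allows at most PAGE_SIZE / sizeof(struct page *) = 512 pages,
 * so a single coalesced request is capped at 2 MiB even if pg_bsize
 * would permit more.
 */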
 483
 484struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
 485{
 486	struct nfs_pgio_header *hdr = ops->rw_alloc_header();
 487
 488	if (hdr) {
 489		INIT_LIST_HEAD(&hdr->pages);
 490		spin_lock_init(&hdr->lock);
 491		hdr->rw_ops = ops;
 492	}
 493	return hdr;
 494}
 495EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
 496
 497/*
 498 * nfs_pgio_header_free - Free a read or write header
 499 * @hdr: The header to free
 500 */
 501void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
 502{
 503	hdr->rw_ops->rw_free_header(hdr);
 504}
 505EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
 506
 507/**
 508 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 509 *
 510 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 511 * be called again.
 512 *
 513 * @hdr: A header that has had nfs_generic_pgio called
 514 */
 515void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
 516{
 517	if (hdr->args.context)
 518		put_nfs_open_context(hdr->args.context);
 519	if (hdr->page_array.pagevec != hdr->page_array.page_array)
 520		kfree(hdr->page_array.pagevec);
 521}
 522EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
 523
 524/**
 525 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 526 * @hdr: The pageio hdr
 527 * @count: Number of bytes to read
 528 * @offset: Initial offset
 529 * @how: How to commit data (writes only)
 530 * @cinfo: Commit information for the call (writes only)
 531 */
 532static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
 533			      unsigned int count, unsigned int offset,
 534			      int how, struct nfs_commit_info *cinfo)
 535{
 536	struct nfs_page *req = hdr->req;
 537
 538	/* Set up the RPC argument and reply structs
 539	 * NB: take care not to mess about with hdr->commit et al. */
 540
 541	hdr->args.fh     = NFS_FH(hdr->inode);
 542	hdr->args.offset = req_offset(req) + offset;
 543	/* pnfs_set_layoutcommit needs this */
 544	hdr->mds_offset = hdr->args.offset;
 545	hdr->args.pgbase = req->wb_pgbase + offset;
 546	hdr->args.pages  = hdr->page_array.pagevec;
 547	hdr->args.count  = count;
 548	hdr->args.context = get_nfs_open_context(req->wb_context);
 549	hdr->args.lock_context = req->wb_lock_context;
 550	hdr->args.stable  = NFS_UNSTABLE;
 551	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
 552	case 0:
 553		break;
 554	case FLUSH_COND_STABLE:
 555		if (nfs_reqs_to_commit(cinfo))
 556			break;
 557	default:
 558		hdr->args.stable = NFS_FILE_SYNC;
 559	}
 560
 561	hdr->res.fattr   = &hdr->fattr;
 562	hdr->res.count   = count;
 563	hdr->res.eof     = 0;
 564	hdr->res.verf    = &hdr->verf;
 565	nfs_fattr_init(&hdr->fattr);
 566}
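/*
 * Editorial note on the switch above: FLUSH_COND_STABLE falls through
 * to NFS_FILE_SYNC only when nothing is queued for commit; if
 * nfs_reqs_to_commit() is non-zero a COMMIT will be sent anyway, so
 * the write can stay NFS_UNSTABLE and be covered by that commit.
 */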
 567
 568/**
 569 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 570 * @task: The current task
 571 * @calldata: pageio header to prepare
 572 */
 573static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
 574{
 575	struct nfs_pgio_header *hdr = calldata;
 576	int err;
 577	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
 578	if (err)
 579		rpc_exit(task, err);
 580}
 581
 582int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
 583		      struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
 584		      const struct rpc_call_ops *call_ops, int how, int flags)
 585{
 586	struct rpc_task *task;
 587	struct rpc_message msg = {
 588		.rpc_argp = &hdr->args,
 589		.rpc_resp = &hdr->res,
 590		.rpc_cred = cred,
 591	};
 592	struct rpc_task_setup task_setup_data = {
 593		.rpc_client = clnt,
 594		.task = &hdr->task,
 595		.rpc_message = &msg,
 596		.callback_ops = call_ops,
 597		.callback_data = hdr,
 598		.workqueue = nfsiod_workqueue,
 599		.flags = RPC_TASK_ASYNC | flags,
 600	};
 601	int ret = 0;
 602
 603	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);
 604
 605	dprintk("NFS: initiated pgio call "
 606		"(req %s/%llu, %u bytes @ offset %llu)\n",
 607		hdr->inode->i_sb->s_id,
 608		(unsigned long long)NFS_FILEID(hdr->inode),
 609		hdr->args.count,
 610		(unsigned long long)hdr->args.offset);
 611
 612	task = rpc_run_task(&task_setup_data);
 613	if (IS_ERR(task)) {
 614		ret = PTR_ERR(task);
 615		goto out;
 616	}
 617	if (how & FLUSH_SYNC) {
 618		ret = rpc_wait_for_completion_task(task);
 619		if (ret == 0)
 620			ret = task->tk_status;
 621	}
 622	rpc_put_task(task);
 623out:
 624	return ret;
 625}
 626EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
 627
 628/**
 629 * nfs_pgio_error - Clean up from a pageio error
  630 * @hdr: pageio header
 632 */
 633static void nfs_pgio_error(struct nfs_pgio_header *hdr)
 634{
 635	set_bit(NFS_IOHDR_REDO, &hdr->flags);
 636	nfs_pgio_data_destroy(hdr);
 637	hdr->completion_ops->completion(hdr);
 638}
 639
 640/**
 641 * nfs_pgio_release - Release pageio data
 642 * @calldata: The pageio header to release
 643 */
 644static void nfs_pgio_release(void *calldata)
 645{
 646	struct nfs_pgio_header *hdr = calldata;
 647	nfs_pgio_data_destroy(hdr);
 648	hdr->completion_ops->completion(hdr);
 649}
 650
 651static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
 652				   unsigned int bsize)
 653{
 654	INIT_LIST_HEAD(&mirror->pg_list);
 655	mirror->pg_bytes_written = 0;
 656	mirror->pg_count = 0;
 657	mirror->pg_bsize = bsize;
 658	mirror->pg_base = 0;
 659	mirror->pg_recoalesce = 0;
 660}
 661
 662/**
 663 * nfs_pageio_init - initialise a page io descriptor
 664 * @desc: pointer to descriptor
 665 * @inode: pointer to inode
 666 * @pg_ops: pointer to pageio operations
 667 * @compl_ops: pointer to pageio completion operations
 668 * @rw_ops: pointer to nfs read/write operations
 669 * @bsize: io block size
 670 * @io_flags: extra parameters for the io function
 671 */
 672void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
 673		     struct inode *inode,
 674		     const struct nfs_pageio_ops *pg_ops,
 675		     const struct nfs_pgio_completion_ops *compl_ops,
 676		     const struct nfs_rw_ops *rw_ops,
 677		     size_t bsize,
 678		     int io_flags)
 679{
 680	struct nfs_pgio_mirror *new;
 681	int i;
 682
 683	desc->pg_moreio = 0;
 684	desc->pg_inode = inode;
 685	desc->pg_ops = pg_ops;
 686	desc->pg_completion_ops = compl_ops;
 687	desc->pg_rw_ops = rw_ops;
 688	desc->pg_ioflags = io_flags;
 689	desc->pg_error = 0;
 690	desc->pg_lseg = NULL;
 691	desc->pg_dreq = NULL;
 692	desc->pg_layout_private = NULL;
 693	desc->pg_bsize = bsize;
 694
 695	desc->pg_mirror_count = 1;
 696	desc->pg_mirror_idx = 0;
 697
 698	if (pg_ops->pg_get_mirror_count) {
 699		/* until we have a request, we don't have an lseg and no
 700		 * idea how many mirrors there will be */
 701		new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
 702			      sizeof(struct nfs_pgio_mirror), GFP_KERNEL);
 703		desc->pg_mirrors_dynamic = new;
 704		desc->pg_mirrors = new;
 705
 706		for (i = 0; i < NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX; i++)
 707			nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize);
 708	} else {
 709		desc->pg_mirrors_dynamic = NULL;
 710		desc->pg_mirrors = desc->pg_mirrors_static;
 711		nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
 712	}
 713}
 714EXPORT_SYMBOL_GPL(nfs_pageio_init);
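/*
 * Editorial sketch (hypothetical call, modelled on the read path):
 * callers supply their own ops tables and block size, e.g.
 *
 *	nfs_pageio_init(&pgio, inode, &nfs_pgio_rw_ops, compl_ops,
 *			&nfs_rw_read_ops, NFS_SERVER(inode)->rsize, 0);
 *
 * Note that the kcalloc() above is not checked for failure in this
 * version: if it returned NULL, the nfs_pageio_mirror_init() loop
 * would dereference a NULL pg_mirrors.
 */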
 715
 716/**
 717 * nfs_pgio_result - Basic pageio error handling
 718 * @task: The task that ran
 719 * @calldata: Pageio header to check
 720 */
 721static void nfs_pgio_result(struct rpc_task *task, void *calldata)
 722{
 723	struct nfs_pgio_header *hdr = calldata;
 724	struct inode *inode = hdr->inode;
 725
 726	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
 727		task->tk_pid, task->tk_status);
 728
 729	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
 730		return;
 731	if (task->tk_status < 0)
 732		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
 733	else
 734		hdr->rw_ops->rw_result(task, hdr);
 735}
 736
 737/*
 738 * Create an RPC task for the given read or write request and kick it.
 739 * The page must have been locked by the caller.
 740 *
 741 * It may happen that the page we're passed is not marked dirty.
 742 * This is the case if nfs_updatepage detects a conflicting request
 743 * that has been written but not committed.
 744 */
 745int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 746		     struct nfs_pgio_header *hdr)
 747{
 748	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 749
 750	struct nfs_page		*req;
 751	struct page		**pages,
 752				*last_page;
 753	struct list_head *head = &mirror->pg_list;
 754	struct nfs_commit_info cinfo;
 755	unsigned int pagecount, pageused;
 756
 757	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
 758	if (!nfs_pgarray_set(&hdr->page_array, pagecount)) {
 759		nfs_pgio_error(hdr);
 760		desc->pg_error = -ENOMEM;
 761		return desc->pg_error;
 762	}
 763
 764	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
 765	pages = hdr->page_array.pagevec;
 766	last_page = NULL;
 767	pageused = 0;
 768	while (!list_empty(head)) {
 769		req = nfs_list_entry(head->next);
 770		nfs_list_remove_request(req);
 771		nfs_list_add_request(req, &hdr->pages);
 772
 773		if (!last_page || last_page != req->wb_page) {
 774			pageused++;
 775			if (pageused > pagecount)
 776				break;
 777			*pages++ = last_page = req->wb_page;
 778		}
 779	}
 780	if (WARN_ON_ONCE(pageused != pagecount)) {
 781		nfs_pgio_error(hdr);
 782		desc->pg_error = -EINVAL;
 783		return desc->pg_error;
 784	}
 785
 786	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
 787	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
 788		desc->pg_ioflags &= ~FLUSH_COND_STABLE;
 789
 790	/* Set up the argument struct */
 791	nfs_pgio_rpcsetup(hdr, mirror->pg_count, 0, desc->pg_ioflags, &cinfo);
 792	desc->pg_rpc_callops = &nfs_pgio_common_ops;
 793	return 0;
 794}
 795EXPORT_SYMBOL_GPL(nfs_generic_pgio);
 796
 797static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
 798{
 799	struct nfs_pgio_header *hdr;
 800	int ret;
 801
 802	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
 803	if (!hdr) {
 804		desc->pg_error = -ENOMEM;
 805		return desc->pg_error;
 806	}
 807	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
 808	ret = nfs_generic_pgio(desc, hdr);
 809	if (ret == 0)
 810		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
 811					hdr,
 812					hdr->cred,
 813					NFS_PROTO(hdr->inode),
 814					desc->pg_rpc_callops,
 815					desc->pg_ioflags, 0);
 816	return ret;
 817}
 818
 819/*
 820 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 821 *				by calling the pg_get_mirror_count op
 822 */
 823static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
 824				       struct nfs_page *req)
 825{
 826	int mirror_count = 1;
 827
 828	if (!pgio->pg_ops->pg_get_mirror_count)
 829		return 0;
 830
 831	mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
 832
 833	if (pgio->pg_error < 0)
 834		return pgio->pg_error;
 835
 836	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX)
 837		return -EINVAL;
 838
 839	if (WARN_ON_ONCE(!pgio->pg_mirrors_dynamic))
 840		return -EINVAL;
 841
 842	pgio->pg_mirror_count = mirror_count;
 843
 844	return 0;
 845}
 846
 847/*
 848 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 849 */
 850void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
 851{
 852	pgio->pg_mirror_count = 1;
 853	pgio->pg_mirror_idx = 0;
 854}
 855
 856static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
 857{
 858	pgio->pg_mirror_count = 1;
 859	pgio->pg_mirror_idx = 0;
 860	pgio->pg_mirrors = pgio->pg_mirrors_static;
 861	kfree(pgio->pg_mirrors_dynamic);
 862	pgio->pg_mirrors_dynamic = NULL;
 863}
 864
 865static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
 866		const struct nfs_lock_context *l2)
 867{
 868	return l1->lockowner.l_owner == l2->lockowner.l_owner
 869		&& l1->lockowner.l_pid == l2->lockowner.l_pid;
 870}
 871
 872/**
 873 * nfs_can_coalesce_requests - test two requests for compatibility
 874 * @prev: pointer to nfs_page
  875 * @req: pointer to nfs_page
      * @pgio: pointer to the io descriptor
 876 *
 877 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 878 * page data area they describe is contiguous, and that their RPC
 879 * credentials, NFSv4 open state, and lockowners are the same.
 880 *
 881 * Return 'true' if this is the case, else return 'false'.
 882 */
 883static bool nfs_can_coalesce_requests(struct nfs_page *prev,
 884				      struct nfs_page *req,
 885				      struct nfs_pageio_descriptor *pgio)
 886{
 887	size_t size;
 888	struct file_lock_context *flctx;
 889
 890	if (prev) {
 891		if (!nfs_match_open_context(req->wb_context, prev->wb_context))
 892			return false;
 893		flctx = d_inode(req->wb_context->dentry)->i_flctx;
 894		if (flctx != NULL &&
 895		    !(list_empty_careful(&flctx->flc_posix) &&
 896		      list_empty_careful(&flctx->flc_flock)) &&
 897		    !nfs_match_lock_context(req->wb_lock_context,
 898					    prev->wb_lock_context))
 899			return false;
 900		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
 901			return false;
 902		if (req->wb_page == prev->wb_page) {
 903			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
 904				return false;
 905		} else {
 906			if (req->wb_pgbase != 0 ||
 907			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
 908				return false;
 909		}
 910	}
 911	size = pgio->pg_ops->pg_test(pgio, prev, req);
 912	WARN_ON_ONCE(size > req->wb_bytes);
 913	if (size && size < req->wb_bytes)
 914		req->wb_bytes = size;
 915	return size > 0;
 916}
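/*
 * Editorial example: two 512-byte requests covering bytes 0-511 and
 * 512-1023 of the same page coalesce, since both req_offset() and
 * wb_pgbase line up. Across a page boundary the previous request must
 * end exactly at PAGE_SIZE and the new one must start at wb_pgbase 0,
 * so a partial page in the middle of a run ends the coalescing.
 */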
 917
 918/**
 919 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 920 * @desc: destination io descriptor
 921 * @req: request
 922 *
 923 * Returns true if the request 'req' was successfully coalesced into the
 924 * existing list of pages 'desc'.
 925 */
 926static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
 927				     struct nfs_page *req)
 928{
 929	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 930
 931	struct nfs_page *prev = NULL;
 932
 933	if (mirror->pg_count != 0) {
 934		prev = nfs_list_entry(mirror->pg_list.prev);
 935	} else {
 936		if (desc->pg_ops->pg_init)
 937			desc->pg_ops->pg_init(desc, req);
 938		if (desc->pg_error < 0)
 939			return 0;
 940		mirror->pg_base = req->wb_pgbase;
 941	}
 942	if (!nfs_can_coalesce_requests(prev, req, desc))
 943		return 0;
 944	nfs_list_remove_request(req);
 945	nfs_list_add_request(req, &mirror->pg_list);
 946	mirror->pg_count += req->wb_bytes;
 947	return 1;
 948}
 949
 950/*
 951 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 952 */
 953static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
 954{
 955	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 956
 957
 958	if (!list_empty(&mirror->pg_list)) {
 959		int error = desc->pg_ops->pg_doio(desc);
 960		if (error < 0)
 961			desc->pg_error = error;
 962		else
 963			mirror->pg_bytes_written += mirror->pg_count;
 964	}
 965	if (list_empty(&mirror->pg_list)) {
 966		mirror->pg_count = 0;
 967		mirror->pg_base = 0;
 968	}
 969}
 970
 971/**
 972 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 973 * @desc: destination io descriptor
 974 * @req: request
 975 *
 976 * This may split a request into subrequests which are all part of the
 977 * same page group.
 978 *
 979 * Returns true if the request 'req' was successfully coalesced into the
 980 * existing list of pages 'desc'.
 981 */
 982static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 983			   struct nfs_page *req)
 984{
 985	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 986
 987	struct nfs_page *subreq;
 988	unsigned int bytes_left = 0;
 989	unsigned int offset, pgbase;
 990
 991	nfs_page_group_lock(req, false);
 992
 993	subreq = req;
 994	bytes_left = subreq->wb_bytes;
 995	offset = subreq->wb_offset;
 996	pgbase = subreq->wb_pgbase;
 997
 998	do {
 999		if (!nfs_pageio_do_add_request(desc, subreq)) {
1000			/* make sure pg_test call(s) did nothing */
1001			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
1002			WARN_ON_ONCE(subreq->wb_offset != offset);
1003			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);
1004
1005			nfs_page_group_unlock(req);
1006			desc->pg_moreio = 1;
1007			nfs_pageio_doio(desc);
1008			if (desc->pg_error < 0)
1009				return 0;
1010			if (mirror->pg_recoalesce)
1011				return 0;
1012			/* retry add_request for this subreq */
1013			nfs_page_group_lock(req, false);
1014			continue;
1015		}
1016
1017		/* check for buggy pg_test call(s) */
1018		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
1019		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
1020		WARN_ON_ONCE(subreq->wb_bytes == 0);
1021
1022		bytes_left -= subreq->wb_bytes;
1023		offset += subreq->wb_bytes;
1024		pgbase += subreq->wb_bytes;
1025
1026		if (bytes_left) {
1027			subreq = nfs_create_request(req->wb_context,
1028					req->wb_page,
1029					subreq, pgbase, bytes_left);
1030			if (IS_ERR(subreq))
1031				goto err_ptr;
1032			nfs_lock_request(subreq);
1033			subreq->wb_offset  = offset;
1034			subreq->wb_index = req->wb_index;
1035		}
1036	} while (bytes_left > 0);
1037
1038	nfs_page_group_unlock(req);
1039	return 1;
1040err_ptr:
1041	desc->pg_error = PTR_ERR(subreq);
1042	nfs_page_group_unlock(req);
1043	return 0;
1044}
1045
1046static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
1047{
1048	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1049	LIST_HEAD(head);
1050
1051	do {
1052		list_splice_init(&mirror->pg_list, &head);
1053		mirror->pg_bytes_written -= mirror->pg_count;
1054		mirror->pg_count = 0;
1055		mirror->pg_base = 0;
1056		mirror->pg_recoalesce = 0;
1057
1058		while (!list_empty(&head)) {
1059			struct nfs_page *req;
1060
1061			req = list_first_entry(&head, struct nfs_page, wb_list);
1062			nfs_list_remove_request(req);
1063			if (__nfs_pageio_add_request(desc, req))
1064				continue;
1065			if (desc->pg_error < 0) {
1066				list_splice_tail(&head, &mirror->pg_list);
1067				mirror->pg_recoalesce = 1;
1068				return 0;
1069			}
1070			break;
1071		}
1072	} while (mirror->pg_recoalesce);
1073	return 1;
1074}
1075
1076static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
1077		struct nfs_page *req)
1078{
1079	int ret;
1080
1081	do {
1082		ret = __nfs_pageio_add_request(desc, req);
1083		if (ret)
1084			break;
1085		if (desc->pg_error < 0)
1086			break;
1087		ret = nfs_do_recoalesce(desc);
1088	} while (ret);
1089
1090	return ret;
1091}
1092
1093int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
1094			   struct nfs_page *req)
1095{
1096	u32 midx;
1097	unsigned int pgbase, offset, bytes;
1098	struct nfs_page *dupreq, *lastreq;
1099
1100	pgbase = req->wb_pgbase;
1101	offset = req->wb_offset;
1102	bytes = req->wb_bytes;
1103
1104	nfs_pageio_setup_mirroring(desc, req);
1105	if (desc->pg_error < 0)
1106		goto out_failed;
1107
1108	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
1109		if (midx) {
1110			nfs_page_group_lock(req, false);
1111
1112			/* find the last request */
1113			for (lastreq = req->wb_head;
1114			     lastreq->wb_this_page != req->wb_head;
1115			     lastreq = lastreq->wb_this_page)
1116				;
1117
1118			dupreq = nfs_create_request(req->wb_context,
1119					req->wb_page, lastreq, pgbase, bytes);
1120
1121			if (IS_ERR(dupreq)) {
1122				nfs_page_group_unlock(req);
1123				desc->pg_error = PTR_ERR(dupreq);
1124				goto out_failed;
1125			}
1126
1127			nfs_lock_request(dupreq);
1128			nfs_page_group_unlock(req);
1129			dupreq->wb_offset = offset;
1130			dupreq->wb_index = req->wb_index;
1131		} else
1132			dupreq = req;
1133
1134		if (nfs_pgio_has_mirroring(desc))
1135			desc->pg_mirror_idx = midx;
1136		if (!nfs_pageio_add_request_mirror(desc, dupreq))
1137			goto out_failed;
1138	}
1139
1140	return 1;
1141
1142out_failed:
1143	/*
 1144	 * We might have failed before sending any requests over the wire.
 1145	 * Clean up the rest of the requests on the mirror's pg_list.
1146	 */
1147	if (desc->pg_error) {
1148		struct nfs_pgio_mirror *mirror;
1149		void (*func)(struct list_head *);
1150
1151		/* remember fatal errors */
1152		if (nfs_error_is_fatal(desc->pg_error))
1153			mapping_set_error(desc->pg_inode->i_mapping,
1154					  desc->pg_error);
1155
1156		func = desc->pg_completion_ops->error_cleanup;
1157		for (midx = 0; midx < desc->pg_mirror_count; midx++) {
1158			mirror = &desc->pg_mirrors[midx];
1159			func(&mirror->pg_list);
1160		}
1161	}
1162	return 0;
1163}
1164
1165/*
1166 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
1167 *				nfs_pageio_descriptor
1168 * @desc: pointer to io descriptor
 1169 * @mirror_idx: index of the mirror to complete
1170 */
1171static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
1172				       u32 mirror_idx)
1173{
1174	struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
1175	u32 restore_idx = desc->pg_mirror_idx;
1176
1177	if (nfs_pgio_has_mirroring(desc))
1178		desc->pg_mirror_idx = mirror_idx;
1179	for (;;) {
1180		nfs_pageio_doio(desc);
1181		if (!mirror->pg_recoalesce)
1182			break;
1183		if (!nfs_do_recoalesce(desc))
1184			break;
1185	}
1186	desc->pg_mirror_idx = restore_idx;
1187}
1188
1189/*
1190 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 1191 * @hdr - the pgio header to move requests from
1192 * @desc - the pageio descriptor to add requests to
1193 *
1194 * Try to move each request (nfs_page) from @hdr to @desc then attempt
1195 * to send them.
1196 *
1197 * Returns 0 on success and < 0 on error.
1198 */
1199int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
1200		      struct nfs_pgio_header *hdr)
1201{
1202	LIST_HEAD(failed);
1203
1204	desc->pg_dreq = hdr->dreq;
1205	while (!list_empty(&hdr->pages)) {
1206		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
1207
1208		nfs_list_remove_request(req);
1209		if (!nfs_pageio_add_request(desc, req))
1210			nfs_list_add_request(req, &failed);
1211	}
1212	nfs_pageio_complete(desc);
1213	if (!list_empty(&failed)) {
1214		list_move(&failed, &hdr->pages);
1215		return desc->pg_error < 0 ? desc->pg_error : -EIO;
1216	}
1217	return 0;
1218}
1219EXPORT_SYMBOL_GPL(nfs_pageio_resend);
1220
1221/**
1222 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
1223 * @desc: pointer to io descriptor
1224 */
1225void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
1226{
1227	u32 midx;
1228
1229	for (midx = 0; midx < desc->pg_mirror_count; midx++)
1230		nfs_pageio_complete_mirror(desc, midx);
1231
1232	if (desc->pg_ops->pg_cleanup)
1233		desc->pg_ops->pg_cleanup(desc);
1234	nfs_pageio_cleanup_mirroring(desc);
1235}
1236
1237/**
1238 * nfs_pageio_cond_complete - Conditional I/O completion
1239 * @desc: pointer to io descriptor
1240 * @index: page index
1241 *
1242 * It is important to ensure that processes don't try to take locks
1243 * on non-contiguous ranges of pages as that might deadlock. This
1244 * function should be called before attempting to wait on a locked
1245 * nfs_page. It will complete the I/O if the page index 'index'
1246 * is not contiguous with the existing list of pages in 'desc'.
1247 */
1248void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
1249{
1250	struct nfs_pgio_mirror *mirror;
1251	struct nfs_page *prev;
1252	u32 midx;
1253
1254	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
1255		mirror = &desc->pg_mirrors[midx];
1256		if (!list_empty(&mirror->pg_list)) {
1257			prev = nfs_list_entry(mirror->pg_list.prev);
1258			if (index != prev->wb_index + 1)
1259				nfs_pageio_complete_mirror(desc, midx);
1260		}
1261	}
1262}
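/*
 * Editorial example: if a mirror's pg_list currently ends with the
 * request for page index 7, a call with index == 8 leaves the list
 * intact (still contiguous), while index == 9 flushes the mirror
 * first, avoiding the lock-ordering deadlock described above.
 */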
1263
1264int __init nfs_init_nfspagecache(void)
1265{
1266	nfs_page_cachep = kmem_cache_create("nfs_page",
1267					    sizeof(struct nfs_page),
1268					    0, SLAB_HWCACHE_ALIGN,
1269					    NULL);
1270	if (nfs_page_cachep == NULL)
1271		return -ENOMEM;
1272
1273	return 0;
1274}
1275
1276void nfs_destroy_nfspagecache(void)
1277{
1278	kmem_cache_destroy(nfs_page_cachep);
1279}
1280
1281static const struct rpc_call_ops nfs_pgio_common_ops = {
1282	.rpc_call_prepare = nfs_pgio_prepare,
1283	.rpc_call_done = nfs_pgio_result,
1284	.rpc_release = nfs_pgio_release,
1285};
1286
1287const struct nfs_pageio_ops nfs_pgio_rw_ops = {
1288	.pg_test = nfs_generic_pg_test,
1289	.pg_doio = nfs_generic_pg_pgios,
1290};
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/fs/nfs/pagelist.c
   4 *
   5 * A set of helper functions for managing NFS read and write requests.
   6 * The main purpose of these routines is to provide support for the
   7 * coalescing of several requests into a single RPC call.
   8 *
   9 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
  10 *
  11 */
  12
  13#include <linux/slab.h>
  14#include <linux/file.h>
  15#include <linux/sched.h>
  16#include <linux/sunrpc/clnt.h>
  17#include <linux/nfs.h>
  18#include <linux/nfs3.h>
  19#include <linux/nfs4.h>
  20#include <linux/nfs_fs.h>
  21#include <linux/nfs_page.h>
  22#include <linux/nfs_mount.h>
  23#include <linux/export.h>
  24#include <linux/filelock.h>
  25
  26#include "internal.h"
  27#include "pnfs.h"
  28#include "nfstrace.h"
  29#include "fscache.h"
  30
  31#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
  32
  33static struct kmem_cache *nfs_page_cachep;
  34static const struct rpc_call_ops nfs_pgio_common_ops;
  35
  36struct nfs_page_iter_page {
  37	const struct nfs_page *req;
  38	size_t count;
  39};
  40
  41static void nfs_page_iter_page_init(struct nfs_page_iter_page *i,
  42				    const struct nfs_page *req)
  43{
  44	i->req = req;
  45	i->count = 0;
  46}
  47
  48static void nfs_page_iter_page_advance(struct nfs_page_iter_page *i, size_t sz)
  49{
  50	const struct nfs_page *req = i->req;
  51	size_t tmp = i->count + sz;
  52
  53	i->count = (tmp < req->wb_bytes) ? tmp : req->wb_bytes;
  54}
  55
  56static struct page *nfs_page_iter_page_get(struct nfs_page_iter_page *i)
  57{
  58	const struct nfs_page *req = i->req;
  59	struct page *page;
  60
  61	if (i->count != req->wb_bytes) {
  62		size_t base = i->count + req->wb_pgbase;
  63		size_t len = PAGE_SIZE - offset_in_page(base);
  64
  65		page = nfs_page_to_page(req, base);
  66		nfs_page_iter_page_advance(i, len);
  67		return page;
  68	}
  69	return NULL;
  70}
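/*
 * Editorial sketch (not part of the file): typical consumption of the
 * iterator above, yielding each page backing @req in turn.
 */
static inline unsigned int
example_count_req_pages(const struct nfs_page *req)
{
	struct nfs_page_iter_page i;
	unsigned int npages = 0;

	nfs_page_iter_page_init(&i, req);
	while (nfs_page_iter_page_get(&i) != NULL)
		npages++;
	return npages;
}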
  71
  72static struct nfs_pgio_mirror *
  73nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
  74{
  75	if (desc->pg_ops->pg_get_mirror)
  76		return desc->pg_ops->pg_get_mirror(desc, idx);
  77	return &desc->pg_mirrors[0];
  78}
  79
  80struct nfs_pgio_mirror *
  81nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
  82{
  83	return nfs_pgio_get_mirror(desc, desc->pg_mirror_idx);
  84}
  85EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);
  86
  87static u32
  88nfs_pgio_set_current_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
  89{
  90	if (desc->pg_ops->pg_set_mirror)
  91		return desc->pg_ops->pg_set_mirror(desc, idx);
  92	return desc->pg_mirror_idx;
  93}
  94
  95void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
  96		       struct nfs_pgio_header *hdr,
  97		       void (*release)(struct nfs_pgio_header *hdr))
  98{
  99	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 100
 101
 102	hdr->req = nfs_list_entry(mirror->pg_list.next);
 103	hdr->inode = desc->pg_inode;
 104	hdr->cred = nfs_req_openctx(hdr->req)->cred;
 105	hdr->io_start = req_offset(hdr->req);
 106	hdr->good_bytes = mirror->pg_count;
 107	hdr->io_completion = desc->pg_io_completion;
 108	hdr->dreq = desc->pg_dreq;
 109	nfs_netfs_set_pgio_header(hdr, desc);
 110	hdr->release = release;
 111	hdr->completion_ops = desc->pg_completion_ops;
 112	if (hdr->completion_ops->init_hdr)
 113		hdr->completion_ops->init_hdr(hdr);
 114
 115	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
 116}
 117EXPORT_SYMBOL_GPL(nfs_pgheader_init);
 118
 119void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
 120{
 121	unsigned int new = pos - hdr->io_start;
 122
 123	trace_nfs_pgio_error(hdr, error, pos);
 124	if (hdr->good_bytes > new) {
 125		hdr->good_bytes = new;
 126		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
 127		if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
 128			hdr->error = error;
 129	}
 130}
 131
 132static inline struct nfs_page *nfs_page_alloc(void)
 133{
 134	struct nfs_page *p =
 135		kmem_cache_zalloc(nfs_page_cachep, nfs_io_gfp_mask());
 136	if (p)
 137		INIT_LIST_HEAD(&p->wb_list);
 138	return p;
 139}
 140
 141static inline void
 142nfs_page_free(struct nfs_page *p)
 143{
 144	kmem_cache_free(nfs_page_cachep, p);
 145}
 146
 147/**
 148 * nfs_iocounter_wait - wait for i/o to complete
 149 * @l_ctx: nfs_lock_context with io_counter to use
 150 *
 151 * returns -ERESTARTSYS if interrupted by a fatal signal.
 152 * Otherwise returns 0 once the io_count hits 0.
 153 */
 154int
 155nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
 156{
 157	return wait_var_event_killable(&l_ctx->io_count,
 158				       !atomic_read(&l_ctx->io_count));
 159}
 160
 161/**
 162 * nfs_async_iocounter_wait - wait on a rpc_waitqueue for I/O
 163 * to complete
 164 * @task: the rpc_task that should wait
 165 * @l_ctx: nfs_lock_context with io_counter to check
 166 *
 167 * Returns true if there is outstanding I/O to wait on and the
 168 * task has been put to sleep.
 169 */
 170bool
 171nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
 172{
 173	struct inode *inode = d_inode(l_ctx->open_context->dentry);
 174	bool ret = false;
 175
 176	if (atomic_read(&l_ctx->io_count) > 0) {
 177		rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
 178		ret = true;
 179	}
 180
 181	if (atomic_read(&l_ctx->io_count) == 0) {
 182		rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
 183		ret = false;
 184	}
 185
 186	return ret;
 187}
 188EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
 189
 190/*
 191 * nfs_page_set_headlock - set the request PG_HEADLOCK
 192 * @req: request that is to be locked
 193 *
 194 * this lock must be held when modifying req->wb_head
 195 *
 196 * return 0 on success, < 0 on error
 197 */
 198int
 199nfs_page_set_headlock(struct nfs_page *req)
 200{
 201	if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
 202		return 0;
 203
 204	set_bit(PG_CONTENDED1, &req->wb_flags);
 205	smp_mb__after_atomic();
 206	return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
 207				TASK_UNINTERRUPTIBLE);
 208}
 209
 210/*
 211 * nfs_page_clear_headlock - clear the request PG_HEADLOCK
  212 * @req: request that is to be unlocked
 213 */
 214void
 215nfs_page_clear_headlock(struct nfs_page *req)
 216{
 217	clear_bit_unlock(PG_HEADLOCK, &req->wb_flags);
 218	smp_mb__after_atomic();
 219	if (!test_bit(PG_CONTENDED1, &req->wb_flags))
 220		return;
 221	wake_up_bit(&req->wb_flags, PG_HEADLOCK);
 222}
 223
 224/*
 225 * nfs_page_group_lock - lock the head of the page group
 226 * @req: request in group that is to be locked
 227 *
 228 * this lock must be held when traversing or modifying the page
 229 * group list
 230 *
 231 * return 0 on success, < 0 on error
 232 */
 233int
 234nfs_page_group_lock(struct nfs_page *req)
 235{
 236	int ret;
 237
 238	ret = nfs_page_set_headlock(req);
 239	if (ret || req->wb_head == req)
 240		return ret;
 241	return nfs_page_set_headlock(req->wb_head);
 242}
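/*
 * Editorial note: unlike the v4.6 variant above, the group lock is now
 * two PG_HEADLOCK bits: the subrequest's own and then, if distinct,
 * the head's, taken in that order. nfs_page_group_unlock() below
 * releases them in the reverse order, head first.
 */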
 243
 244/*
 245 * nfs_page_group_unlock - unlock the head of the page group
 246 * @req: request in group that is to be unlocked
 247 */
 248void
 249nfs_page_group_unlock(struct nfs_page *req)
 250{
 251	if (req != req->wb_head)
 252		nfs_page_clear_headlock(req->wb_head);
 253	nfs_page_clear_headlock(req);
 254}
 255
 256/*
 257 * nfs_page_group_sync_on_bit_locked
 258 *
 259 * must be called with page group lock held
 260 */
 261static bool
 262nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
 263{
 264	struct nfs_page *head = req->wb_head;
 265	struct nfs_page *tmp;
 266
 267	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
 268	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));
 269
 270	tmp = req->wb_this_page;
 271	while (tmp != req) {
 272		if (!test_bit(bit, &tmp->wb_flags))
 273			return false;
 274		tmp = tmp->wb_this_page;
 275	}
 276
 277	/* true! reset all bits */
 278	tmp = req;
 279	do {
 280		clear_bit(bit, &tmp->wb_flags);
 281		tmp = tmp->wb_this_page;
 282	} while (tmp != req);
 283
 284	return true;
 285}
 286
 287/*
 288 * nfs_page_group_sync_on_bit - set bit on current request, but only
 289 *   return true if the bit is set for all requests in page group
 290 * @req - request in page group
 291 * @bit - PG_* bit that is used to sync page group
 292 */
 293bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
 294{
 295	bool ret;
 296
 297	nfs_page_group_lock(req);
 298	ret = nfs_page_group_sync_on_bit_locked(req, bit);
 299	nfs_page_group_unlock(req);
 300
 301	return ret;
 302}
 303
 304/*
 305 * nfs_page_group_init - Initialize the page group linkage for @req
 306 * @req - a new nfs request
 307 * @prev - the previous request in page group, or NULL if @req is the first
 308 *         or only request in the group (the head).
 309 */
 310static inline void
 311nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
 312{
 313	struct inode *inode;
 314	WARN_ON_ONCE(prev == req);
 315
 316	if (!prev) {
 317		/* a head request */
 318		req->wb_head = req;
 319		req->wb_this_page = req;
 320	} else {
 321		/* a subrequest */
 322		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
 323		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
 324		req->wb_head = prev->wb_head;
 325		req->wb_this_page = prev->wb_this_page;
 326		prev->wb_this_page = req;
 327
 328		/* All subrequests take a ref on the head request until
 329		 * nfs_page_group_destroy is called */
 330		kref_get(&req->wb_head->wb_kref);
 331
 332		/* grab extra ref and bump the request count if head request
 333		 * has extra ref from the write/commit path to handle handoff
 334		 * between write and commit lists. */
 335		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
 336			inode = nfs_page_to_inode(req);
 337			set_bit(PG_INODE_REF, &req->wb_flags);
 338			kref_get(&req->wb_kref);
 339			atomic_long_inc(&NFS_I(inode)->nrequests);
 340		}
 341	}
 342}
 343
 344/*
 345 * nfs_page_group_destroy - sync the destruction of page groups
 346 * @req - request that no longer needs the page group
 347 *
 348 * releases the page group reference from each member once all
 349 * members have called this function.
 350 */
 351static void
 352nfs_page_group_destroy(struct kref *kref)
 353{
 354	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
 355	struct nfs_page *head = req->wb_head;
 356	struct nfs_page *tmp, *next;
 357
 358	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
 359		goto out;
 360
 361	tmp = req;
 362	do {
 363		next = tmp->wb_this_page;
 364		/* unlink and free */
 365		tmp->wb_this_page = tmp;
 366		tmp->wb_head = tmp;
 367		nfs_free_request(tmp);
 368		tmp = next;
 369	} while (tmp != req);
 370out:
 371	/* subrequests must release the ref on the head request */
 372	if (head != req)
 373		nfs_release_request(head);
 374}
 375
 376static struct nfs_page *nfs_page_create(struct nfs_lock_context *l_ctx,
 377					unsigned int pgbase, pgoff_t index,
 378					unsigned int offset, unsigned int count)
 379{
 380	struct nfs_page		*req;
 381	struct nfs_open_context *ctx = l_ctx->open_context;
 382
 383	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
 384		return ERR_PTR(-EBADF);
 385	/* try to allocate the request struct */
 386	req = nfs_page_alloc();
 387	if (req == NULL)
 388		return ERR_PTR(-ENOMEM);
 389
 390	req->wb_lock_context = l_ctx;
 391	refcount_inc(&l_ctx->count);
 392	atomic_inc(&l_ctx->io_count);
 393
 394	/* Initialize the request struct. Initially, we assume a
 395	 * long write-back delay. This will be adjusted in
 396	 * update_nfs_request below if the region is not locked. */
 397	req->wb_pgbase = pgbase;
 398	req->wb_index = index;
 399	req->wb_offset = offset;
 400	req->wb_bytes = count;
 401	kref_init(&req->wb_kref);
 402	req->wb_nio = 0;
 403	return req;
 404}
 405
 406static void nfs_page_assign_folio(struct nfs_page *req, struct folio *folio)
 407{
 408	if (folio != NULL) {
 409		req->wb_folio = folio;
 410		folio_get(folio);
 411		set_bit(PG_FOLIO, &req->wb_flags);
 412	}
 413}
 414
 415static void nfs_page_assign_page(struct nfs_page *req, struct page *page)
 416{
 417	if (page != NULL) {
 418		req->wb_page = page;
 419		get_page(page);
 420	}
 421}
 422
 423/**
 424 * nfs_page_create_from_page - Create an NFS read/write request.
 425 * @ctx: open context to use
 426 * @page: page to write
 427 * @pgbase: starting offset within the page for the write
 428 * @offset: file offset for the write
 429 * @count: number of bytes to read/write
 430 *
 431 * The page must be locked by the caller. This makes sure we never
 432 * create two different requests for the same page.
  433 * The caller must ensure it is safe to sleep in this function.
 434 */
 435struct nfs_page *nfs_page_create_from_page(struct nfs_open_context *ctx,
 436					   struct page *page,
 437					   unsigned int pgbase, loff_t offset,
 438					   unsigned int count)
 439{
 440	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
 441	struct nfs_page *ret;
 442
 443	if (IS_ERR(l_ctx))
 444		return ERR_CAST(l_ctx);
 445	ret = nfs_page_create(l_ctx, pgbase, offset >> PAGE_SHIFT,
 446			      offset_in_page(offset), count);
 447	if (!IS_ERR(ret)) {
 448		nfs_page_assign_page(ret, page);
 449		nfs_page_group_init(ret, NULL);
 450	}
 451	nfs_put_lock_context(l_ctx);
 452	return ret;
 453}
 454
 455/**
 456 * nfs_page_create_from_folio - Create an NFS read/write request.
 457 * @ctx: open context to use
 458 * @folio: folio to write
 459 * @offset: starting offset within the folio for the write
 460 * @count: number of bytes to read/write
 461 *
 462 * The page must be locked by the caller. This makes sure we never
 463 * create two different requests for the same page.
  464 * The caller must ensure it is safe to sleep in this function.
 465 */
 466struct nfs_page *nfs_page_create_from_folio(struct nfs_open_context *ctx,
 467					    struct folio *folio,
 468					    unsigned int offset,
 469					    unsigned int count)
 470{
 471	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
 472	struct nfs_page *ret;
 473
 474	if (IS_ERR(l_ctx))
 475		return ERR_CAST(l_ctx);
 476	ret = nfs_page_create(l_ctx, offset, folio->index, offset, count);
 477	if (!IS_ERR(ret)) {
 478		nfs_page_assign_folio(ret, folio);
 479		nfs_page_group_init(ret, NULL);
 480	}
 481	nfs_put_lock_context(l_ctx);
 482	return ret;
 483}
 484
 485static struct nfs_page *
 486nfs_create_subreq(struct nfs_page *req,
 487		  unsigned int pgbase,
 488		  unsigned int offset,
 489		  unsigned int count)
 490{
 491	struct nfs_page *last;
 492	struct nfs_page *ret;
 493	struct folio *folio = nfs_page_to_folio(req);
 494	struct page *page = nfs_page_to_page(req, pgbase);
 495
 496	ret = nfs_page_create(req->wb_lock_context, pgbase, req->wb_index,
 497			      offset, count);
 498	if (!IS_ERR(ret)) {
 499		if (folio)
 500			nfs_page_assign_folio(ret, folio);
 501		else
 502			nfs_page_assign_page(ret, page);
 503		/* find the last request */
 504		for (last = req->wb_head;
 505		     last->wb_this_page != req->wb_head;
 506		     last = last->wb_this_page)
 507			;
 508
 509		nfs_lock_request(ret);
 510		nfs_page_group_init(ret, last);
 511		ret->wb_nio = req->wb_nio;
 512	}
 513	return ret;
 514}
 515
 516/**
 517 * nfs_unlock_request - Unlock request and wake up sleepers.
 518 * @req: pointer to request
 519 */
 520void nfs_unlock_request(struct nfs_page *req)
 521{
 522	clear_bit_unlock(PG_BUSY, &req->wb_flags);
 523	smp_mb__after_atomic();
 524	if (!test_bit(PG_CONTENDED2, &req->wb_flags))
 525		return;
 526	wake_up_bit(&req->wb_flags, PG_BUSY);
 527}
 528
 529/**
 530 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 531 * @req: pointer to request
 532 */
 533void nfs_unlock_and_release_request(struct nfs_page *req)
 534{
 535	nfs_unlock_request(req);
 536	nfs_release_request(req);
 537}
 538
 539/*
 540 * nfs_clear_request - Free up all resources allocated to the request
  541 * @req: pointer to request
 542 *
 543 * Release page and open context resources associated with a read/write
 544 * request after it has completed.
 545 */
 546static void nfs_clear_request(struct nfs_page *req)
 547{
 548	struct folio *folio = nfs_page_to_folio(req);
 549	struct page *page = req->wb_page;
 550	struct nfs_lock_context *l_ctx = req->wb_lock_context;
 551	struct nfs_open_context *ctx;
 552
 553	if (folio != NULL) {
 554		folio_put(folio);
 555		req->wb_folio = NULL;
 556		clear_bit(PG_FOLIO, &req->wb_flags);
 557	} else if (page != NULL) {
 558		put_page(page);
 559		req->wb_page = NULL;
 560	}
 561	if (l_ctx != NULL) {
 562		if (atomic_dec_and_test(&l_ctx->io_count)) {
 563			wake_up_var(&l_ctx->io_count);
 564			ctx = l_ctx->open_context;
 565			if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
 566				rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
 567		}
 568		nfs_put_lock_context(l_ctx);
 569		req->wb_lock_context = NULL;
 570	}
 571}
 572
 573/**
 574 * nfs_free_request - Release the count on an NFS read/write request
 575 * @req: request to release
 576 *
 577 * Note: Should never be called with the spinlock held!
 578 */
 579void nfs_free_request(struct nfs_page *req)
 580{
 581	WARN_ON_ONCE(req->wb_this_page != req);
 582
 583	/* extra debug: make sure no sync bits are still set */
 584	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
 585	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
 586	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
 587	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
 588	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));
 589
 590	/* Release struct file and open context */
 591	nfs_clear_request(req);
 592	nfs_page_free(req);
 593}
 594
 595void nfs_release_request(struct nfs_page *req)
 596{
 597	kref_put(&req->wb_kref, nfs_page_group_destroy);
 598}
 599EXPORT_SYMBOL_GPL(nfs_release_request);
 600
 601/*
 602 * nfs_generic_pg_test - determine if requests can be coalesced
 603 * @desc: pointer to descriptor
 604 * @prev: previous request in desc, or NULL
 605 * @req: this request
 606 *
 607 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 608 * the number of bytes of @req that can be coalesced.
 609 */
 610size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
 611			   struct nfs_page *prev, struct nfs_page *req)
 612{
 613	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 614
 615
 616	if (mirror->pg_count > mirror->pg_bsize) {
 617		/* should never happen */
 618		WARN_ON_ONCE(1);
 619		return 0;
 620	}
 621
 622	/*
 623	 * Limit the request size so that we can still allocate a page array
 624	 * for it without upsetting the slab allocator.
 625	 */
 626	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
 627			sizeof(struct page *) > PAGE_SIZE)
 628		return 0;
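
	/*
	 * Editor's note, worked example: with 4 KiB pages and 8-byte
	 * struct page pointers, the check above allows at most
	 * PAGE_SIZE / sizeof(struct page *) = 512 pages per RPC, i.e.
	 * roughly 2 MiB, so the pagevec allocated in nfs_generic_pgio()
	 * never exceeds a single page.
	 */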
 629
 630	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
 631}
 632EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
 633
 634struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
 635{
 636	struct nfs_pgio_header *hdr = ops->rw_alloc_header();
 637
 638	if (hdr) {
 639		INIT_LIST_HEAD(&hdr->pages);
 640		hdr->rw_ops = ops;
 641	}
 642	return hdr;
 643}
 644EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
 645
 646/**
 647 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 648 * @hdr: A header that has had nfs_generic_pgio called
 649 *
 650 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 651 * be called again.
 652 */
 654static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
 655{
 656	if (hdr->args.context)
 657		put_nfs_open_context(hdr->args.context);
 658	if (hdr->page_array.pagevec != hdr->page_array.page_array)
 659		kfree(hdr->page_array.pagevec);
 660}
 661
 662/**
 663 * nfs_pgio_header_free - Free a read or write header
 664 * @hdr: The header to free
 665 */
 666void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
 667{
 668	nfs_pgio_data_destroy(hdr);
 669	hdr->rw_ops->rw_free_header(hdr);
 670}
 671EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
 672
 673/**
 674 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 675 * @hdr: The pageio hdr
 676 * @pgbase: base offset into the first page of the page array
 677 * @count: Number of bytes to read/write
 678 * @how: How to commit data (writes only)
 679 * @cinfo: Commit information for the call (writes only)
 680 */
 681static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr, unsigned int pgbase,
 682			      unsigned int count, int how,
 683			      struct nfs_commit_info *cinfo)
 684{
 685	struct nfs_page *req = hdr->req;
 686
 687	/* Set up the RPC argument and reply structs
 688	 * NB: take care not to mess about with hdr->commit et al. */
 689
 690	hdr->args.fh     = NFS_FH(hdr->inode);
 691	hdr->args.offset = req_offset(req);
 692	/* pnfs_set_layoutcommit needs this */
 693	hdr->mds_offset = hdr->args.offset;
 694	hdr->args.pgbase = pgbase;
 695	hdr->args.pages  = hdr->page_array.pagevec;
 696	hdr->args.count  = count;
 697	hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
 698	hdr->args.lock_context = req->wb_lock_context;
 699	hdr->args.stable  = NFS_UNSTABLE;
 700	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
 701	case 0:
 702		break;
 703	case FLUSH_COND_STABLE:
 704		if (nfs_reqs_to_commit(cinfo))
 705			break;
 706		fallthrough;
 707	default:
 708		hdr->args.stable = NFS_FILE_SYNC;
 709	}
 710
 711	hdr->res.fattr   = &hdr->fattr;
 712	hdr->res.count   = 0;
 713	hdr->res.eof     = 0;
 714	hdr->res.verf    = &hdr->verf;
 715	nfs_fattr_init(&hdr->fattr);
 716}
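
/*
 * Editor's summary of the stable-write selection above (writes only):
 *
 *	how				resulting hdr->args.stable
 *	0				NFS_UNSTABLE (commit later)
 *	FLUSH_COND_STABLE, commits queued	NFS_UNSTABLE (commit later)
 *	FLUSH_COND_STABLE, none queued		NFS_FILE_SYNC
 *	FLUSH_STABLE			NFS_FILE_SYNC
 */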
 717
 718/**
 719 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 720 * @task: The current task
 721 * @calldata: pageio header to prepare
 722 */
 723static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
 724{
 725	struct nfs_pgio_header *hdr = calldata;
 726	int err;
 727	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
 728	if (err)
 729		rpc_exit(task, err);
 730}
 731
 732int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
 733		      const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
 734		      const struct rpc_call_ops *call_ops, int how, int flags,
 735		      struct nfsd_file *localio)
 736{
 737	struct rpc_task *task;
 738	struct rpc_message msg = {
 739		.rpc_argp = &hdr->args,
 740		.rpc_resp = &hdr->res,
 741		.rpc_cred = cred,
 742	};
 743	struct rpc_task_setup task_setup_data = {
 744		.rpc_client = clnt,
 745		.task = &hdr->task,
 746		.rpc_message = &msg,
 747		.callback_ops = call_ops,
 748		.callback_data = hdr,
 749		.workqueue = nfsiod_workqueue,
 750		.flags = RPC_TASK_ASYNC | flags,
 751	};
 752
 753	if (nfs_server_capable(hdr->inode, NFS_CAP_MOVEABLE))
 754		task_setup_data.flags |= RPC_TASK_MOVEABLE;
 755
 756	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);
 757
 758	dprintk("NFS: initiated pgio call "
 759		"(req %s/%llu, %u bytes @ offset %llu)\n",
 760		hdr->inode->i_sb->s_id,
 761		(unsigned long long)NFS_FILEID(hdr->inode),
 762		hdr->args.count,
 763		(unsigned long long)hdr->args.offset);
 764
 765	if (localio)
 766		return nfs_local_doio(NFS_SERVER(hdr->inode)->nfs_client,
 767				      localio, hdr, call_ops);
 768
 769	task = rpc_run_task(&task_setup_data);
 770	if (IS_ERR(task))
 771		return PTR_ERR(task);
 772	rpc_put_task(task);
 773	return 0;
 774}
 775EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
 776
 777/**
 778 * nfs_pgio_error - Clean up from a pageio error
 779 * @hdr: pageio header
 780 */
 781static void nfs_pgio_error(struct nfs_pgio_header *hdr)
 782{
 783	set_bit(NFS_IOHDR_REDO, &hdr->flags);
 784	hdr->completion_ops->completion(hdr);
 785}
 786
 787/**
 788 * nfs_pgio_release - Release pageio data
 789 * @calldata: The pageio header to release
 790 */
 791static void nfs_pgio_release(void *calldata)
 792{
 793	struct nfs_pgio_header *hdr = calldata;
 794	hdr->completion_ops->completion(hdr);
 795}
 796
 797static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
 798				   unsigned int bsize)
 799{
 800	INIT_LIST_HEAD(&mirror->pg_list);
 801	mirror->pg_bytes_written = 0;
 802	mirror->pg_count = 0;
 803	mirror->pg_bsize = bsize;
 804	mirror->pg_base = 0;
 805	mirror->pg_recoalesce = 0;
 806}
 807
 808/**
 809 * nfs_pageio_init - initialise a page io descriptor
 810 * @desc: pointer to descriptor
 811 * @inode: pointer to inode
 812 * @pg_ops: pointer to pageio operations
 813 * @compl_ops: pointer to pageio completion operations
 814 * @rw_ops: pointer to nfs read/write operations
 815 * @bsize: io block size
 816 * @io_flags: extra parameters for the io function
 817 */
 818void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
 819		     struct inode *inode,
 820		     const struct nfs_pageio_ops *pg_ops,
 821		     const struct nfs_pgio_completion_ops *compl_ops,
 822		     const struct nfs_rw_ops *rw_ops,
 823		     size_t bsize,
 824		     int io_flags)
 825{
 826	desc->pg_moreio = 0;
 827	desc->pg_inode = inode;
 828	desc->pg_ops = pg_ops;
 829	desc->pg_completion_ops = compl_ops;
 830	desc->pg_rw_ops = rw_ops;
 831	desc->pg_ioflags = io_flags;
 832	desc->pg_error = 0;
 833	desc->pg_lseg = NULL;
 834	desc->pg_io_completion = NULL;
 835	desc->pg_dreq = NULL;
 836	nfs_netfs_reset_pageio_descriptor(desc);
 837	desc->pg_bsize = bsize;
 838
 839	desc->pg_mirror_count = 1;
 840	desc->pg_mirror_idx = 0;
 841
 842	desc->pg_mirrors_dynamic = NULL;
 843	desc->pg_mirrors = desc->pg_mirrors_static;
 844	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
 845	desc->pg_maxretrans = 0;
 846}
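
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the typical lifecycle of a descriptor, as driven by callers such as
 * the read and write paths. example_flush_requests() is hypothetical;
 * nfs_pgio_rw_ops is the generic ops table defined at the end of this
 * file.
 */
static void __maybe_unused
example_flush_requests(struct inode *inode,
		       const struct nfs_pgio_completion_ops *compl_ops,
		       const struct nfs_rw_ops *rw_ops,
		       struct list_head *reqs)
{
	struct nfs_pageio_descriptor pgio;

	nfs_pageio_init(&pgio, inode, &nfs_pgio_rw_ops, compl_ops, rw_ops,
			NFS_SERVER(inode)->wsize, 0);
	while (!list_empty(reqs)) {
		/* on success, adding a request moves it off @reqs */
		struct nfs_page *req = nfs_list_entry(reqs->next);

		if (!nfs_pageio_add_request(&pgio, req))
			break;	/* pgio.pg_error records the failure */
	}
	nfs_pageio_complete(&pgio);
}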
 847
 848/**
 849 * nfs_pgio_result - Basic pageio error handling
 850 * @task: The task that ran
 851 * @calldata: Pageio header to check
 852 */
 853static void nfs_pgio_result(struct rpc_task *task, void *calldata)
 854{
 855	struct nfs_pgio_header *hdr = calldata;
 856	struct inode *inode = hdr->inode;
 857
 858	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
 859		return;
 860	if (task->tk_status < 0)
 861		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
 862	else
 863		hdr->rw_ops->rw_result(task, hdr);
 864}
 865
 866/*
 867 * Set up the RPC arguments for the given read or write requests; the
 868 * caller initiates the RPC. The pages must have been locked by the caller.
 869 *
 870 * It may happen that the page we're passed is not marked dirty.
 871 * This is the case if nfs_updatepage detects a conflicting request
 872 * that has been written but not committed.
 873 */
 874int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 875		     struct nfs_pgio_header *hdr)
 876{
 877	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 878
 879	struct nfs_page		*req;
 880	struct page		**pages,
 881				*last_page;
 882	struct list_head *head = &mirror->pg_list;
 883	struct nfs_commit_info cinfo;
 884	struct nfs_page_array *pg_array = &hdr->page_array;
 885	unsigned int pagecount, pageused;
 886	unsigned int pg_base = offset_in_page(mirror->pg_base);
 887	gfp_t gfp_flags = nfs_io_gfp_mask();
 888
 889	pagecount = nfs_page_array_len(pg_base, mirror->pg_count);
 890	pg_array->npages = pagecount;
 891
 892	if (pagecount <= ARRAY_SIZE(pg_array->page_array))
 893		pg_array->pagevec = pg_array->page_array;
 894	else {
 895		pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
 896		if (!pg_array->pagevec) {
 897			pg_array->npages = 0;
 898			nfs_pgio_error(hdr);
 899			desc->pg_error = -ENOMEM;
 900			return desc->pg_error;
 901		}
 902	}
 903
 904	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
 905	pages = hdr->page_array.pagevec;
 906	last_page = NULL;
 907	pageused = 0;
 908	while (!list_empty(head)) {
 909		struct nfs_page_iter_page i;
 910		struct page *page;
 911
 912		req = nfs_list_entry(head->next);
 913		nfs_list_move_request(req, &hdr->pages);
 914
 915		if (req->wb_pgbase == 0)
 916			last_page = NULL;
 917
 918		nfs_page_iter_page_init(&i, req);
 919		while ((page = nfs_page_iter_page_get(&i)) != NULL) {
 920			if (last_page != page) {
 921				pageused++;
 922				if (pageused > pagecount)
 923					goto full;
 924				*pages++ = last_page = page;
 925			}
 926		}
 927	}
 928full:
 929	if (WARN_ON_ONCE(pageused != pagecount)) {
 930		nfs_pgio_error(hdr);
 931		desc->pg_error = -EINVAL;
 932		return desc->pg_error;
 933	}
 934
 935	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
 936	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
 937		desc->pg_ioflags &= ~FLUSH_COND_STABLE;
 938
 939	/* Set up the argument struct */
 940	nfs_pgio_rpcsetup(hdr, pg_base, mirror->pg_count, desc->pg_ioflags,
 941			  &cinfo);
 942	desc->pg_rpc_callops = &nfs_pgio_common_ops;
 943	return 0;
 944}
 945EXPORT_SYMBOL_GPL(nfs_generic_pgio);
 946
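/*
 * Editor's note: nfs_generic_pg_pgios() is the .pg_doio callback of the
 * generic nfs_pgio_rw_ops table at the end of this file; it is the
 * default way a coalesced list of requests becomes an RPC.
 */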
 947static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
 948{
 949	struct nfs_pgio_header *hdr;
 950	int ret;
 951	unsigned short task_flags = 0;
 952
 953	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
 954	if (!hdr) {
 955		desc->pg_error = -ENOMEM;
 956		return desc->pg_error;
 957	}
 958	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
 959	ret = nfs_generic_pgio(desc, hdr);
 960	if (ret == 0) {
 961		struct nfs_client *clp = NFS_SERVER(hdr->inode)->nfs_client;
 962
 963		struct nfsd_file *localio =
 964			nfs_local_open_fh(clp, hdr->cred,
 965					  hdr->args.fh, hdr->args.context->mode);
 966
 967		if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion)
 968			task_flags = RPC_TASK_MOVEABLE;
 969		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
 970					hdr,
 971					hdr->cred,
 972					NFS_PROTO(hdr->inode),
 973					desc->pg_rpc_callops,
 974					desc->pg_ioflags,
 975					RPC_TASK_CRED_NOREF | task_flags,
 976					localio);
 977	}
 978	return ret;
 979}
 980
 981static struct nfs_pgio_mirror *
 982nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
 983		unsigned int mirror_count)
 984{
 985	struct nfs_pgio_mirror *ret;
 986	unsigned int i;
 987
 988	kfree(desc->pg_mirrors_dynamic);
 989	desc->pg_mirrors_dynamic = NULL;
 990	if (mirror_count == 1)
 991		return desc->pg_mirrors_static;
 992	ret = kmalloc_array(mirror_count, sizeof(*ret), nfs_io_gfp_mask());
 993	if (ret != NULL) {
 994		for (i = 0; i < mirror_count; i++)
 995			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
 996		desc->pg_mirrors_dynamic = ret;
 997	}
 998	return ret;
 999}
1000
1001/*
1002 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
1003 *				by calling the pg_get_mirror_count op
1004 */
1005static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
1006				       struct nfs_page *req)
1007{
1008	unsigned int mirror_count = 1;
1009
1010	if (pgio->pg_ops->pg_get_mirror_count)
1011		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
1012	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
1013		return;
1014
1015	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
1016		pgio->pg_error = -EINVAL;
1017		return;
1018	}
1019
1020	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
1021	if (pgio->pg_mirrors == NULL) {
1022		pgio->pg_error = -ENOMEM;
1023		pgio->pg_mirrors = pgio->pg_mirrors_static;
1024		mirror_count = 1;
1025	}
1026	pgio->pg_mirror_count = mirror_count;
1027}
1028
1029static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
1030{
1031	pgio->pg_mirror_count = 1;
1032	pgio->pg_mirror_idx = 0;
1033	pgio->pg_mirrors = pgio->pg_mirrors_static;
1034	kfree(pgio->pg_mirrors_dynamic);
1035	pgio->pg_mirrors_dynamic = NULL;
1036}
1037
1038static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
1039		const struct nfs_lock_context *l2)
1040{
1041	return l1->lockowner == l2->lockowner;
1042}
1043
1044static bool nfs_page_is_contiguous(const struct nfs_page *prev,
1045				   const struct nfs_page *req)
1046{
1047	size_t prev_end = prev->wb_pgbase + prev->wb_bytes;
1048
1049	if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
1050		return false;
1051	if (req->wb_pgbase == 0)
1052		return prev_end == nfs_page_max_length(prev);
1053	if (req->wb_pgbase == prev_end) {
1054		struct folio *folio = nfs_page_to_folio(req);
1055		if (folio)
1056			return folio == nfs_page_to_folio(prev);
1057		return req->wb_page == prev->wb_page;
1058	}
1059	return false;
1060}
1061
1062/**
1063 * nfs_coalesce_size - test two requests for compatibility
1064 * @prev: pointer to nfs_page
1065 * @req: pointer to nfs_page
1066 * @pgio: pointer to nfs_pageio_descriptor
1067 *
1068 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
1069 * page data area they describe is contiguous, and that their RPC
1070 * credentials, NFSv4 open state, and lockowners are the same.
1071 *
1072 * Returns size of the request that can be coalesced
1073 */
1074static unsigned int nfs_coalesce_size(struct nfs_page *prev,
1075				      struct nfs_page *req,
1076				      struct nfs_pageio_descriptor *pgio)
1077{
1078	struct file_lock_context *flctx;
1079
1080	if (prev) {
1081		if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
1082			return 0;
1083		flctx = locks_inode_context(d_inode(nfs_req_openctx(req)->dentry));
1084		if (flctx != NULL &&
1085		    !(list_empty_careful(&flctx->flc_posix) &&
1086		      list_empty_careful(&flctx->flc_flock)) &&
1087		    !nfs_match_lock_context(req->wb_lock_context,
1088					    prev->wb_lock_context))
1089			return 0;
1090		if (!nfs_page_is_contiguous(prev, req))
1091			return 0;
1092	}
1093	return pgio->pg_ops->pg_test(pgio, prev, req);
1094}
1095
1096/**
1097 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
1098 * @desc: destination io descriptor
1099 * @req: request
1100 *
1101 * Returns the size of 'req' if it was fully coalesced into 'desc';
1102 * otherwise returns the number of bytes that could be coalesced.
1103 */
1104static unsigned int
1105nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
1106		struct nfs_page *req)
1107{
1108	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1109	struct nfs_page *prev = NULL;
1110	unsigned int size;
1111
1112	if (list_empty(&mirror->pg_list)) {
1113		if (desc->pg_ops->pg_init)
1114			desc->pg_ops->pg_init(desc, req);
1115		if (desc->pg_error < 0)
1116			return 0;
1117		mirror->pg_base = req->wb_pgbase;
1118		mirror->pg_count = 0;
1119		mirror->pg_recoalesce = 0;
1120	} else
1121		prev = nfs_list_entry(mirror->pg_list.prev);
1122
1123	if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
1124		if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
1125			desc->pg_error = -ETIMEDOUT;
1126		else
1127			desc->pg_error = -EIO;
1128		return 0;
1129	}
1130
1131	size = nfs_coalesce_size(prev, req, desc);
1132	if (size < req->wb_bytes)
1133		return size;
1134	nfs_list_move_request(req, &mirror->pg_list);
1135	mirror->pg_count += req->wb_bytes;
1136	return req->wb_bytes;
1137}
1138
1139/*
1140 * Helper for nfs_pageio_add_request and nfs_pageio_complete
1141 */
1142static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
1143{
1144	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1145
1146	if (!list_empty(&mirror->pg_list)) {
1147		int error = desc->pg_ops->pg_doio(desc);
1148		if (error < 0)
1149			desc->pg_error = error;
1150		if (list_empty(&mirror->pg_list))
1151			mirror->pg_bytes_written += mirror->pg_count;
1152	}
1153}
1154
1155static void
1156nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
1157		struct nfs_page *req)
1158{
1159	LIST_HEAD(head);
1160
1161	nfs_list_move_request(req, &head);
1162	desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
1163}
1164
1165/**
1166 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
1167 * @desc: destination io descriptor
1168 * @req: request
1169 *
1170 * This may split a request into subrequests which are all part of the
1171 * same page group. If so, it will submit @req as the last one, to ensure
1172 * the pointer to @req is still valid in case of failure.
1173 *
1174 * Returns true if the request 'req' was successfully coalesced into the
1175 * existing list of pages 'desc'.
1176 */
1177static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
1178			   struct nfs_page *req)
1179{
1180	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1181	struct nfs_page *subreq;
1182	unsigned int size, subreq_size;
1183
1184	nfs_page_group_lock(req);
1185
1186	subreq = req;
1187	subreq_size = subreq->wb_bytes;
1188	for(;;) {
1189		size = nfs_pageio_do_add_request(desc, subreq);
1190		if (size == subreq_size) {
1191			/* We successfully submitted a request */
1192			if (subreq == req)
1193				break;
1194			req->wb_pgbase += size;
1195			req->wb_bytes -= size;
1196			req->wb_offset += size;
1197			subreq_size = req->wb_bytes;
1198			subreq = req;
1199			continue;
1200		}
1201		if (WARN_ON_ONCE(subreq != req)) {
1202			nfs_page_group_unlock(req);
1203			nfs_pageio_cleanup_request(desc, subreq);
1204			subreq = req;
1205			subreq_size = req->wb_bytes;
1206			nfs_page_group_lock(req);
1207		}
1208		if (!size) {
1209			/* Can't coalesce any more, so do I/O */
1210			nfs_page_group_unlock(req);
1211			desc->pg_moreio = 1;
1212			nfs_pageio_doio(desc);
1213			if (desc->pg_error < 0 || mirror->pg_recoalesce)
1214				return 0;
1215			/* retry add_request for this subreq */
1216			nfs_page_group_lock(req);
1217			continue;
1218		}
1219		subreq = nfs_create_subreq(req, req->wb_pgbase,
1220				req->wb_offset, size);
1221		if (IS_ERR(subreq))
1222			goto err_ptr;
1223		subreq_size = size;
1224	}
1225
1226	nfs_page_group_unlock(req);
1227	return 1;
1228err_ptr:
1229	desc->pg_error = PTR_ERR(subreq);
1230	nfs_page_group_unlock(req);
1231	return 0;
1232}
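
/*
 * Editor's note, worked example: if @req covers 16 KiB but pg_test()
 * only admits another 4 KiB into the current RPC, the loop above queues
 * a 4 KiB subrequest, advances @req past those bytes, and repeats,
 * issuing I/O whenever nothing more can be coalesced, until the whole
 * range has been queued.
 */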
1233
1234static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
1235{
1236	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1237	LIST_HEAD(head);
1238
1239	do {
1240		list_splice_init(&mirror->pg_list, &head);
1241		mirror->pg_recoalesce = 0;
1242
1243		while (!list_empty(&head)) {
1244			struct nfs_page *req;
1245
1246			req = list_first_entry(&head, struct nfs_page, wb_list);
1247			if (__nfs_pageio_add_request(desc, req))
1248				continue;
1249			if (desc->pg_error < 0) {
1250				list_splice_tail(&head, &mirror->pg_list);
1251				mirror->pg_recoalesce = 1;
1252				return 0;
1253			}
1254			break;
1255		}
1256	} while (mirror->pg_recoalesce);
1257	return 1;
1258}
1259
1260static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
1261		struct nfs_page *req)
1262{
1263	int ret;
1264
1265	do {
1266		ret = __nfs_pageio_add_request(desc, req);
1267		if (ret)
1268			break;
1269		if (desc->pg_error < 0)
1270			break;
1271		ret = nfs_do_recoalesce(desc);
1272	} while (ret);
1273
1274	return ret;
1275}
1276
1277static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
1278{
1279	u32 midx;
1280	struct nfs_pgio_mirror *mirror;
1281
1282	if (!desc->pg_error)
1283		return;
1284
1285	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
1286		mirror = nfs_pgio_get_mirror(desc, midx);
1287		desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
1288				desc->pg_error);
1289	}
1290}
1291
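/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list
 * @desc: destination io descriptor
 * @req: request
 *
 * If mirroring is enabled, a duplicate of @req is queued to each extra
 * mirror before @req itself is queued to mirror 0.
 *
 * Returns non-zero if the request was successfully coalesced.
 */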
1292int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
1293			   struct nfs_page *req)
1294{
1295	u32 midx;
1296	unsigned int pgbase, offset, bytes;
1297	struct nfs_page *dupreq;
1298
1299	pgbase = req->wb_pgbase;
1300	offset = req->wb_offset;
1301	bytes = req->wb_bytes;
1302
1303	nfs_pageio_setup_mirroring(desc, req);
1304	if (desc->pg_error < 0)
1305		goto out_failed;
1306
1307	/* Create the mirror instances first, and fire them off */
1308	for (midx = 1; midx < desc->pg_mirror_count; midx++) {
1309		nfs_page_group_lock(req);
1310
1311		dupreq = nfs_create_subreq(req,
1312				pgbase, offset, bytes);
1313
1314		nfs_page_group_unlock(req);
1315		if (IS_ERR(dupreq)) {
1316			desc->pg_error = PTR_ERR(dupreq);
1317			goto out_failed;
1318		}
1319
1320		nfs_pgio_set_current_mirror(desc, midx);
1321		if (!nfs_pageio_add_request_mirror(desc, dupreq))
1322			goto out_cleanup_subreq;
1323	}
1324
1325	nfs_pgio_set_current_mirror(desc, 0);
1326	if (!nfs_pageio_add_request_mirror(desc, req))
1327		goto out_failed;
1328
1329	return 1;
1330
1331out_cleanup_subreq:
1332	nfs_pageio_cleanup_request(desc, dupreq);
1333out_failed:
1334	nfs_pageio_error_cleanup(desc);
1335	return 0;
1336}
1337
1338/**
1339 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
1340 *				nfs_pageio_descriptor
1341 * @desc: pointer to io descriptor
1342 * @mirror_idx: index of the mirror to complete
1343 */
1344static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
1345				       u32 mirror_idx)
1346{
1347	struct nfs_pgio_mirror *mirror;
1348	u32 restore_idx;
1349
1350	restore_idx = nfs_pgio_set_current_mirror(desc, mirror_idx);
1351	mirror = nfs_pgio_current_mirror(desc);
1352
1353	for (;;) {
1354		nfs_pageio_doio(desc);
1355		if (desc->pg_error < 0 || !mirror->pg_recoalesce)
1356			break;
1357		if (!nfs_do_recoalesce(desc))
1358			break;
1359	}
1360	nfs_pgio_set_current_mirror(desc, restore_idx);
1361}
1362
1363/**
1364 * nfs_pageio_resend - Transfer requests to new descriptor and resend
1365 * @desc: the pageio descriptor to add requests to
1366 * @hdr: the pgio header to move requests from
1367 *
1368 * Try to move each request (nfs_page) from @hdr to @desc then attempt
1369 * to send them.
1370 *
1371 * Returns 0 on success and < 0 on error.
1372 */
1373int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
1374		      struct nfs_pgio_header *hdr)
1375{
1376	LIST_HEAD(pages);
1377
1378	desc->pg_io_completion = hdr->io_completion;
1379	desc->pg_dreq = hdr->dreq;
1380	nfs_netfs_set_pageio_descriptor(desc, hdr);
1381	list_splice_init(&hdr->pages, &pages);
1382	while (!list_empty(&pages)) {
1383		struct nfs_page *req = nfs_list_entry(pages.next);
1384
1385		if (!nfs_pageio_add_request(desc, req))
1386			break;
1387	}
1388	nfs_pageio_complete(desc);
1389	if (!list_empty(&pages)) {
1390		int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
1391		hdr->completion_ops->error_cleanup(&pages, err);
1392		nfs_set_pgio_error(hdr, err, hdr->io_start);
1393		return err;
1394	}
1395	return 0;
1396}
1397EXPORT_SYMBOL_GPL(nfs_pageio_resend);
1398
1399/**
1400 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
1401 * @desc: pointer to io descriptor
1402 */
1403void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
1404{
1405	u32 midx;
1406
1407	for (midx = 0; midx < desc->pg_mirror_count; midx++)
1408		nfs_pageio_complete_mirror(desc, midx);
1409
1410	if (desc->pg_error < 0)
1411		nfs_pageio_error_cleanup(desc);
1412	if (desc->pg_ops->pg_cleanup)
1413		desc->pg_ops->pg_cleanup(desc);
1414	nfs_pageio_cleanup_mirroring(desc);
1415}
1416
1417/**
1418 * nfs_pageio_cond_complete - Conditional I/O completion
1419 * @desc: pointer to io descriptor
1420 * @index: page index
1421 *
1422 * It is important to ensure that processes don't try to take locks
1423 * on non-contiguous ranges of pages as that might deadlock. This
1424 * function should be called before attempting to wait on a locked
1425 * nfs_page. It will complete the I/O if the page index 'index'
1426 * is not contiguous with the existing list of pages in 'desc'.
1427 */
1428void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
1429{
1430	struct nfs_pgio_mirror *mirror;
1431	struct nfs_page *prev;
1432	struct folio *folio;
1433	u32 midx;
1434
1435	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
1436		mirror = nfs_pgio_get_mirror(desc, midx);
1437		if (!list_empty(&mirror->pg_list)) {
1438			prev = nfs_list_entry(mirror->pg_list.prev);
1439			folio = nfs_page_to_folio(prev);
1440			if (folio) {
1441				if (index == folio_next_index(folio))
1442					continue;
1443			} else if (index == prev->wb_index + 1)
1444				continue;
1445			/*
1446			 * We will submit more requests after these. Indicate
1447			 * this to the underlying layers.
1448			 */
1449			desc->pg_moreio = 1;
1450			nfs_pageio_complete(desc);
1451			break;
1452		}
1453	}
1454}
1455
1456/*
1457 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
1458 */
1459void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
1460{
1461	nfs_pageio_complete(pgio);
1462}
1463
1464int __init nfs_init_nfspagecache(void)
1465{
1466	nfs_page_cachep = kmem_cache_create("nfs_page",
1467					    sizeof(struct nfs_page),
1468					    0, SLAB_HWCACHE_ALIGN,
1469					    NULL);
1470	if (nfs_page_cachep == NULL)
1471		return -ENOMEM;
1472
1473	return 0;
1474}
1475
1476void nfs_destroy_nfspagecache(void)
1477{
1478	kmem_cache_destroy(nfs_page_cachep);
1479}
1480
1481static const struct rpc_call_ops nfs_pgio_common_ops = {
1482	.rpc_call_prepare = nfs_pgio_prepare,
1483	.rpc_call_done = nfs_pgio_result,
1484	.rpc_release = nfs_pgio_release,
1485};
1486
1487const struct nfs_pageio_ops nfs_pgio_rw_ops = {
1488	.pg_test = nfs_generic_pg_test,
1489	.pg_doio = nfs_generic_pg_pgios,
1490};