v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Module for pnfs flexfile layout driver.
   4 *
   5 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
   6 *
   7 * Tao Peng <bergwolf@primarydata.com>
   8 */
   9
  10#include <linux/nfs_fs.h>
  11#include <linux/nfs_mount.h>
  12#include <linux/nfs_page.h>
  13#include <linux/module.h>
  14#include <linux/sched/mm.h>
  15
  16#include <linux/sunrpc/metrics.h>
  17
  18#include "flexfilelayout.h"
  19#include "../nfs4session.h"
  20#include "../nfs4idmap.h"
  21#include "../internal.h"
  22#include "../delegation.h"
  23#include "../nfs4trace.h"
  24#include "../iostat.h"
  25#include "../nfs.h"
  26#include "../nfs42.h"
  27
  28#define NFSDBG_FACILITY         NFSDBG_PNFS_LD
  29
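/*
 * FF_LAYOUT_POLL_RETRY_MAX is the backoff (in jiffies) applied via
 * rpc_delay() when a DS returns NFS4ERR_DELAY or NFS4ERR_GRACE;
 * FF_LAYOUTRETURN_MAXERR bounds how many DS errors get encoded into
 * a single LAYOUTRETURN.
 */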
  30#define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
  31#define FF_LAYOUTRETURN_MAXERR 20
  32
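/*
 * Cap on pg_maxretrans for soft/softerr mounts; applied in
 * ff_layout_pg_init_read() and ff_layout_pg_init_write() below.
 */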
  33static unsigned short io_maxretrans;
  34
  35static const struct pnfs_commit_ops ff_layout_commit_ops;
  36static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
  37		struct nfs_pgio_header *hdr);
  38static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
  39			       struct nfs42_layoutstat_devinfo *devinfo,
  40			       int dev_limit);
  41static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
  42			      const struct nfs42_layoutstat_devinfo *devinfo,
  43			      struct nfs4_ff_layout_mirror *mirror);
  44
  45static struct pnfs_layout_hdr *
  46ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
  47{
  48	struct nfs4_flexfile_layout *ffl;
  49
  50	ffl = kzalloc(sizeof(*ffl), gfp_flags);
  51	if (ffl) {
  52		pnfs_init_ds_commit_info(&ffl->commit_info);
  53		INIT_LIST_HEAD(&ffl->error_list);
  54		INIT_LIST_HEAD(&ffl->mirrors);
  55		ffl->last_report_time = ktime_get();
  56		ffl->commit_info.ops = &ff_layout_commit_ops;
  57		return &ffl->generic_hdr;
  58	} else
  59		return NULL;
  60}
  61
  62static void
  63ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
  64{
  65	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
  66	struct nfs4_ff_layout_ds_err *err, *n;
  67
  68	list_for_each_entry_safe(err, n, &ffl->error_list, list) {
  69		list_del(&err->list);
  70		kfree(err);
  71	}
  72	kfree_rcu(ffl, generic_hdr.plh_rcu);
  73}
  74
  75static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
  76{
  77	__be32 *p;
  78
  79	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
  80	if (unlikely(p == NULL))
  81		return -ENOBUFS;
  82	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
  83	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
  84	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
  85		p[0], p[1], p[2], p[3]);
  86	return 0;
  87}
  88
  89static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
  90{
  91	__be32 *p;
  92
  93	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
  94	if (unlikely(!p))
  95		return -ENOBUFS;
  96	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
  97	nfs4_print_deviceid(devid);
  98	return 0;
  99}
 100
 101static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
 102{
 103	__be32 *p;
 104
 105	p = xdr_inline_decode(xdr, 4);
 106	if (unlikely(!p))
 107		return -ENOBUFS;
 108	fh->size = be32_to_cpup(p++);
 109	if (fh->size > NFS_MAXFHSIZE) {
 110		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
 111		       fh->size);
 112		return -EOVERFLOW;
 113	}
 114	/* fh.data */
 115	p = xdr_inline_decode(xdr, fh->size);
 116	if (unlikely(!p))
 117		return -ENOBUFS;
 118	memcpy(&fh->data, p, fh->size);
 119	dprintk("%s: fh len %d\n", __func__, fh->size);
 120
 121	return 0;
 122}
 123
 124/*
 125 * Currently only stringified uids and gids are accepted.
  126 * I.e., Kerberos is not supported to the DSes, so no principals.
  127 *
  128 * That means that one common function will suffice, but when
  129 * principals are added, this should be split to accommodate
 130 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 131 */
 132static int
 133decode_name(struct xdr_stream *xdr, u32 *id)
 134{
 135	__be32 *p;
 136	int len;
 137
 138	/* opaque_length(4)*/
 139	p = xdr_inline_decode(xdr, 4);
 140	if (unlikely(!p))
 141		return -ENOBUFS;
 142	len = be32_to_cpup(p++);
 143	if (len < 0)
 144		return -EINVAL;
 145
 146	dprintk("%s: len %u\n", __func__, len);
 147
 148	/* opaque body */
 149	p = xdr_inline_decode(xdr, len);
 150	if (unlikely(!p))
 151		return -ENOBUFS;
 152
 153	if (!nfs_map_string_to_numeric((char *)p, len, id))
 154		return -EINVAL;
 155
 156	return 0;
 157}
 158
 159static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
 160		const struct nfs4_ff_layout_mirror *m2)
 161{
 162	int i, j;
 163
 164	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
 165		return false;
 166	for (i = 0; i < m1->fh_versions_cnt; i++) {
 167		bool found_fh = false;
 168		for (j = 0; j < m2->fh_versions_cnt; j++) {
 169			if (nfs_compare_fh(&m1->fh_versions[i],
 170					&m2->fh_versions[j]) == 0) {
 171				found_fh = true;
 172				break;
 173			}
 174		}
 175		if (!found_fh)
 176			return false;
 177	}
 178	return true;
 179}
 180
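/*
 * Add a freshly decoded mirror to the layout's mirror list, unless an
 * equivalent one (same deviceid and filehandle list) is already there,
 * in which case the existing mirror is returned with an extra
 * reference.  refcount_inc_not_zero() skips entries that are racing
 * towards free in ff_layout_put_mirror().
 */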
 181static struct nfs4_ff_layout_mirror *
 182ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
 183		struct nfs4_ff_layout_mirror *mirror)
 184{
 185	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
 186	struct nfs4_ff_layout_mirror *pos;
 187	struct inode *inode = lo->plh_inode;
 188
 189	spin_lock(&inode->i_lock);
 190	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
 191		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
 192			continue;
 193		if (!ff_mirror_match_fh(mirror, pos))
 194			continue;
 195		if (refcount_inc_not_zero(&pos->ref)) {
 196			spin_unlock(&inode->i_lock);
 197			return pos;
 198		}
 199	}
 200	list_add(&mirror->mirrors, &ff_layout->mirrors);
 201	mirror->layout = lo;
 202	spin_unlock(&inode->i_lock);
 203	return mirror;
 204}
 205
 206static void
 207ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
 208{
 209	struct inode *inode;
 210	if (mirror->layout == NULL)
 211		return;
 212	inode = mirror->layout->plh_inode;
 213	spin_lock(&inode->i_lock);
 214	list_del(&mirror->mirrors);
 215	spin_unlock(&inode->i_lock);
 216	mirror->layout = NULL;
 217}
 218
 219static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
 220{
 221	struct nfs4_ff_layout_mirror *mirror;
 222
 223	mirror = kzalloc(sizeof(*mirror), gfp_flags);
 224	if (mirror != NULL) {
 225		spin_lock_init(&mirror->lock);
 226		refcount_set(&mirror->ref, 1);
 227		INIT_LIST_HEAD(&mirror->mirrors);
 228	}
 229	return mirror;
 230}
 231
 232static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
 233{
 234	const struct cred	*cred;
 235
 236	ff_layout_remove_mirror(mirror);
 237	kfree(mirror->fh_versions);
 238	cred = rcu_access_pointer(mirror->ro_cred);
 239	put_cred(cred);
 240	cred = rcu_access_pointer(mirror->rw_cred);
 241	put_cred(cred);
 242	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
 243	kfree(mirror);
 244}
 245
 246static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
 247{
 248	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
 249		ff_layout_free_mirror(mirror);
 250}
 251
 252static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
 253{
 254	u32 i;
 255
 256	for (i = 0; i < fls->mirror_array_cnt; i++)
 257		ff_layout_put_mirror(fls->mirror_array[i]);
 258}
 259
 260static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
 261{
 262	if (fls) {
 263		ff_layout_free_mirror_array(fls);
 264		kfree(fls);
 265	}
 266}
 267
 268static bool
 269ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
 270		struct pnfs_layout_segment *l2)
 271{
 272	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
  273	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
 274	u32 i;
 275
 276	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
 277		return false;
 278	for (i = 0; i < fl1->mirror_array_cnt; i++) {
 279		if (fl1->mirror_array[i] != fl2->mirror_array[i])
 280			return false;
 281	}
 282	return true;
 283}
 284
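/*
 * Ordering predicate for inserting a layout segment: ranges are
 * ordered first by iomode and then by byte range, so that
 * pnfs_generic_layout_insert_lseg() keeps the list sorted.
 */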
 285static bool
 286ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
 287		const struct pnfs_layout_range *l2)
 288{
 289	u64 end1, end2;
 290
 291	if (l1->iomode != l2->iomode)
 292		return l1->iomode != IOMODE_READ;
 293	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
 294	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
 295	if (end1 < l2->offset)
 296		return false;
 297	if (end2 < l1->offset)
 298		return true;
 299	return l2->offset <= l1->offset;
 300}
 301
 302static bool
 303ff_lseg_merge(struct pnfs_layout_segment *new,
 304		struct pnfs_layout_segment *old)
 305{
 306	u64 new_end, old_end;
 307
 308	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
 309		return false;
 310	if (new->pls_range.iomode != old->pls_range.iomode)
 311		return false;
 312	old_end = pnfs_calc_offset_end(old->pls_range.offset,
 313			old->pls_range.length);
 314	if (old_end < new->pls_range.offset)
 315		return false;
 316	new_end = pnfs_calc_offset_end(new->pls_range.offset,
 317			new->pls_range.length);
 318	if (new_end < old->pls_range.offset)
 319		return false;
 320	if (!ff_lseg_match_mirrors(new, old))
 321		return false;
 322
 323	/* Mergeable: copy info from 'old' to 'new' */
 324	if (new_end < old_end)
 325		new_end = old_end;
 326	if (new->pls_range.offset < old->pls_range.offset)
 327		new->pls_range.offset = old->pls_range.offset;
 328	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
 329			new_end);
 330	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
 331		set_bit(NFS_LSEG_ROC, &new->pls_flags);
 332	return true;
 333}
 334
 335static void
 336ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
 337		struct pnfs_layout_segment *lseg,
 338		struct list_head *free_me)
 339{
 340	pnfs_generic_layout_insert_lseg(lo, lseg,
 341			ff_lseg_range_is_after,
 342			ff_lseg_merge,
 343			free_me);
 344}
 345
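/* Simple selection sort: order mirrors by decreasing efficiency. */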
 346static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
 347{
 348	int i, j;
 349
 350	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
 351		for (j = i + 1; j < fls->mirror_array_cnt; j++)
 352			if (fls->mirror_array[i]->efficiency <
 353			    fls->mirror_array[j]->efficiency)
 354				swap(fls->mirror_array[i],
 355				     fls->mirror_array[j]);
 356	}
 357}
 358
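/*
 * Decode a flexfiles layout from the LAYOUTGET reply: the stripe unit
 * and mirror count, then for each mirror a deviceid, efficiency,
 * stateid, a list of filehandles, and stringified uid/gid that are
 * turned into a kernel credential.  The scratch page lets
 * xdr_inline_decode() handle items that straddle page boundaries.
 */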
 359static struct pnfs_layout_segment *
 360ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
 361		     struct nfs4_layoutget_res *lgr,
 362		     gfp_t gfp_flags)
 363{
 364	struct pnfs_layout_segment *ret;
 365	struct nfs4_ff_layout_segment *fls = NULL;
 366	struct xdr_stream stream;
 367	struct xdr_buf buf;
 368	struct page *scratch;
 369	u64 stripe_unit;
 370	u32 mirror_array_cnt;
 371	__be32 *p;
 372	int i, rc;
 373
 374	dprintk("--> %s\n", __func__);
 375	scratch = alloc_page(gfp_flags);
 376	if (!scratch)
 377		return ERR_PTR(-ENOMEM);
 378
 379	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
 380			      lgr->layoutp->len);
 381	xdr_set_scratch_page(&stream, scratch);
 382
 383	/* stripe unit and mirror_array_cnt */
 384	rc = -EIO;
 385	p = xdr_inline_decode(&stream, 8 + 4);
 386	if (!p)
 387		goto out_err_free;
 388
 389	p = xdr_decode_hyper(p, &stripe_unit);
 390	mirror_array_cnt = be32_to_cpup(p++);
 391	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
 392		stripe_unit, mirror_array_cnt);
 393
 394	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
 395	    mirror_array_cnt == 0)
 396		goto out_err_free;
 397
 398	rc = -ENOMEM;
 399	fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
 400			gfp_flags);
 401	if (!fls)
 402		goto out_err_free;
 403
 404	fls->mirror_array_cnt = mirror_array_cnt;
 405	fls->stripe_unit = stripe_unit;
 406
 407	for (i = 0; i < fls->mirror_array_cnt; i++) {
 408		struct nfs4_ff_layout_mirror *mirror;
 409		struct cred *kcred;
 410		const struct cred __rcu *cred;
 411		kuid_t uid;
 412		kgid_t gid;
 413		u32 ds_count, fh_count, id;
 414		int j;
 415
 416		rc = -EIO;
 417		p = xdr_inline_decode(&stream, 4);
 418		if (!p)
 419			goto out_err_free;
 420		ds_count = be32_to_cpup(p);
 421
 422		/* FIXME: allow for striping? */
 423		if (ds_count != 1)
 424			goto out_err_free;
 425
 426		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
 427		if (fls->mirror_array[i] == NULL) {
 428			rc = -ENOMEM;
 429			goto out_err_free;
 430		}
 431
 432		fls->mirror_array[i]->ds_count = ds_count;
 433
 434		/* deviceid */
 435		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
 436		if (rc)
 437			goto out_err_free;
 438
 439		/* efficiency */
 440		rc = -EIO;
 441		p = xdr_inline_decode(&stream, 4);
 442		if (!p)
 443			goto out_err_free;
 444		fls->mirror_array[i]->efficiency = be32_to_cpup(p);
 445
 446		/* stateid */
 447		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
 448		if (rc)
 449			goto out_err_free;
 450
 451		/* fh */
 452		rc = -EIO;
 453		p = xdr_inline_decode(&stream, 4);
 454		if (!p)
 455			goto out_err_free;
 456		fh_count = be32_to_cpup(p);
 457
 458		fls->mirror_array[i]->fh_versions =
 459			kcalloc(fh_count, sizeof(struct nfs_fh),
 460				gfp_flags);
 461		if (fls->mirror_array[i]->fh_versions == NULL) {
 462			rc = -ENOMEM;
 463			goto out_err_free;
 464		}
 465
 466		for (j = 0; j < fh_count; j++) {
 467			rc = decode_nfs_fh(&stream,
 468					   &fls->mirror_array[i]->fh_versions[j]);
 469			if (rc)
 470				goto out_err_free;
 471		}
 472
 473		fls->mirror_array[i]->fh_versions_cnt = fh_count;
 474
 475		/* user */
 476		rc = decode_name(&stream, &id);
 477		if (rc)
 478			goto out_err_free;
 479
 480		uid = make_kuid(&init_user_ns, id);
 481
 482		/* group */
 483		rc = decode_name(&stream, &id);
 484		if (rc)
 485			goto out_err_free;
 486
 487		gid = make_kgid(&init_user_ns, id);
 488
 489		if (gfp_flags & __GFP_FS)
 490			kcred = prepare_kernel_cred(NULL);
 491		else {
 492			unsigned int nofs_flags = memalloc_nofs_save();
 493			kcred = prepare_kernel_cred(NULL);
 494			memalloc_nofs_restore(nofs_flags);
 495		}
 496		rc = -ENOMEM;
 497		if (!kcred)
 498			goto out_err_free;
 499		kcred->fsuid = uid;
 500		kcred->fsgid = gid;
 501		cred = RCU_INITIALIZER(kcred);
 502
 503		if (lgr->range.iomode == IOMODE_READ)
 504			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
 505		else
 506			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
 507
 508		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
 509		if (mirror != fls->mirror_array[i]) {
 510			/* swap cred ptrs so free_mirror will clean up old */
 511			if (lgr->range.iomode == IOMODE_READ) {
 512				cred = xchg(&mirror->ro_cred, cred);
 513				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
 514			} else {
 515				cred = xchg(&mirror->rw_cred, cred);
 516				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
 517			}
 518			ff_layout_free_mirror(fls->mirror_array[i]);
 519			fls->mirror_array[i] = mirror;
 520		}
 521
 522		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
 523			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
 524			from_kuid(&init_user_ns, uid),
 525			from_kgid(&init_user_ns, gid));
 526	}
 527
 528	p = xdr_inline_decode(&stream, 4);
 529	if (!p)
 530		goto out_sort_mirrors;
 531	fls->flags = be32_to_cpup(p);
 532
 533	p = xdr_inline_decode(&stream, 4);
 534	if (!p)
 535		goto out_sort_mirrors;
  536	for (i = 0; i < fls->mirror_array_cnt; i++)
 537		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
 538
 539out_sort_mirrors:
 540	ff_layout_sort_mirrors(fls);
 541	ret = &fls->generic_hdr;
 542	dprintk("<-- %s (success)\n", __func__);
 543out_free_page:
 544	__free_page(scratch);
 545	return ret;
 546out_err_free:
 547	_ff_layout_free_lseg(fls);
 548	ret = ERR_PTR(rc);
 549	dprintk("<-- %s (%d)\n", __func__, rc);
 550	goto out_free_page;
 551}
 552
 553static void
 554ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
 555{
 556	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
 557
 558	dprintk("--> %s\n", __func__);
 559
 560	if (lseg->pls_range.iomode == IOMODE_RW) {
 561		struct nfs4_flexfile_layout *ffl;
 562		struct inode *inode;
 563
 564		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
 565		inode = ffl->generic_hdr.plh_inode;
 566		spin_lock(&inode->i_lock);
 567		pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
 568		spin_unlock(&inode->i_lock);
 569	}
 570	_ff_layout_free_lseg(fls);
 571}
 572
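/*
 * The busy timer measures time with I/O outstanding on a mirror: the
 * clock starts with the first in-flight op, and each completion
 * returns the interval since the previous sample.
 */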
 573static void
 574nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
 575{
 576	/* first IO request? */
 577	if (atomic_inc_return(&timer->n_ops) == 1) {
 578		timer->start_time = now;
 579	}
 580}
 581
 582static ktime_t
 583nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
 584{
 585	ktime_t start;
 586
 587	if (atomic_dec_return(&timer->n_ops) < 0)
 588		WARN_ON_ONCE(1);
 589
 590	start = timer->start_time;
 591	timer->start_time = now;
 592	return ktime_sub(now, start);
 593}
 594
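/*
 * Account the start of an I/O op and decide whether it is time to
 * transmit a LAYOUTSTATS report: the interval defaults to
 * FF_LAYOUTSTATS_REPORT_INTERVAL but may be overridden per-mirror by
 * the server or globally via the layoutstats_timer tunable.
 */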
 595static bool
 596nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
 597			    struct nfs4_ff_layoutstat *layoutstat,
 598			    ktime_t now)
 599{
 600	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
 601	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
 602
 603	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
 604	if (!mirror->start_time)
 605		mirror->start_time = now;
 606	if (mirror->report_interval != 0)
 607		report_interval = (s64)mirror->report_interval * 1000LL;
 608	else if (layoutstats_timer != 0)
 609		report_interval = (s64)layoutstats_timer * 1000LL;
 610	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
 611			report_interval) {
 612		ffl->last_report_time = now;
 613		return true;
 614	}
 615
 616	return false;
 617}
 618
 619static void
 620nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
 621		__u64 requested)
 622{
 623	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
 624
 625	iostat->ops_requested++;
 626	iostat->bytes_requested += requested;
 627}
 628
 629static void
 630nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
 631		__u64 requested,
 632		__u64 completed,
 633		ktime_t time_completed,
 634		ktime_t time_started)
 635{
 636	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
 637	ktime_t completion_time = ktime_sub(time_completed, time_started);
 638	ktime_t timer;
 639
 640	iostat->ops_completed++;
 641	iostat->bytes_completed += completed;
 642	iostat->bytes_not_delivered += requested - completed;
 643
 644	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
 645	iostat->total_busy_time =
 646			ktime_add(iostat->total_busy_time, timer);
 647	iostat->aggregate_completion_time =
 648			ktime_add(iostat->aggregate_completion_time,
 649					completion_time);
 650}
 651
 652static void
 653nfs4_ff_layout_stat_io_start_read(struct inode *inode,
 654		struct nfs4_ff_layout_mirror *mirror,
 655		__u64 requested, ktime_t now)
 656{
 657	bool report;
 658
 659	spin_lock(&mirror->lock);
 660	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
 661	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
 662	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 663	spin_unlock(&mirror->lock);
 664
 665	if (report)
 666		pnfs_report_layoutstat(inode, GFP_KERNEL);
 667}
 668
 669static void
 670nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
 671		struct nfs4_ff_layout_mirror *mirror,
 672		__u64 requested,
 673		__u64 completed)
 674{
 675	spin_lock(&mirror->lock);
 676	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
 677			requested, completed,
 678			ktime_get(), task->tk_start);
 679	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 680	spin_unlock(&mirror->lock);
 681}
 682
 683static void
 684nfs4_ff_layout_stat_io_start_write(struct inode *inode,
 685		struct nfs4_ff_layout_mirror *mirror,
 686		__u64 requested, ktime_t now)
 687{
 688	bool report;
 689
 690	spin_lock(&mirror->lock);
  691	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
 692	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
 693	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 694	spin_unlock(&mirror->lock);
 695
 696	if (report)
 697		pnfs_report_layoutstat(inode, GFP_NOIO);
 698}
 699
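/*
 * Unstable writes are not counted as completed until they have been
 * committed, so zero the byte counts here; the commit path accounts
 * for them in ff_layout_commit_record_layoutstats_done().
 */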
 700static void
 701nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
 702		struct nfs4_ff_layout_mirror *mirror,
 703		__u64 requested,
 704		__u64 completed,
 705		enum nfs3_stable_how committed)
 706{
 707	if (committed == NFS_UNSTABLE)
 708		requested = completed = 0;
 709
 710	spin_lock(&mirror->lock);
 711	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
 712			requested, completed, ktime_get(), task->tk_start);
 713	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 714	spin_unlock(&mirror->lock);
 715}
 716
 717static void
 718ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
 719{
 720	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
 721
 722	if (devid)
 723		nfs4_mark_deviceid_unavailable(devid);
 724}
 725
 726static void
 727ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
 728{
 729	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
 730
 731	if (devid)
 732		nfs4_mark_deviceid_available(devid);
 733}
 734
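/*
 * Scan the mirror array (pre-sorted by efficiency) from start_idx for
 * a DS we can talk to; with check_device set, mirrors whose device has
 * been marked unavailable are skipped as well.
 */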
 735static struct nfs4_pnfs_ds *
 736ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
 737			     u32 start_idx, u32 *best_idx,
 738			     bool check_device)
 739{
 740	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
 741	struct nfs4_ff_layout_mirror *mirror;
 742	struct nfs4_pnfs_ds *ds;
 743	u32 idx;
 744
 745	/* mirrors are initially sorted by efficiency */
 746	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
 747		mirror = FF_LAYOUT_COMP(lseg, idx);
 748		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
 749		if (!ds)
 750			continue;
 751
 752		if (check_device &&
 753		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
 754			continue;
 755
 756		*best_idx = idx;
 757		return ds;
 758	}
 759
 760	return NULL;
 761}
 762
 763static struct nfs4_pnfs_ds *
 764ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
 765				 u32 start_idx, u32 *best_idx)
 766{
 767	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
 768}
 769
 770static struct nfs4_pnfs_ds *
 771ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
 772				   u32 start_idx, u32 *best_idx)
 773{
 774	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
 775}
 776
 777static struct nfs4_pnfs_ds *
 778ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
 779				  u32 start_idx, u32 *best_idx)
 780{
 781	struct nfs4_pnfs_ds *ds;
 782
 783	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
 784	if (ds)
 785		return ds;
 786	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
 787}
 788
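/*
 * Prefer mirrors at or after the one currently in use; if none of
 * those work and we did not start at mirror 0, wrap around and try
 * again from the beginning.
 */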
 789static struct nfs4_pnfs_ds *
 790ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
 791			  u32 *best_idx)
 792{
 793	struct pnfs_layout_segment *lseg = pgio->pg_lseg;
 794	struct nfs4_pnfs_ds *ds;
 795
 796	ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
 797					       best_idx);
 798	if (ds || !pgio->pg_mirror_idx)
 799		return ds;
 800	return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
 801}
 802
 803static void
 804ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
 805		      struct nfs_page *req,
 806		      bool strict_iomode)
 807{
 808	pnfs_put_lseg(pgio->pg_lseg);
 809	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 810					   nfs_req_openctx(req),
 811					   req_offset(req),
 812					   req->wb_bytes,
 813					   IOMODE_READ,
 814					   strict_iomode,
 815					   GFP_KERNEL);
 816	if (IS_ERR(pgio->pg_lseg)) {
 817		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 818		pgio->pg_lseg = NULL;
 819	}
 820}
 821
 822static void
 823ff_layout_pg_check_layout(struct nfs_pageio_descriptor *pgio,
 824			  struct nfs_page *req)
 825{
 826	pnfs_generic_pg_check_layout(pgio);
 827	pnfs_generic_pg_check_range(pgio, req);
 828}
 829
 830static void
 831ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
 832			struct nfs_page *req)
 833{
 834	struct nfs_pgio_mirror *pgm;
 835	struct nfs4_ff_layout_mirror *mirror;
 836	struct nfs4_pnfs_ds *ds;
 837	u32 ds_idx;
 838
 839retry:
 840	ff_layout_pg_check_layout(pgio, req);
 841	/* Use full layout for now */
 842	if (!pgio->pg_lseg) {
 843		ff_layout_pg_get_read(pgio, req, false);
 844		if (!pgio->pg_lseg)
 845			goto out_nolseg;
 846	}
 847	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
 848		ff_layout_pg_get_read(pgio, req, true);
 849		if (!pgio->pg_lseg)
 850			goto out_nolseg;
 851	}
 852
 853	ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
 854	if (!ds) {
 855		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
 856			goto out_mds;
 857		pnfs_generic_pg_cleanup(pgio);
 858		/* Sleep for 1 second before retrying */
 859		ssleep(1);
 860		goto retry;
 861	}
 862
 863	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
 864	pgm = &pgio->pg_mirrors[0];
 865	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
 866
 867	pgio->pg_mirror_idx = ds_idx;
 868
 869	if (NFS_SERVER(pgio->pg_inode)->flags &
 870			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
 871		pgio->pg_maxretrans = io_maxretrans;
 872	return;
 873out_nolseg:
 874	if (pgio->pg_error < 0)
 875		return;
 876out_mds:
 877	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
 878			0, NFS4_MAX_UINT64, IOMODE_READ,
 879			NFS_I(pgio->pg_inode)->layout,
 880			pgio->pg_lseg);
 881	pgio->pg_maxretrans = 0;
 882	nfs_pageio_reset_read_mds(pgio);
 883}
 884
 885static void
 886ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
 887			struct nfs_page *req)
 888{
 889	struct nfs4_ff_layout_mirror *mirror;
 890	struct nfs_pgio_mirror *pgm;
 891	struct nfs4_pnfs_ds *ds;
 892	u32 i;
 893
 894retry:
 895	ff_layout_pg_check_layout(pgio, req);
 896	if (!pgio->pg_lseg) {
 897		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 898						   nfs_req_openctx(req),
 899						   req_offset(req),
 900						   req->wb_bytes,
 901						   IOMODE_RW,
 902						   false,
 903						   GFP_NOFS);
 904		if (IS_ERR(pgio->pg_lseg)) {
 905			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 906			pgio->pg_lseg = NULL;
 907			return;
 908		}
 909	}
 910	/* If no lseg, fall back to write through mds */
 911	if (pgio->pg_lseg == NULL)
 912		goto out_mds;
 913
 914	/* Use a direct mapping of ds_idx to pgio mirror_idx */
 915	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
 916		goto out_eagain;
 917
 918	for (i = 0; i < pgio->pg_mirror_count; i++) {
 919		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
 920		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
 921		if (!ds) {
 922			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
 923				goto out_mds;
 924			pnfs_generic_pg_cleanup(pgio);
 925			/* Sleep for 1 second before retrying */
 926			ssleep(1);
 927			goto retry;
 928		}
 929		pgm = &pgio->pg_mirrors[i];
 930		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
 931	}
 932
 933	if (NFS_SERVER(pgio->pg_inode)->flags &
 934			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
 935		pgio->pg_maxretrans = io_maxretrans;
 936	return;
 937out_eagain:
 938	pnfs_generic_pg_cleanup(pgio);
 939	pgio->pg_error = -EAGAIN;
 940	return;
 941out_mds:
 942	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
 943			0, NFS4_MAX_UINT64, IOMODE_RW,
 944			NFS_I(pgio->pg_inode)->layout,
 945			pgio->pg_lseg);
 946	pgio->pg_maxretrans = 0;
 947	nfs_pageio_reset_write_mds(pgio);
 948	pgio->pg_error = -EAGAIN;
 949}
 950
 951static unsigned int
 952ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
 953				    struct nfs_page *req)
 954{
 955	if (!pgio->pg_lseg) {
 956		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 957						   nfs_req_openctx(req),
 958						   req_offset(req),
 959						   req->wb_bytes,
 960						   IOMODE_RW,
 961						   false,
 962						   GFP_NOFS);
 963		if (IS_ERR(pgio->pg_lseg)) {
 964			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 965			pgio->pg_lseg = NULL;
 966			goto out;
 967		}
 968	}
 969	if (pgio->pg_lseg)
 970		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
 971
 972	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
 973			0, NFS4_MAX_UINT64, IOMODE_RW,
 974			NFS_I(pgio->pg_inode)->layout,
 975			pgio->pg_lseg);
 976	/* no lseg means that pnfs is not in use, so no mirroring here */
 977	nfs_pageio_reset_write_mds(pgio);
 978out:
 979	return 1;
 980}
 981
 982static u32
 983ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
 984{
 985	u32 old = desc->pg_mirror_idx;
 986
 987	desc->pg_mirror_idx = idx;
 988	return old;
 989}
 990
 991static struct nfs_pgio_mirror *
 992ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
 993{
 994	return &desc->pg_mirrors[idx];
 995}
 996
 997static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
 998	.pg_init = ff_layout_pg_init_read,
 999	.pg_test = pnfs_generic_pg_test,
1000	.pg_doio = pnfs_generic_pg_readpages,
1001	.pg_cleanup = pnfs_generic_pg_cleanup,
1002};
1003
1004static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
1005	.pg_init = ff_layout_pg_init_write,
1006	.pg_test = pnfs_generic_pg_test,
1007	.pg_doio = pnfs_generic_pg_writepages,
1008	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1009	.pg_cleanup = pnfs_generic_pg_cleanup,
1010	.pg_get_mirror = ff_layout_pg_get_mirror_write,
1011	.pg_set_mirror = ff_layout_pg_set_mirror_write,
1012};
1013
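/*
 * Redrive a failed write: either reschedule it through pNFS
 * (retry_pnfs) or mark the header for redo and resend through the MDS.
 */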
1014static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1015{
1016	struct rpc_task *task = &hdr->task;
1017
1018	pnfs_layoutcommit_inode(hdr->inode, false);
1019
1020	if (retry_pnfs) {
1021		dprintk("%s Reset task %5u for i/o through pNFS "
1022			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1023			hdr->task.tk_pid,
1024			hdr->inode->i_sb->s_id,
1025			(unsigned long long)NFS_FILEID(hdr->inode),
1026			hdr->args.count,
1027			(unsigned long long)hdr->args.offset);
1028
1029		hdr->completion_ops->reschedule_io(hdr);
1030		return;
1031	}
1032
1033	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1034		dprintk("%s Reset task %5u for i/o through MDS "
1035			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1036			hdr->task.tk_pid,
1037			hdr->inode->i_sb->s_id,
1038			(unsigned long long)NFS_FILEID(hdr->inode),
1039			hdr->args.count,
1040			(unsigned long long)hdr->args.offset);
1041
1042		trace_pnfs_mds_fallback_write_done(hdr->inode,
1043				hdr->args.offset, hdr->args.count,
1044				IOMODE_RW, NFS_I(hdr->inode)->layout,
1045				hdr->lseg);
1046		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1047	}
1048}
1049
1050static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
1051{
1052	u32 idx = hdr->pgio_mirror_idx + 1;
1053	u32 new_idx = 0;
1054
1055	if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
1056		ff_layout_send_layouterror(hdr->lseg);
1057	else
1058		pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1059	pnfs_read_resend_pnfs(hdr, new_idx);
1060}
1061
1062static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1063{
1064	struct rpc_task *task = &hdr->task;
1065
1066	pnfs_layoutcommit_inode(hdr->inode, false);
1067	pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1068
1069	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1070		dprintk("%s Reset task %5u for i/o through MDS "
1071			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1072			hdr->task.tk_pid,
1073			hdr->inode->i_sb->s_id,
1074			(unsigned long long)NFS_FILEID(hdr->inode),
1075			hdr->args.count,
1076			(unsigned long long)hdr->args.offset);
1077
1078		trace_pnfs_mds_fallback_read_done(hdr->inode,
1079				hdr->args.offset, hdr->args.count,
1080				IOMODE_READ, NFS_I(hdr->inode)->layout,
1081				hdr->lseg);
1082		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1083	}
1084}
1085
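/*
 * Triage an NFSv4 DS error: session errors kick off session recovery,
 * DELAY/GRACE back off and retry, stale-filehandle-class errors
 * invalidate the layout, and connection errors delete the deviceid.
 * The return value tells the caller whether to resend through pNFS,
 * fall back to the MDS, or simply retry (-EAGAIN).
 */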
1086static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1087					   struct nfs4_state *state,
1088					   struct nfs_client *clp,
1089					   struct pnfs_layout_segment *lseg,
1090					   u32 idx)
1091{
1092	struct pnfs_layout_hdr *lo = lseg->pls_layout;
1093	struct inode *inode = lo->plh_inode;
1094	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1095	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1096
1097	switch (task->tk_status) {
1098	case -NFS4ERR_BADSESSION:
1099	case -NFS4ERR_BADSLOT:
1100	case -NFS4ERR_BAD_HIGH_SLOT:
1101	case -NFS4ERR_DEADSESSION:
1102	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1103	case -NFS4ERR_SEQ_FALSE_RETRY:
1104	case -NFS4ERR_SEQ_MISORDERED:
1105		dprintk("%s ERROR %d, Reset session. Exchangeid "
1106			"flags 0x%x\n", __func__, task->tk_status,
1107			clp->cl_exchange_flags);
1108		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1109		break;
1110	case -NFS4ERR_DELAY:
1111	case -NFS4ERR_GRACE:
1112		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1113		break;
1114	case -NFS4ERR_RETRY_UNCACHED_REP:
1115		break;
1116	/* Invalidate Layout errors */
1117	case -NFS4ERR_PNFS_NO_LAYOUT:
1118	case -ESTALE:           /* mapped NFS4ERR_STALE */
1119	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
1120	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
1121	case -NFS4ERR_FHEXPIRED:
1122	case -NFS4ERR_WRONG_TYPE:
1123		dprintk("%s Invalid layout error %d\n", __func__,
1124			task->tk_status);
1125		/*
1126		 * Destroy layout so new i/o will get a new layout.
1127		 * Layout will not be destroyed until all current lseg
1128		 * references are put. Mark layout as invalid to resend failed
1129		 * i/o and all i/o waiting on the slot table to the MDS until
1130		 * layout is destroyed and a new valid layout is obtained.
1131		 */
1132		pnfs_destroy_layout(NFS_I(inode));
1133		rpc_wake_up(&tbl->slot_tbl_waitq);
1134		goto reset;
1135	/* RPC connection errors */
1136	case -ECONNREFUSED:
1137	case -EHOSTDOWN:
1138	case -EHOSTUNREACH:
1139	case -ENETUNREACH:
1140	case -EIO:
1141	case -ETIMEDOUT:
1142	case -EPIPE:
1143		dprintk("%s DS connection error %d\n", __func__,
1144			task->tk_status);
1145		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1146				&devid->deviceid);
1147		rpc_wake_up(&tbl->slot_tbl_waitq);
1148		fallthrough;
1149	default:
1150		if (ff_layout_avoid_mds_available_ds(lseg))
1151			return -NFS4ERR_RESET_TO_PNFS;
1152reset:
1153		dprintk("%s Retry through MDS. Error %d\n", __func__,
1154			task->tk_status);
1155		return -NFS4ERR_RESET_TO_MDS;
1156	}
1157	task->tk_status = 0;
1158	return -EAGAIN;
1159}
1160
1161/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1162static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1163					   struct pnfs_layout_segment *lseg,
1164					   u32 idx)
1165{
1166	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1167
1168	switch (task->tk_status) {
1169	/* File access problems. Don't mark the device as unavailable */
1170	case -EACCES:
1171	case -ESTALE:
1172	case -EISDIR:
1173	case -EBADHANDLE:
1174	case -ELOOP:
1175	case -ENOSPC:
1176		break;
1177	case -EJUKEBOX:
1178		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1179		goto out_retry;
1180	default:
1181		dprintk("%s DS connection error %d\n", __func__,
1182			task->tk_status);
1183		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1184				&devid->deviceid);
1185	}
1186	/* FIXME: Need to prevent infinite looping here. */
1187	return -NFS4ERR_RESET_TO_PNFS;
1188out_retry:
1189	task->tk_status = 0;
1190	rpc_restart_call_prepare(task);
1191	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1192	return -EAGAIN;
1193}
1194
1195static int ff_layout_async_handle_error(struct rpc_task *task,
1196					struct nfs4_state *state,
1197					struct nfs_client *clp,
1198					struct pnfs_layout_segment *lseg,
1199					u32 idx)
1200{
1201	int vers = clp->cl_nfs_mod->rpc_vers->number;
1202
1203	if (task->tk_status >= 0) {
1204		ff_layout_mark_ds_reachable(lseg, idx);
1205		return 0;
1206	}
1207
1208	/* Handle the case of an invalid layout segment */
1209	if (!pnfs_is_valid_lseg(lseg))
1210		return -NFS4ERR_RESET_TO_PNFS;
1211
1212	switch (vers) {
1213	case 3:
1214		return ff_layout_async_handle_error_v3(task, lseg, idx);
1215	case 4:
1216		return ff_layout_async_handle_error_v4(task, state, clp,
1217						       lseg, idx);
1218	default:
1219		/* should never happen */
1220		WARN_ON_ONCE(1);
1221		return 0;
1222	}
1223}
1224
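/*
 * Record a DS error against the mirror so it can be reported via
 * LAYOUTERROR/LAYOUTRETURN.  A local errno with no op_status from the
 * server is first mapped onto NFS4ERR_NXIO (connection-class failures)
 * or NFS4ERR_ACCESS.
 */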
1225static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1226					u32 idx, u64 offset, u64 length,
1227					u32 *op_status, int opnum, int error)
1228{
1229	struct nfs4_ff_layout_mirror *mirror;
1230	u32 status = *op_status;
1231	int err;
1232
1233	if (status == 0) {
1234		switch (error) {
1235		case -ETIMEDOUT:
1236		case -EPFNOSUPPORT:
1237		case -EPROTONOSUPPORT:
1238		case -EOPNOTSUPP:
1239		case -ECONNREFUSED:
1240		case -ECONNRESET:
1241		case -EHOSTDOWN:
1242		case -EHOSTUNREACH:
1243		case -ENETUNREACH:
1244		case -EADDRINUSE:
1245		case -ENOBUFS:
1246		case -EPIPE:
1247		case -EPERM:
1248			*op_status = status = NFS4ERR_NXIO;
1249			break;
1250		case -EACCES:
1251			*op_status = status = NFS4ERR_ACCESS;
1252			break;
1253		default:
1254			return;
1255		}
1256	}
1257
1258	mirror = FF_LAYOUT_COMP(lseg, idx);
1259	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1260				       mirror, offset, length, status, opnum,
1261				       GFP_NOIO);
1262
1263	switch (status) {
1264	case NFS4ERR_DELAY:
1265	case NFS4ERR_GRACE:
1266		break;
1267	case NFS4ERR_NXIO:
1268		ff_layout_mark_ds_unreachable(lseg, idx);
1269		/*
1270		 * Don't return the layout if this is a read and we still
1271		 * have layouts to try
1272		 */
1273		if (opnum == OP_READ)
1274			break;
1275		fallthrough;
1276	default:
1277		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
1278						  lseg);
1279	}
1280
1281	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1282}
1283
1284/* NFS_PROTO call done callback routines */
1285static int ff_layout_read_done_cb(struct rpc_task *task,
1286				struct nfs_pgio_header *hdr)
1287{
1288	int err;
1289
1290	if (task->tk_status < 0) {
1291		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1292					    hdr->args.offset, hdr->args.count,
1293					    &hdr->res.op_status, OP_READ,
1294					    task->tk_status);
1295		trace_ff_layout_read_error(hdr);
1296	}
1297
1298	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1299					   hdr->ds_clp, hdr->lseg,
1300					   hdr->pgio_mirror_idx);
1301
1302	trace_nfs4_pnfs_read(hdr, err);
1303	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1304	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1305	switch (err) {
1306	case -NFS4ERR_RESET_TO_PNFS:
1307		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1308		return task->tk_status;
1309	case -NFS4ERR_RESET_TO_MDS:
1310		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1311		return task->tk_status;
1312	case -EAGAIN:
1313		goto out_eagain;
1314	}
1315
1316	return 0;
1317out_eagain:
1318	rpc_restart_call_prepare(task);
1319	return -EAGAIN;
1320}
1321
1322static bool
1323ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1324{
1325	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1326}
1327
1328/*
1329 * We reference the rpc_cred of the first WRITE that triggers the need for
1330 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1331 * rfc5661 is not clear about which credential should be used.
1332 *
 1333 * The flexfiles client should treat FILE_SYNC replies from a DS as DATA_SYNC,
 1334 * so to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 1335 * we always send a layoutcommit after DS writes.
1336 */
1337static void
1338ff_layout_set_layoutcommit(struct inode *inode,
1339		struct pnfs_layout_segment *lseg,
1340		loff_t end_offset)
1341{
1342	if (!ff_layout_need_layoutcommit(lseg))
1343		return;
1344
1345	pnfs_set_layoutcommit(inode, lseg, end_offset);
1346	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1347		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
1348}
1349
1350static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1351		struct nfs_pgio_header *hdr)
1352{
1353	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1354		return;
1355	nfs4_ff_layout_stat_io_start_read(hdr->inode,
1356			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1357			hdr->args.count,
1358			task->tk_start);
1359}
1360
1361static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1362		struct nfs_pgio_header *hdr)
1363{
1364	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1365		return;
1366	nfs4_ff_layout_stat_io_end_read(task,
1367			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1368			hdr->args.count,
1369			hdr->res.count);
1370	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1371}
1372
1373static int ff_layout_read_prepare_common(struct rpc_task *task,
1374					 struct nfs_pgio_header *hdr)
1375{
1376	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1377		rpc_exit(task, -EIO);
1378		return -EIO;
1379	}
1380
1381	ff_layout_read_record_layoutstats_start(task, hdr);
1382	return 0;
1383}
1384
1385/*
1386 * Call ops for the async read/write cases
1387 * In the case of dense layouts, the offset needs to be reset to its
1388 * original value.
1389 */
1390static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1391{
1392	struct nfs_pgio_header *hdr = data;
1393
1394	if (ff_layout_read_prepare_common(task, hdr))
1395		return;
1396
1397	rpc_call_start(task);
1398}
1399
1400static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1401{
1402	struct nfs_pgio_header *hdr = data;
1403
1404	if (nfs4_setup_sequence(hdr->ds_clp,
1405				&hdr->args.seq_args,
1406				&hdr->res.seq_res,
1407				task))
1408		return;
1409
1410	ff_layout_read_prepare_common(task, hdr);
1411}
1412
1413static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1414{
1415	struct nfs_pgio_header *hdr = data;
1416
1417	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
1418
1419	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1420	    task->tk_status == 0) {
1421		nfs4_sequence_done(task, &hdr->res.seq_res);
1422		return;
1423	}
1424
1425	/* Note this may cause RPC to be resent */
1426	hdr->mds_ops->rpc_call_done(task, hdr);
1427}
1428
1429static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1430{
1431	struct nfs_pgio_header *hdr = data;
1432
1433	ff_layout_read_record_layoutstats_done(task, hdr);
1434	rpc_count_iostats_metrics(task,
1435	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1436}
1437
1438static void ff_layout_read_release(void *data)
1439{
1440	struct nfs_pgio_header *hdr = data;
1441
1442	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1443	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1444		ff_layout_resend_pnfs_read(hdr);
1445	else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1446		ff_layout_reset_read(hdr);
1447	pnfs_generic_rw_release(data);
1448}
1449
1450
1451static int ff_layout_write_done_cb(struct rpc_task *task,
1452				struct nfs_pgio_header *hdr)
1453{
1454	loff_t end_offs = 0;
1455	int err;
1456
1457	if (task->tk_status < 0) {
1458		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1459					    hdr->args.offset, hdr->args.count,
1460					    &hdr->res.op_status, OP_WRITE,
1461					    task->tk_status);
1462		trace_ff_layout_write_error(hdr);
1463	}
1464
1465	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1466					   hdr->ds_clp, hdr->lseg,
1467					   hdr->pgio_mirror_idx);
1468
1469	trace_nfs4_pnfs_write(hdr, err);
1470	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1471	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1472	switch (err) {
1473	case -NFS4ERR_RESET_TO_PNFS:
1474		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1475		return task->tk_status;
1476	case -NFS4ERR_RESET_TO_MDS:
1477		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1478		return task->tk_status;
1479	case -EAGAIN:
1480		return -EAGAIN;
1481	}
1482
1483	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1484	    hdr->res.verf->committed == NFS_DATA_SYNC)
1485		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1486
1487	/* Note: if the write is unstable, don't set end_offs until commit */
1488	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1489
 1490	/* zero out fattr since we don't care about DS attrs at all */
1491	hdr->fattr.valid = 0;
1492	if (task->tk_status >= 0)
1493		nfs_writeback_update_inode(hdr);
1494
1495	return 0;
1496}
1497
1498static int ff_layout_commit_done_cb(struct rpc_task *task,
1499				     struct nfs_commit_data *data)
1500{
1501	int err;
1502
1503	if (task->tk_status < 0) {
1504		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1505					    data->args.offset, data->args.count,
1506					    &data->res.op_status, OP_COMMIT,
1507					    task->tk_status);
1508		trace_ff_layout_commit_error(data);
1509	}
1510
1511	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1512					   data->lseg, data->ds_commit_index);
1513
1514	trace_nfs4_pnfs_commit_ds(data, err);
1515	switch (err) {
1516	case -NFS4ERR_RESET_TO_PNFS:
1517		pnfs_generic_prepare_to_resend_writes(data);
1518		return -EAGAIN;
1519	case -NFS4ERR_RESET_TO_MDS:
1520		pnfs_generic_prepare_to_resend_writes(data);
1521		return -EAGAIN;
1522	case -EAGAIN:
1523		rpc_restart_call_prepare(task);
1524		return -EAGAIN;
1525	}
1526
1527	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1528
1529	return 0;
1530}
1531
1532static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1533		struct nfs_pgio_header *hdr)
1534{
1535	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1536		return;
1537	nfs4_ff_layout_stat_io_start_write(hdr->inode,
1538			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1539			hdr->args.count,
1540			task->tk_start);
1541}
1542
1543static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1544		struct nfs_pgio_header *hdr)
1545{
1546	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1547		return;
1548	nfs4_ff_layout_stat_io_end_write(task,
1549			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1550			hdr->args.count, hdr->res.count,
1551			hdr->res.verf->committed);
1552	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1553}
1554
1555static int ff_layout_write_prepare_common(struct rpc_task *task,
1556					  struct nfs_pgio_header *hdr)
1557{
1558	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1559		rpc_exit(task, -EIO);
1560		return -EIO;
1561	}
1562
1563	ff_layout_write_record_layoutstats_start(task, hdr);
1564	return 0;
1565}
1566
1567static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1568{
1569	struct nfs_pgio_header *hdr = data;
1570
1571	if (ff_layout_write_prepare_common(task, hdr))
1572		return;
1573
1574	rpc_call_start(task);
1575}
1576
1577static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1578{
1579	struct nfs_pgio_header *hdr = data;
1580
1581	if (nfs4_setup_sequence(hdr->ds_clp,
1582				&hdr->args.seq_args,
1583				&hdr->res.seq_res,
1584				task))
1585		return;
1586
1587	ff_layout_write_prepare_common(task, hdr);
1588}
1589
1590static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1591{
1592	struct nfs_pgio_header *hdr = data;
1593
1594	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1595	    task->tk_status == 0) {
1596		nfs4_sequence_done(task, &hdr->res.seq_res);
1597		return;
1598	}
1599
1600	/* Note this may cause RPC to be resent */
1601	hdr->mds_ops->rpc_call_done(task, hdr);
1602}
1603
1604static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1605{
1606	struct nfs_pgio_header *hdr = data;
1607
1608	ff_layout_write_record_layoutstats_done(task, hdr);
1609	rpc_count_iostats_metrics(task,
1610	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1611}
1612
1613static void ff_layout_write_release(void *data)
1614{
1615	struct nfs_pgio_header *hdr = data;
1616
1617	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1618	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1619		ff_layout_send_layouterror(hdr->lseg);
1620		ff_layout_reset_write(hdr, true);
1621	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1622		ff_layout_reset_write(hdr, false);
1623	pnfs_generic_rw_release(data);
1624}
1625
1626static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1627		struct nfs_commit_data *cdata)
1628{
1629	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1630		return;
1631	nfs4_ff_layout_stat_io_start_write(cdata->inode,
1632			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1633			0, task->tk_start);
1634}
1635
1636static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1637		struct nfs_commit_data *cdata)
1638{
1639	struct nfs_page *req;
1640	__u64 count = 0;
1641
1642	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1643		return;
1644
1645	if (task->tk_status == 0) {
1646		list_for_each_entry(req, &cdata->pages, wb_list)
1647			count += req->wb_bytes;
1648	}
1649	nfs4_ff_layout_stat_io_end_write(task,
1650			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1651			count, count, NFS_FILE_SYNC);
1652	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1653}
1654
1655static void ff_layout_commit_prepare_common(struct rpc_task *task,
1656		struct nfs_commit_data *cdata)
1657{
1658	ff_layout_commit_record_layoutstats_start(task, cdata);
1659}
1660
1661static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1662{
1663	ff_layout_commit_prepare_common(task, data);
1664	rpc_call_start(task);
1665}
1666
1667static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1668{
1669	struct nfs_commit_data *wdata = data;
1670
1671	if (nfs4_setup_sequence(wdata->ds_clp,
1672				&wdata->args.seq_args,
1673				&wdata->res.seq_res,
1674				task))
1675		return;
1676	ff_layout_commit_prepare_common(task, data);
1677}
1678
1679static void ff_layout_commit_done(struct rpc_task *task, void *data)
1680{
1681	pnfs_generic_write_commit_done(task, data);
1682}
1683
1684static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1685{
1686	struct nfs_commit_data *cdata = data;
1687
1688	ff_layout_commit_record_layoutstats_done(task, cdata);
1689	rpc_count_iostats_metrics(task,
1690	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1691}
1692
1693static void ff_layout_commit_release(void *data)
1694{
1695	struct nfs_commit_data *cdata = data;
1696
1697	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1698	pnfs_generic_commit_release(data);
1699}
1700
1701static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1702	.rpc_call_prepare = ff_layout_read_prepare_v3,
1703	.rpc_call_done = ff_layout_read_call_done,
1704	.rpc_count_stats = ff_layout_read_count_stats,
1705	.rpc_release = ff_layout_read_release,
1706};
1707
1708static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1709	.rpc_call_prepare = ff_layout_read_prepare_v4,
1710	.rpc_call_done = ff_layout_read_call_done,
1711	.rpc_count_stats = ff_layout_read_count_stats,
1712	.rpc_release = ff_layout_read_release,
1713};
1714
1715static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1716	.rpc_call_prepare = ff_layout_write_prepare_v3,
1717	.rpc_call_done = ff_layout_write_call_done,
1718	.rpc_count_stats = ff_layout_write_count_stats,
1719	.rpc_release = ff_layout_write_release,
1720};
1721
1722static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1723	.rpc_call_prepare = ff_layout_write_prepare_v4,
1724	.rpc_call_done = ff_layout_write_call_done,
1725	.rpc_count_stats = ff_layout_write_count_stats,
1726	.rpc_release = ff_layout_write_release,
1727};
1728
1729static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1730	.rpc_call_prepare = ff_layout_commit_prepare_v3,
1731	.rpc_call_done = ff_layout_commit_done,
1732	.rpc_count_stats = ff_layout_commit_count_stats,
1733	.rpc_release = ff_layout_commit_release,
1734};
1735
1736static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1737	.rpc_call_prepare = ff_layout_commit_prepare_v4,
1738	.rpc_call_done = ff_layout_commit_done,
1739	.rpc_count_stats = ff_layout_commit_count_stats,
1740	.rpc_release = ff_layout_commit_release,
1741};
1742
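/*
 * Issue an asynchronous READ to the chosen DS: look up the mirror's
 * DS, rpc client and credential, substitute the DS filehandle and
 * stateid, then hand off to nfs_initiate_pgio().  On failure, either
 * ask for a retry or fall back to the MDS.
 */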
1743static enum pnfs_try_status
1744ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1745{
1746	struct pnfs_layout_segment *lseg = hdr->lseg;
1747	struct nfs4_pnfs_ds *ds;
1748	struct rpc_clnt *ds_clnt;
1749	struct nfs4_ff_layout_mirror *mirror;
1750	const struct cred *ds_cred;
1751	loff_t offset = hdr->args.offset;
1752	u32 idx = hdr->pgio_mirror_idx;
1753	int vers;
1754	struct nfs_fh *fh;
1755
1756	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1757		__func__, hdr->inode->i_ino,
1758		hdr->args.pgbase, (size_t)hdr->args.count, offset);
1759
1760	mirror = FF_LAYOUT_COMP(lseg, idx);
1761	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1762	if (!ds)
1763		goto out_failed;
1764
1765	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1766						   hdr->inode);
1767	if (IS_ERR(ds_clnt))
1768		goto out_failed;
1769
1770	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1771	if (!ds_cred)
1772		goto out_failed;
1773
1774	vers = nfs4_ff_layout_ds_version(mirror);
1775
1776	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1777		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1778
1779	hdr->pgio_done_cb = ff_layout_read_done_cb;
1780	refcount_inc(&ds->ds_clp->cl_count);
1781	hdr->ds_clp = ds->ds_clp;
1782	fh = nfs4_ff_layout_select_ds_fh(mirror);
1783	if (fh)
1784		hdr->args.fh = fh;
1785
1786	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1787
1788	/*
1789	 * Note that if we ever decide to split across DSes,
1790	 * then we may need to handle dense-like offsets.
1791	 */
1792	hdr->args.offset = offset;
1793	hdr->mds_offset = offset;
1794
1795	/* Perform an asynchronous read to ds */
1796	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1797			  vers == 3 ? &ff_layout_read_call_ops_v3 :
1798				      &ff_layout_read_call_ops_v4,
1799			  0, RPC_TASK_SOFTCONN);
1800	put_cred(ds_cred);
1801	return PNFS_ATTEMPTED;
1802
1803out_failed:
1804	if (ff_layout_avoid_mds_available_ds(lseg))
1805		return PNFS_TRY_AGAIN;
1806	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
1807			hdr->args.offset, hdr->args.count,
1808			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
1809	return PNFS_NOT_ATTEMPTED;
1810}
1811
1812/* Perform async writes. */
1813static enum pnfs_try_status
1814ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1815{
1816	struct pnfs_layout_segment *lseg = hdr->lseg;
1817	struct nfs4_pnfs_ds *ds;
1818	struct rpc_clnt *ds_clnt;
1819	struct nfs4_ff_layout_mirror *mirror;
1820	const struct cred *ds_cred;
1821	loff_t offset = hdr->args.offset;
1822	int vers;
1823	struct nfs_fh *fh;
1824	u32 idx = hdr->pgio_mirror_idx;
1825
1826	mirror = FF_LAYOUT_COMP(lseg, idx);
1827	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1828	if (!ds)
1829		goto out_failed;
1830
1831	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1832						   hdr->inode);
1833	if (IS_ERR(ds_clnt))
1834		goto out_failed;
1835
1836	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1837	if (!ds_cred)
1838		goto out_failed;
1839
1840	vers = nfs4_ff_layout_ds_version(mirror);
1841
1842	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1843		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1844		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1845		vers);
1846
1847	hdr->pgio_done_cb = ff_layout_write_done_cb;
1848	refcount_inc(&ds->ds_clp->cl_count);
1849	hdr->ds_clp = ds->ds_clp;
1850	hdr->ds_commit_idx = idx;
1851	fh = nfs4_ff_layout_select_ds_fh(mirror);
1852	if (fh)
1853		hdr->args.fh = fh;
1854
1855	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1856
1857	/*
1858	 * Note that if we ever decide to split across DSes,
1859	 * then we may need to handle dense-like offsets.
1860	 */
1861	hdr->args.offset = offset;
1862
1863	/* Perform an asynchronous write */
1864	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1865			  vers == 3 ? &ff_layout_write_call_ops_v3 :
1866				      &ff_layout_write_call_ops_v4,
1867			  sync, RPC_TASK_SOFTCONN);
1868	put_cred(ds_cred);
1869	return PNFS_ATTEMPTED;
1870
1871out_failed:
1872	if (ff_layout_avoid_mds_available_ds(lseg))
1873		return PNFS_TRY_AGAIN;
1874	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
1875			hdr->args.offset, hdr->args.count,
1876			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
1877	return PNFS_NOT_ATTEMPTED;
1878}
1879
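/*
 * Flexfiles uses a direct 1:1 mapping between a commit bucket index and
 * the mirror (DS) index, so no translation is required here.
 */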
1880static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1881{
1882	return i;
1883}
1884
1885static struct nfs_fh *
1886select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1887{
1888	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1889
1890	/* FIXME: Assume that there is only one NFS version available
1891	 * for the DS.
1892	 */
1893	return &flseg->mirror_array[i]->fh_versions[0];
1894}
1895
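/*
 * Send the COMMIT to the data server recorded at ds_commit_index. If the
 * DS cannot be set up, requeue the commit data so the writes are resent
 * through the MDS and return -EAGAIN.
 */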
1896static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1897{
1898	struct pnfs_layout_segment *lseg = data->lseg;
1899	struct nfs4_pnfs_ds *ds;
1900	struct rpc_clnt *ds_clnt;
1901	struct nfs4_ff_layout_mirror *mirror;
1902	const struct cred *ds_cred;
1903	u32 idx;
1904	int vers, ret;
1905	struct nfs_fh *fh;
1906
1907	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1908	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1909		goto out_err;
1910
1911	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1912	mirror = FF_LAYOUT_COMP(lseg, idx);
1913	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1914	if (!ds)
1915		goto out_err;
1916
1917	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1918						   data->inode);
1919	if (IS_ERR(ds_clnt))
1920		goto out_err;
1921
1922	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
1923	if (!ds_cred)
1924		goto out_err;
1925
1926	vers = nfs4_ff_layout_ds_version(mirror);
1927
1928	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1929		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
1930		vers);
1931	data->commit_done_cb = ff_layout_commit_done_cb;
1932	data->cred = ds_cred;
1933	refcount_inc(&ds->ds_clp->cl_count);
1934	data->ds_clp = ds->ds_clp;
1935	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1936	if (fh)
1937		data->args.fh = fh;
1938
1939	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1940				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
1941					       &ff_layout_commit_call_ops_v4,
1942				   how, RPC_TASK_SOFTCONN);
1943	put_cred(ds_cred);
1944	return ret;
1945out_err:
1946	pnfs_generic_prepare_to_resend_writes(data);
1947	pnfs_generic_commit_release(data);
1948	return -EAGAIN;
1949}
1950
1951static int
1952ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1953			   int how, struct nfs_commit_info *cinfo)
1954{
1955	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1956					    ff_layout_initiate_commit);
1957}
1958
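/* Return the commit info embedded in the flexfile layout header, if any */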
1959static struct pnfs_ds_commit_info *
1960ff_layout_get_ds_info(struct inode *inode)
1961{
1962	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1963
1964	if (layout == NULL)
1965		return NULL;
1966
1967	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
1968}
1969
1970static void
1971ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
1972		struct pnfs_layout_segment *lseg)
1973{
1974	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1975	struct inode *inode = lseg->pls_layout->plh_inode;
1976	struct pnfs_commit_array *array, *new;
1977
1978	new = pnfs_alloc_commit_array(flseg->mirror_array_cnt, GFP_NOIO);
1979	if (new) {
1980		spin_lock(&inode->i_lock);
1981		array = pnfs_add_commit_array(fl_cinfo, new, lseg);
1982		spin_unlock(&inode->i_lock);
1983		if (array != new)
1984			pnfs_free_commit_array(new);
1985	}
1986}
1987
1988static void
1989ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
1990		struct inode *inode)
1991{
1992	spin_lock(&inode->i_lock);
1993	pnfs_generic_ds_cinfo_destroy(fl_cinfo);
1994	spin_unlock(&inode->i_lock);
1995}
1996
1997static void
1998ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
1999{
2000	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
2001						  id_node));
2002}
2003
2004static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
2005				  const struct nfs4_layoutreturn_args *args,
2006				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
2007{
2008	__be32 *start;
2009
2010	start = xdr_reserve_space(xdr, 4);
2011	if (unlikely(!start))
2012		return -E2BIG;
2013
2014	*start = cpu_to_be32(ff_args->num_errors);
2015	/* This assumes we always return _ALL_ layouts */
2016	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2017}
2018
2019static void
2020encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
2021{
2022	WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
2023}
2024
2025static void
2026ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2027			    const nfs4_stateid *stateid,
2028			    const struct nfs42_layoutstat_devinfo *devinfo)
2029{
2030	__be32 *p;
2031
2032	p = xdr_reserve_space(xdr, 8 + 8);
2033	p = xdr_encode_hyper(p, devinfo->offset);
2034	p = xdr_encode_hyper(p, devinfo->length);
2035	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2036	p = xdr_reserve_space(xdr, 4*8);
2037	p = xdr_encode_hyper(p, devinfo->read_count);
2038	p = xdr_encode_hyper(p, devinfo->read_bytes);
2039	p = xdr_encode_hyper(p, devinfo->write_count);
2040	p = xdr_encode_hyper(p, devinfo->write_bytes);
2041	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2042}
2043
2044static void
2045ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2046			    const nfs4_stateid *stateid,
2047			    const struct nfs42_layoutstat_devinfo *devinfo)
2048{
2049	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2050	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2051			devinfo->ld_private.data);
2052}
2053
2054/* Encode the per-device iostats gathered for this layoutreturn */
2055static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2056		const struct nfs4_layoutreturn_args *args,
2057		struct nfs4_flexfile_layoutreturn_args *ff_args)
2058{
2059	__be32 *p;
2060	int i;
2061
2062	p = xdr_reserve_space(xdr, 4);
2063	*p = cpu_to_be32(ff_args->num_dev);
2064	for (i = 0; i < ff_args->num_dev; i++)
2065		ff_layout_encode_ff_iostat(xdr,
2066				&args->layout->plh_stateid,
2067				&ff_args->devinfo[i]);
2068}
2069
2070static void
2071ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2072		unsigned int num_entries)
2073{
2074	unsigned int i;
2075
2076	for (i = 0; i < num_entries; i++) {
2077		if (!devinfo[i].ld_private.ops)
2078			continue;
2079		if (!devinfo[i].ld_private.ops->free)
2080			continue;
2081		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2082	}
2083}
2084
2085static struct nfs4_deviceid_node *
2086ff_layout_alloc_deviceid_node(struct nfs_server *server,
2087			      struct pnfs_device *pdev, gfp_t gfp_flags)
2088{
2089	struct nfs4_ff_layout_ds *dsaddr;
2090
2091	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2092	if (!dsaddr)
2093		return NULL;
2094	return &dsaddr->id_node;
2095}
2096
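/*
 * Encode the ioerr and iostats arrays into a scratch page first, so that
 * the total opaque length is known before it is written to the stream.
 */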
2097static void
2098ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2099		const void *voidargs,
2100		const struct nfs4_xdr_opaque_data *ff_opaque)
2101{
2102	const struct nfs4_layoutreturn_args *args = voidargs;
2103	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2104	struct xdr_buf tmp_buf = {
2105		.head = {
2106			[0] = {
2107				.iov_base = page_address(ff_args->pages[0]),
2108			},
2109		},
2110		.buflen = PAGE_SIZE,
2111	};
2112	struct xdr_stream tmp_xdr;
2113	__be32 *start;
2114
2115	dprintk("%s: Begin\n", __func__);
2116
2117	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2118
2119	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2120	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2121
2122	start = xdr_reserve_space(xdr, 4);
2123	*start = cpu_to_be32(tmp_buf.len);
2124	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2125
2126	dprintk("%s: Return\n", __func__);
2127}
2128
2129static void
2130ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2131{
2132	struct nfs4_flexfile_layoutreturn_args *ff_args;
2133
2134	if (!args->data)
2135		return;
2136	ff_args = args->data;
2137	args->data = NULL;
2138
2139	ff_layout_free_ds_ioerr(&ff_args->errors);
2140	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2141
2142	put_page(ff_args->pages[0]);
2143	kfree(ff_args);
2144}
2145
2146static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2147	.encode = ff_layout_encode_layoutreturn,
2148	.free = ff_layout_free_layoutreturn,
2149};
2150
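/*
 * Gather the outstanding DS I/O errors and per-mirror layoutstats into
 * ff_args and hang them off args->ld_private, where layoutreturn_ops will
 * find them when encoding the LAYOUTRETURN body.
 */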
2151static int
2152ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2153{
2154	struct nfs4_flexfile_layoutreturn_args *ff_args;
2155	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2156
2157	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
2158	if (!ff_args)
2159		goto out_nomem;
2160	ff_args->pages[0] = alloc_page(GFP_KERNEL);
2161	if (!ff_args->pages[0])
2162		goto out_nomem_free;
2163
2164	INIT_LIST_HEAD(&ff_args->errors);
2165	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2166			&args->range, &ff_args->errors,
2167			FF_LAYOUTRETURN_MAXERR);
2168
2169	spin_lock(&args->inode->i_lock);
2170	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2171			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
2172	spin_unlock(&args->inode->i_lock);
2173
2174	args->ld_private->ops = &layoutreturn_ops;
2175	args->ld_private->data = ff_args;
2176	return 0;
2177out_nomem_free:
2178	kfree(ff_args);
2179out_nomem:
2180	return -ENOMEM;
2181}
2182
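/*
 * Report outstanding DS errors to the MDS via LAYOUTERROR, batching at
 * most NFS42_LAYOUTERROR_MAX entries per RPC call.
 */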
2183#ifdef CONFIG_NFS_V4_2
2184void
2185ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2186{
2187	struct pnfs_layout_hdr *lo = lseg->pls_layout;
2188	struct nfs42_layout_error *errors;
2189	LIST_HEAD(head);
2190
2191	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2192		return;
2193	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2194	if (list_empty(&head))
2195		return;
2196
2197	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX,
2198			sizeof(*errors), GFP_NOFS);
2199	if (errors != NULL) {
2200		const struct nfs4_ff_layout_ds_err *pos;
2201		size_t n = 0;
2202
2203		list_for_each_entry(pos, &head, list) {
2204			errors[n].offset = pos->offset;
2205			errors[n].length = pos->length;
2206			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2207			errors[n].errors[0].dev_id = pos->deviceid;
2208			errors[n].errors[0].status = pos->status;
2209			errors[n].errors[0].opnum = pos->opnum;
2210			n++;
2211			if (!list_is_last(&pos->list, &head) &&
2212			    n < NFS42_LAYOUTERROR_MAX)
2213				continue;
2214			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2215				break;
2216			n = 0;
2217		}
2218		kfree(errors);
2219	}
2220	ff_layout_free_ds_ioerr(&head);
2221}
2222#else
2223void
2224ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2225{
2226}
2227#endif
2228
2229static int
2230ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2231{
2232	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2233
2234	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2235}
2236
2237static size_t
2238ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2239			  const int buflen)
2240{
2241	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2242	const struct in6_addr *addr = &sin6->sin6_addr;
2243
2244	/*
2245	 * RFC 4291, Section 2.2.2
2246	 *
2247	 * Shorthanded ANY address
2248	 */
2249	if (ipv6_addr_any(addr))
2250		return snprintf(buf, buflen, "::");
2251
2252	/*
2253	 * RFC 4291, Section 2.2.2
2254	 *
2255	 * Shorthanded loopback address
2256	 */
2257	if (ipv6_addr_loopback(addr))
2258		return snprintf(buf, buflen, "::1");
2259
2260	/*
2261	 * RFC 4291, Section 2.2.3
2262	 *
2263	 * Special presentation address format for mapped v4
2264	 * addresses.
2265	 */
2266	if (ipv6_addr_v4mapped(addr))
2267		return snprintf(buf, buflen, "::ffff:%pI4",
2268					&addr->s6_addr32[3]);
2269
2270	/*
2271	 * RFC 4291, Section 2.2.1
2272	 */
2273	return snprintf(buf, buflen, "%pI6c", addr);
2274}
2275
2276/* Derived from rpc_sockaddr2uaddr */
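/*
 * Encodes the netid, then the universal address with the port appended
 * as two decimal octets ("addr.p1.p2").
 */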
2277static void
2278ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2279{
2280	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2281	char portbuf[RPCBIND_MAXUADDRPLEN];
2282	char addrbuf[RPCBIND_MAXUADDRLEN];
2283	unsigned short port;
2284	int len, netid_len;
2285	__be32 *p;
2286
2287	switch (sap->sa_family) {
2288	case AF_INET:
2289		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2290			return;
2291		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2292		break;
2293	case AF_INET6:
2294		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2295			return;
2296		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2297		break;
2298	default:
2299		WARN_ON_ONCE(1);
2300		return;
2301	}
2302
2303	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2304	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2305
2306	netid_len = strlen(da->da_netid);
2307	p = xdr_reserve_space(xdr, 4 + netid_len);
2308	xdr_encode_opaque(p, da->da_netid, netid_len);
2309
2310	p = xdr_reserve_space(xdr, 4 + len);
2311	xdr_encode_opaque(p, addrbuf, len);
2312}
2313
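/* nfstime4 is 12 bytes on the wire: 64-bit seconds, then 32-bit nseconds */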
2314static void
2315ff_layout_encode_nfstime(struct xdr_stream *xdr,
2316			 ktime_t t)
2317{
2318	struct timespec64 ts;
2319	__be32 *p;
2320
2321	p = xdr_reserve_space(xdr, 12);
2322	ts = ktime_to_timespec64(t);
2323	p = xdr_encode_hyper(p, ts.tv_sec);
2324	*p++ = cpu_to_be32(ts.tv_nsec);
2325}
2326
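/* ff_io_latency4: five 64-bit counters followed by two nfstime4 values */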
2327static void
2328ff_layout_encode_io_latency(struct xdr_stream *xdr,
2329			    struct nfs4_ff_io_stat *stat)
2330{
2331	__be32 *p;
2332
2333	p = xdr_reserve_space(xdr, 5 * 8);
2334	p = xdr_encode_hyper(p, stat->ops_requested);
2335	p = xdr_encode_hyper(p, stat->bytes_requested);
2336	p = xdr_encode_hyper(p, stat->ops_completed);
2337	p = xdr_encode_hyper(p, stat->bytes_completed);
2338	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2339	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2340	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2341}
2342
2343static void
2344ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2345			      const struct nfs42_layoutstat_devinfo *devinfo,
2346			      struct nfs4_ff_layout_mirror *mirror)
2347{
2348	struct nfs4_pnfs_ds_addr *da;
2349	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2350	struct nfs_fh *fh = &mirror->fh_versions[0];
2351	__be32 *p;
2352
2353	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2354	dprintk("%s: DS %s: encoding address %s\n",
2355		__func__, ds->ds_remotestr, da->da_remotestr);
2356	/* netaddr4 */
2357	ff_layout_encode_netaddr(xdr, da);
2358	/* nfs_fh4 */
2359	p = xdr_reserve_space(xdr, 4 + fh->size);
2360	xdr_encode_opaque(p, fh->data, fh->size);
2361	/* ff_io_latency4 read */
2362	spin_lock(&mirror->lock);
2363	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2364	/* ff_io_latency4 write */
2365	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2366	spin_unlock(&mirror->lock);
2367	/* nfstime4 */
2368	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2369	/* bool */
2370	p = xdr_reserve_space(xdr, 4);
2371	*p = cpu_to_be32(false);
2372}
2373
2374static void
2375ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2376			     const struct nfs4_xdr_opaque_data *opaque)
2377{
2378	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2379			struct nfs42_layoutstat_devinfo, ld_private);
2380	__be32 *start;
2381
2382	/* layoutupdate length */
2383	start = xdr_reserve_space(xdr, 4);
2384	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2385
2386	*start = cpu_to_be32((xdr->p - start - 1) * 4);
2387}
2388
2389static void
2390ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2391{
2392	struct nfs4_ff_layout_mirror *mirror = opaque->data;
2393
2394	ff_layout_put_mirror(mirror);
2395}
2396
2397static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2398	.encode = ff_layout_encode_layoutstats,
2399	.free	= ff_layout_free_layoutstats,
2400};
2401
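/*
 * Snapshot the I/O statistics of up to dev_limit mirrors into devinfo[].
 * Each entry takes a reference to its mirror, which is released by
 * layoutstat_ops.free once the stats have been transmitted.
 */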
2402static int
2403ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2404			       struct nfs42_layoutstat_devinfo *devinfo,
2405			       int dev_limit)
2406{
2407	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2408	struct nfs4_ff_layout_mirror *mirror;
2409	struct nfs4_deviceid_node *dev;
2410	int i = 0;
2411
2412	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2413		if (i >= dev_limit)
2414			break;
2415		if (IS_ERR_OR_NULL(mirror->mirror_ds))
2416			continue;
2417		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
2418			continue;
2419		/* mirror refcount put in cleanup_layoutstats */
2420		if (!refcount_inc_not_zero(&mirror->ref))
2421			continue;
2422		dev = &mirror->mirror_ds->id_node;
2423		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2424		devinfo->offset = 0;
2425		devinfo->length = NFS4_MAX_UINT64;
2426		spin_lock(&mirror->lock);
2427		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2428		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2429		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2430		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2431		spin_unlock(&mirror->lock);
2432		devinfo->layout_type = LAYOUT_FLEX_FILES;
2433		devinfo->ld_private.ops = &layoutstat_ops;
2434		devinfo->ld_private.data = mirror;
2435
2436		devinfo++;
2437		i++;
2438	}
2439	return i;
2440}
2441
2442static int
2443ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2444{
2445	struct nfs4_flexfile_layout *ff_layout;
2446	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2447
2448	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2449	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
2450	if (!args->devinfo)
2451		return -ENOMEM;
2452
2453	spin_lock(&args->inode->i_lock);
2454	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
2455	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2456			&args->devinfo[0], dev_count);
2457	spin_unlock(&args->inode->i_lock);
2458	if (!args->num_dev) {
2459		kfree(args->devinfo);
2460		args->devinfo = NULL;
2461		return -ENOENT;
2462	}
2463
2464	return 0;
2465}
2466
2467static int
2468ff_layout_set_layoutdriver(struct nfs_server *server,
2469		const struct nfs_fh *dummy)
2470{
2471#if IS_ENABLED(CONFIG_NFS_V4_2)
2472	server->caps |= NFS_CAP_LAYOUTSTATS;
2473#endif
2474	return 0;
2475}
2476
2477static const struct pnfs_commit_ops ff_layout_commit_ops = {
2478	.setup_ds_info		= ff_layout_setup_ds_info,
2479	.release_ds_info	= ff_layout_release_ds_info,
2480	.mark_request_commit	= pnfs_layout_mark_request_commit,
2481	.clear_request_commit	= pnfs_generic_clear_request_commit,
2482	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
2483	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
2484	.commit_pagelist	= ff_layout_commit_pagelist,
2485};
2486
2487static struct pnfs_layoutdriver_type flexfilelayout_type = {
2488	.id			= LAYOUT_FLEX_FILES,
2489	.name			= "LAYOUT_FLEX_FILES",
2490	.owner			= THIS_MODULE,
2491	.flags			= PNFS_LAYOUTGET_ON_OPEN,
2492	.max_layoutget_response	= 4096, /* 1 page or so... */
2493	.set_layoutdriver	= ff_layout_set_layoutdriver,
2494	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
2495	.free_layout_hdr	= ff_layout_free_layout_hdr,
2496	.alloc_lseg		= ff_layout_alloc_lseg,
2497	.free_lseg		= ff_layout_free_lseg,
2498	.add_lseg		= ff_layout_add_lseg,
2499	.pg_read_ops		= &ff_layout_pg_read_ops,
2500	.pg_write_ops		= &ff_layout_pg_write_ops,
2501	.get_ds_info		= ff_layout_get_ds_info,
2502	.free_deviceid_node	= ff_layout_free_deviceid_node,
2503	.read_pagelist		= ff_layout_read_pagelist,
2504	.write_pagelist		= ff_layout_write_pagelist,
2505	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
2506	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
2507	.sync			= pnfs_nfs_generic_sync,
2508	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
2509};
2510
2511static int __init nfs4flexfilelayout_init(void)
2512{
2513	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2514	       __func__);
2515	return pnfs_register_layoutdriver(&flexfilelayout_type);
2516}
2517
2518static void __exit nfs4flexfilelayout_exit(void)
2519{
2520	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2521	       __func__);
2522	pnfs_unregister_layoutdriver(&flexfilelayout_type);
2523}
2524
2525MODULE_ALIAS("nfs-layouttype4-4");
2526
2527MODULE_LICENSE("GPL");
2528MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2529
2530module_init(nfs4flexfilelayout_init);
2531module_exit(nfs4flexfilelayout_exit);
2532
2533module_param(io_maxretrans, ushort, 0644);
2534MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
2535			"retries an I/O request before returning an error.");
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Module for pnfs flexfile layout driver.
   4 *
   5 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
   6 *
   7 * Tao Peng <bergwolf@primarydata.com>
   8 */
   9
  10#include <linux/nfs_fs.h>
  11#include <linux/nfs_mount.h>
  12#include <linux/nfs_page.h>
  13#include <linux/module.h>
  14#include <linux/sched/mm.h>
  15
  16#include <linux/sunrpc/metrics.h>
  17
  18#include "flexfilelayout.h"
  19#include "../nfs4session.h"
  20#include "../nfs4idmap.h"
  21#include "../internal.h"
  22#include "../delegation.h"
  23#include "../nfs4trace.h"
  24#include "../iostat.h"
  25#include "../nfs.h"
  26#include "../nfs42.h"
  27
  28#define NFSDBG_FACILITY         NFSDBG_PNFS_LD
  29
  30#define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
  31#define FF_LAYOUTRETURN_MAXERR 20
  32
  33static unsigned short io_maxretrans;
  34
  35static const struct pnfs_commit_ops ff_layout_commit_ops;
  36static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
  37		struct nfs_pgio_header *hdr);
  38static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
  39			       struct nfs42_layoutstat_devinfo *devinfo,
  40			       int dev_limit);
  41static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
  42			      const struct nfs42_layoutstat_devinfo *devinfo,
  43			      struct nfs4_ff_layout_mirror *mirror);
  44
  45static struct pnfs_layout_hdr *
  46ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
  47{
  48	struct nfs4_flexfile_layout *ffl;
  49
  50	ffl = kzalloc(sizeof(*ffl), gfp_flags);
  51	if (ffl) {
  52		pnfs_init_ds_commit_info(&ffl->commit_info);
  53		INIT_LIST_HEAD(&ffl->error_list);
  54		INIT_LIST_HEAD(&ffl->mirrors);
  55		ffl->last_report_time = ktime_get();
  56		ffl->commit_info.ops = &ff_layout_commit_ops;
  57		return &ffl->generic_hdr;
  58	} else
  59		return NULL;
  60}
  61
  62static void
  63ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
  64{
  65	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
  66	struct nfs4_ff_layout_ds_err *err, *n;
  67
  68	list_for_each_entry_safe(err, n, &ffl->error_list, list) {
  69		list_del(&err->list);
  70		kfree(err);
  71	}
  72	kfree_rcu(ffl, generic_hdr.plh_rcu);
  73}
  74
  75static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
  76{
  77	__be32 *p;
  78
  79	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
  80	if (unlikely(p == NULL))
  81		return -ENOBUFS;
  82	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
  83	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
  84	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
  85		p[0], p[1], p[2], p[3]);
  86	return 0;
  87}
  88
  89static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
  90{
  91	__be32 *p;
  92
  93	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
  94	if (unlikely(!p))
  95		return -ENOBUFS;
  96	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
  97	nfs4_print_deviceid(devid);
  98	return 0;
  99}
 100
 101static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
 102{
 103	__be32 *p;
 104
 105	p = xdr_inline_decode(xdr, 4);
 106	if (unlikely(!p))
 107		return -ENOBUFS;
 108	fh->size = be32_to_cpup(p++);
 109	if (fh->size > sizeof(struct nfs_fh)) {
 110		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
 111		       fh->size);
 112		return -EOVERFLOW;
 113	}
 114	/* fh.data */
 115	p = xdr_inline_decode(xdr, fh->size);
 116	if (unlikely(!p))
 117		return -ENOBUFS;
 118	memcpy(&fh->data, p, fh->size);
 119	dprintk("%s: fh len %d\n", __func__, fh->size);
 120
 121	return 0;
 122}
 123
 124/*
 125 * Currently only stringified uids and gids are accepted.
 126 * I.e., kerberos is not supported to the DSes, so no pricipals.
 127 *
 128 * That means that one common function will suffice, but when
 129 * principals are added, this should be split to accomodate
 130 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 131 */
 132static int
 133decode_name(struct xdr_stream *xdr, u32 *id)
 134{
 135	__be32 *p;
 136	int len;
 137
 138	/* opaque_length(4)*/
 139	p = xdr_inline_decode(xdr, 4);
 140	if (unlikely(!p))
 141		return -ENOBUFS;
 142	len = be32_to_cpup(p++);
 143	if (len < 0)
 144		return -EINVAL;
 145
 146	dprintk("%s: len %u\n", __func__, len);
 147
 148	/* opaque body */
 149	p = xdr_inline_decode(xdr, len);
 150	if (unlikely(!p))
 151		return -ENOBUFS;
 152
 153	if (!nfs_map_string_to_numeric((char *)p, len, id))
 154		return -EINVAL;
 155
 156	return 0;
 157}
 158
 159static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
 160		const struct nfs4_ff_layout_mirror *m2)
 161{
 162	int i, j;
 163
 164	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
 165		return false;
 166	for (i = 0; i < m1->fh_versions_cnt; i++) {
 167		bool found_fh = false;
 168		for (j = 0; j < m2->fh_versions_cnt; j++) {
 169			if (nfs_compare_fh(&m1->fh_versions[i],
 170					&m2->fh_versions[j]) == 0) {
 171				found_fh = true;
 172				break;
 173			}
 174		}
 175		if (!found_fh)
 176			return false;
 177	}
 178	return true;
 179}
 180
 181static struct nfs4_ff_layout_mirror *
 182ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
 183		struct nfs4_ff_layout_mirror *mirror)
 184{
 185	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
 186	struct nfs4_ff_layout_mirror *pos;
 187	struct inode *inode = lo->plh_inode;
 188
 189	spin_lock(&inode->i_lock);
 190	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
 191		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
 192			continue;
 193		if (!ff_mirror_match_fh(mirror, pos))
 194			continue;
 195		if (refcount_inc_not_zero(&pos->ref)) {
 196			spin_unlock(&inode->i_lock);
 197			return pos;
 198		}
 199	}
 200	list_add(&mirror->mirrors, &ff_layout->mirrors);
 201	mirror->layout = lo;
 202	spin_unlock(&inode->i_lock);
 203	return mirror;
 204}
 205
 206static void
 207ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
 208{
 209	struct inode *inode;
 210	if (mirror->layout == NULL)
 211		return;
 212	inode = mirror->layout->plh_inode;
 213	spin_lock(&inode->i_lock);
 214	list_del(&mirror->mirrors);
 215	spin_unlock(&inode->i_lock);
 216	mirror->layout = NULL;
 217}
 218
 219static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
 220{
 221	struct nfs4_ff_layout_mirror *mirror;
 222
 223	mirror = kzalloc(sizeof(*mirror), gfp_flags);
 224	if (mirror != NULL) {
 225		spin_lock_init(&mirror->lock);
 226		refcount_set(&mirror->ref, 1);
 227		INIT_LIST_HEAD(&mirror->mirrors);
 228	}
 229	return mirror;
 230}
 231
 232static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
 233{
 234	const struct cred	*cred;
 235
 236	ff_layout_remove_mirror(mirror);
 237	kfree(mirror->fh_versions);
 238	cred = rcu_access_pointer(mirror->ro_cred);
 239	put_cred(cred);
 240	cred = rcu_access_pointer(mirror->rw_cred);
 241	put_cred(cred);
 242	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
 243	kfree(mirror);
 244}
 245
 246static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
 247{
 248	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
 249		ff_layout_free_mirror(mirror);
 250}
 251
 252static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
 253{
 254	u32 i;
 255
 256	for (i = 0; i < fls->mirror_array_cnt; i++)
 257		ff_layout_put_mirror(fls->mirror_array[i]);
 258}
 259
 260static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
 261{
 262	if (fls) {
 263		ff_layout_free_mirror_array(fls);
 264		kfree(fls);
 265	}
 266}
 267
 268static bool
 269ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
 270		struct pnfs_layout_segment *l2)
 271{
 272	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
 273	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l1);
 274	u32 i;
 275
 276	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
 277		return false;
 278	for (i = 0; i < fl1->mirror_array_cnt; i++) {
 279		if (fl1->mirror_array[i] != fl2->mirror_array[i])
 280			return false;
 281	}
 282	return true;
 283}
 284
 285static bool
 286ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
 287		const struct pnfs_layout_range *l2)
 288{
 289	u64 end1, end2;
 290
 291	if (l1->iomode != l2->iomode)
 292		return l1->iomode != IOMODE_READ;
 293	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
 294	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
 295	if (end1 < l2->offset)
 296		return false;
 297	if (end2 < l1->offset)
 298		return true;
 299	return l2->offset <= l1->offset;
 300}
 301
 302static bool
 303ff_lseg_merge(struct pnfs_layout_segment *new,
 304		struct pnfs_layout_segment *old)
 305{
 306	u64 new_end, old_end;
 307
 308	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
 309		return false;
 310	if (new->pls_range.iomode != old->pls_range.iomode)
 311		return false;
 312	old_end = pnfs_calc_offset_end(old->pls_range.offset,
 313			old->pls_range.length);
 314	if (old_end < new->pls_range.offset)
 315		return false;
 316	new_end = pnfs_calc_offset_end(new->pls_range.offset,
 317			new->pls_range.length);
 318	if (new_end < old->pls_range.offset)
 319		return false;
 320	if (!ff_lseg_match_mirrors(new, old))
 321		return false;
 322
 323	/* Mergeable: copy info from 'old' to 'new' */
 324	if (new_end < old_end)
 325		new_end = old_end;
 326	if (new->pls_range.offset < old->pls_range.offset)
 327		new->pls_range.offset = old->pls_range.offset;
 328	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
 329			new_end);
 330	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
 331		set_bit(NFS_LSEG_ROC, &new->pls_flags);
 332	return true;
 333}
 334
 335static void
 336ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
 337		struct pnfs_layout_segment *lseg,
 338		struct list_head *free_me)
 339{
 340	pnfs_generic_layout_insert_lseg(lo, lseg,
 341			ff_lseg_range_is_after,
 342			ff_lseg_merge,
 343			free_me);
 344}
 345
 346static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
 347{
 348	int i, j;
 349
 350	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
 351		for (j = i + 1; j < fls->mirror_array_cnt; j++)
 352			if (fls->mirror_array[i]->efficiency <
 353			    fls->mirror_array[j]->efficiency)
 354				swap(fls->mirror_array[i],
 355				     fls->mirror_array[j]);
 356	}
 357}
 358
 359static struct pnfs_layout_segment *
 360ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
 361		     struct nfs4_layoutget_res *lgr,
 362		     gfp_t gfp_flags)
 363{
 364	struct pnfs_layout_segment *ret;
 365	struct nfs4_ff_layout_segment *fls = NULL;
 366	struct xdr_stream stream;
 367	struct xdr_buf buf;
 368	struct page *scratch;
 369	u64 stripe_unit;
 370	u32 mirror_array_cnt;
 371	__be32 *p;
 372	int i, rc;
 373
 374	dprintk("--> %s\n", __func__);
 375	scratch = alloc_page(gfp_flags);
 376	if (!scratch)
 377		return ERR_PTR(-ENOMEM);
 378
 379	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
 380			      lgr->layoutp->len);
 381	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
 382
 383	/* stripe unit and mirror_array_cnt */
 384	rc = -EIO;
 385	p = xdr_inline_decode(&stream, 8 + 4);
 386	if (!p)
 387		goto out_err_free;
 388
 389	p = xdr_decode_hyper(p, &stripe_unit);
 390	mirror_array_cnt = be32_to_cpup(p++);
 391	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
 392		stripe_unit, mirror_array_cnt);
 393
 394	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
 395	    mirror_array_cnt == 0)
 396		goto out_err_free;
 397
 398	rc = -ENOMEM;
 399	fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
 400			gfp_flags);
 401	if (!fls)
 402		goto out_err_free;
 403
 404	fls->mirror_array_cnt = mirror_array_cnt;
 405	fls->stripe_unit = stripe_unit;
 406
 407	for (i = 0; i < fls->mirror_array_cnt; i++) {
 408		struct nfs4_ff_layout_mirror *mirror;
 409		struct cred *kcred;
 410		const struct cred __rcu *cred;
 411		kuid_t uid;
 412		kgid_t gid;
 413		u32 ds_count, fh_count, id;
 414		int j;
 415
 416		rc = -EIO;
 417		p = xdr_inline_decode(&stream, 4);
 418		if (!p)
 419			goto out_err_free;
 420		ds_count = be32_to_cpup(p);
 421
 422		/* FIXME: allow for striping? */
 423		if (ds_count != 1)
 424			goto out_err_free;
 425
 426		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
 427		if (fls->mirror_array[i] == NULL) {
 428			rc = -ENOMEM;
 429			goto out_err_free;
 430		}
 431
 432		fls->mirror_array[i]->ds_count = ds_count;
 433
 434		/* deviceid */
 435		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
 436		if (rc)
 437			goto out_err_free;
 438
 439		/* efficiency */
 440		rc = -EIO;
 441		p = xdr_inline_decode(&stream, 4);
 442		if (!p)
 443			goto out_err_free;
 444		fls->mirror_array[i]->efficiency = be32_to_cpup(p);
 445
 446		/* stateid */
 447		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
 448		if (rc)
 449			goto out_err_free;
 450
 451		/* fh */
 452		rc = -EIO;
 453		p = xdr_inline_decode(&stream, 4);
 454		if (!p)
 455			goto out_err_free;
 456		fh_count = be32_to_cpup(p);
 457
 458		fls->mirror_array[i]->fh_versions =
 459			kcalloc(fh_count, sizeof(struct nfs_fh),
 460				gfp_flags);
 461		if (fls->mirror_array[i]->fh_versions == NULL) {
 462			rc = -ENOMEM;
 463			goto out_err_free;
 464		}
 465
 466		for (j = 0; j < fh_count; j++) {
 467			rc = decode_nfs_fh(&stream,
 468					   &fls->mirror_array[i]->fh_versions[j]);
 469			if (rc)
 470				goto out_err_free;
 471		}
 472
 473		fls->mirror_array[i]->fh_versions_cnt = fh_count;
 474
 475		/* user */
 476		rc = decode_name(&stream, &id);
 477		if (rc)
 478			goto out_err_free;
 479
 480		uid = make_kuid(&init_user_ns, id);
 481
 482		/* group */
 483		rc = decode_name(&stream, &id);
 484		if (rc)
 485			goto out_err_free;
 486
 487		gid = make_kgid(&init_user_ns, id);
 488
 489		if (gfp_flags & __GFP_FS)
 490			kcred = prepare_kernel_cred(NULL);
 491		else {
 492			unsigned int nofs_flags = memalloc_nofs_save();
 493			kcred = prepare_kernel_cred(NULL);
 494			memalloc_nofs_restore(nofs_flags);
 495		}
 496		rc = -ENOMEM;
 497		if (!kcred)
 498			goto out_err_free;
 499		kcred->fsuid = uid;
 500		kcred->fsgid = gid;
 501		cred = RCU_INITIALIZER(kcred);
 502
 503		if (lgr->range.iomode == IOMODE_READ)
 504			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
 505		else
 506			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
 507
 508		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
 509		if (mirror != fls->mirror_array[i]) {
 510			/* swap cred ptrs so free_mirror will clean up old */
 511			if (lgr->range.iomode == IOMODE_READ) {
 512				cred = xchg(&mirror->ro_cred, cred);
 513				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
 514			} else {
 515				cred = xchg(&mirror->rw_cred, cred);
 516				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
 517			}
 518			ff_layout_free_mirror(fls->mirror_array[i]);
 519			fls->mirror_array[i] = mirror;
 520		}
 521
 522		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
 523			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
 524			from_kuid(&init_user_ns, uid),
 525			from_kgid(&init_user_ns, gid));
 526	}
 527
 528	p = xdr_inline_decode(&stream, 4);
 529	if (!p)
 530		goto out_sort_mirrors;
 531	fls->flags = be32_to_cpup(p);
 532
 533	p = xdr_inline_decode(&stream, 4);
 534	if (!p)
 535		goto out_sort_mirrors;
 536	for (i=0; i < fls->mirror_array_cnt; i++)
 537		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
 538
 539out_sort_mirrors:
 540	ff_layout_sort_mirrors(fls);
 541	ret = &fls->generic_hdr;
 542	dprintk("<-- %s (success)\n", __func__);
 543out_free_page:
 544	__free_page(scratch);
 545	return ret;
 546out_err_free:
 547	_ff_layout_free_lseg(fls);
 548	ret = ERR_PTR(rc);
 549	dprintk("<-- %s (%d)\n", __func__, rc);
 550	goto out_free_page;
 551}
 552
 553static void
 554ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
 555{
 556	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
 557
 558	dprintk("--> %s\n", __func__);
 559
 560	if (lseg->pls_range.iomode == IOMODE_RW) {
 561		struct nfs4_flexfile_layout *ffl;
 562		struct inode *inode;
 563
 564		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
 565		inode = ffl->generic_hdr.plh_inode;
 566		spin_lock(&inode->i_lock);
 567		pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
 568		spin_unlock(&inode->i_lock);
 569	}
 570	_ff_layout_free_lseg(fls);
 571}
 572
 573static void
 574nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
 575{
 576	/* first IO request? */
 577	if (atomic_inc_return(&timer->n_ops) == 1) {
 578		timer->start_time = now;
 579	}
 580}
 581
 582static ktime_t
 583nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
 584{
 585	ktime_t start;
 586
 587	if (atomic_dec_return(&timer->n_ops) < 0)
 588		WARN_ON_ONCE(1);
 589
 590	start = timer->start_time;
 591	timer->start_time = now;
 592	return ktime_sub(now, start);
 593}
 594
 595static bool
 596nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
 597			    struct nfs4_ff_layoutstat *layoutstat,
 598			    ktime_t now)
 599{
 600	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
 601	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
 602
 603	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
 604	if (!mirror->start_time)
 605		mirror->start_time = now;
 606	if (mirror->report_interval != 0)
 607		report_interval = (s64)mirror->report_interval * 1000LL;
 608	else if (layoutstats_timer != 0)
 609		report_interval = (s64)layoutstats_timer * 1000LL;
 610	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
 611			report_interval) {
 612		ffl->last_report_time = now;
 613		return true;
 614	}
 615
 616	return false;
 617}
 618
 619static void
 620nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
 621		__u64 requested)
 622{
 623	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
 624
 625	iostat->ops_requested++;
 626	iostat->bytes_requested += requested;
 627}
 628
 629static void
 630nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
 631		__u64 requested,
 632		__u64 completed,
 633		ktime_t time_completed,
 634		ktime_t time_started)
 635{
 636	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
 637	ktime_t completion_time = ktime_sub(time_completed, time_started);
 638	ktime_t timer;
 639
 640	iostat->ops_completed++;
 641	iostat->bytes_completed += completed;
 642	iostat->bytes_not_delivered += requested - completed;
 643
 644	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
 645	iostat->total_busy_time =
 646			ktime_add(iostat->total_busy_time, timer);
 647	iostat->aggregate_completion_time =
 648			ktime_add(iostat->aggregate_completion_time,
 649					completion_time);
 650}
 651
 652static void
 653nfs4_ff_layout_stat_io_start_read(struct inode *inode,
 654		struct nfs4_ff_layout_mirror *mirror,
 655		__u64 requested, ktime_t now)
 656{
 657	bool report;
 658
 659	spin_lock(&mirror->lock);
 660	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
 661	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
 662	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 663	spin_unlock(&mirror->lock);
 664
 665	if (report)
 666		pnfs_report_layoutstat(inode, GFP_KERNEL);
 667}
 668
 669static void
 670nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
 671		struct nfs4_ff_layout_mirror *mirror,
 672		__u64 requested,
 673		__u64 completed)
 674{
 675	spin_lock(&mirror->lock);
 676	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
 677			requested, completed,
 678			ktime_get(), task->tk_start);
 679	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 680	spin_unlock(&mirror->lock);
 681}
 682
 683static void
 684nfs4_ff_layout_stat_io_start_write(struct inode *inode,
 685		struct nfs4_ff_layout_mirror *mirror,
 686		__u64 requested, ktime_t now)
 687{
 688	bool report;
 689
 690	spin_lock(&mirror->lock);
 691	report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat, now);
 692	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
 693	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 694	spin_unlock(&mirror->lock);
 695
 696	if (report)
 697		pnfs_report_layoutstat(inode, GFP_NOIO);
 698}
 699
 700static void
 701nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
 702		struct nfs4_ff_layout_mirror *mirror,
 703		__u64 requested,
 704		__u64 completed,
 705		enum nfs3_stable_how committed)
 706{
 707	if (committed == NFS_UNSTABLE)
 708		requested = completed = 0;
 709
 710	spin_lock(&mirror->lock);
 711	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
 712			requested, completed, ktime_get(), task->tk_start);
 713	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 714	spin_unlock(&mirror->lock);
 715}
 716
 717static void
 718ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
 719{
 720	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
 721
 722	if (devid)
 723		nfs4_mark_deviceid_unavailable(devid);
 724}
 725
 726static void
 727ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
 728{
 729	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
 730
 731	if (devid)
 732		nfs4_mark_deviceid_available(devid);
 733}
 734
 735static struct nfs4_pnfs_ds *
 736ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
 737			     u32 start_idx, u32 *best_idx,
 738			     bool check_device)
 739{
 740	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
 741	struct nfs4_ff_layout_mirror *mirror;
 742	struct nfs4_pnfs_ds *ds;
 743	bool fail_return = false;
 744	u32 idx;
 745
 746	/* mirrors are initially sorted by efficiency */
 747	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
 748		if (idx+1 == fls->mirror_array_cnt)
 749			fail_return = !check_device;
 750
 751		mirror = FF_LAYOUT_COMP(lseg, idx);
 752		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, fail_return);
 753		if (!ds)
 754			continue;
 755
 756		if (check_device &&
 757		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
 758			continue;
 759
 760		*best_idx = idx;
 761		return ds;
 762	}
 763
 764	return NULL;
 765}
 766
 767static struct nfs4_pnfs_ds *
 768ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
 769				 u32 start_idx, u32 *best_idx)
 770{
 771	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
 772}
 773
 774static struct nfs4_pnfs_ds *
 775ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
 776				   u32 start_idx, u32 *best_idx)
 777{
 778	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
 779}
 780
 781static struct nfs4_pnfs_ds *
 782ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
 783				  u32 start_idx, u32 *best_idx)
 784{
 785	struct nfs4_pnfs_ds *ds;
 786
 787	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
 788	if (ds)
 789		return ds;
 790	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
 791}
 792
 793static struct nfs4_pnfs_ds *
 794ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
 795			  u32 *best_idx)
 796{
 797	struct pnfs_layout_segment *lseg = pgio->pg_lseg;
 798	struct nfs4_pnfs_ds *ds;
 799
 800	ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
 801					       best_idx);
 802	if (ds || !pgio->pg_mirror_idx)
 803		return ds;
 804	return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
 805}
 806
 807static void
 808ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
 809		      struct nfs_page *req,
 810		      bool strict_iomode)
 811{
 812	pnfs_put_lseg(pgio->pg_lseg);
 813	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 814					   nfs_req_openctx(req),
 815					   req_offset(req),
 816					   req->wb_bytes,
 817					   IOMODE_READ,
 818					   strict_iomode,
 819					   GFP_KERNEL);
 820	if (IS_ERR(pgio->pg_lseg)) {
 821		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 822		pgio->pg_lseg = NULL;
 823	}
 824}
 825
 826static void
 827ff_layout_pg_check_layout(struct nfs_pageio_descriptor *pgio,
 828			  struct nfs_page *req)
 829{
 830	pnfs_generic_pg_check_layout(pgio);
 831	pnfs_generic_pg_check_range(pgio, req);
 832}
 833
 834static void
 835ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
 836			struct nfs_page *req)
 837{
 838	struct nfs_pgio_mirror *pgm;
 839	struct nfs4_ff_layout_mirror *mirror;
 840	struct nfs4_pnfs_ds *ds;
 841	u32 ds_idx, i;
 842
 843retry:
 844	ff_layout_pg_check_layout(pgio, req);
 845	/* Use full layout for now */
 846	if (!pgio->pg_lseg) {
 847		ff_layout_pg_get_read(pgio, req, false);
 848		if (!pgio->pg_lseg)
 849			goto out_nolseg;
 850	}
 851	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
 852		ff_layout_pg_get_read(pgio, req, true);
 853		if (!pgio->pg_lseg)
 854			goto out_nolseg;
 855	}
 856
 857	ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
 858	if (!ds) {
 859		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
 860			goto out_mds;
 861		pnfs_generic_pg_cleanup(pgio);
 862		/* Sleep for 1 second before retrying */
 863		ssleep(1);
 864		goto retry;
 865	}
 866
 867	for (i = 0; i < pgio->pg_mirror_count; i++) {
 868		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
 869		pgm = &pgio->pg_mirrors[i];
 870		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
 871	}
 872
 873	pgio->pg_mirror_idx = ds_idx;
 874
 875	if (NFS_SERVER(pgio->pg_inode)->flags &
 876			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
 877		pgio->pg_maxretrans = io_maxretrans;
 878	return;
 879out_nolseg:
 880	if (pgio->pg_error < 0)
 881		return;
 882out_mds:
 883	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
 884			0, NFS4_MAX_UINT64, IOMODE_READ,
 885			NFS_I(pgio->pg_inode)->layout,
 886			pgio->pg_lseg);
 887	pgio->pg_maxretrans = 0;
 888	nfs_pageio_reset_read_mds(pgio);
 889}
 890
 891static void
 892ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
 893			struct nfs_page *req)
 894{
 895	struct nfs4_ff_layout_mirror *mirror;
 896	struct nfs_pgio_mirror *pgm;
 897	struct nfs4_pnfs_ds *ds;
 898	u32 i;
 899
 900retry:
 901	ff_layout_pg_check_layout(pgio, req);
 902	if (!pgio->pg_lseg) {
 903		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 904						   nfs_req_openctx(req),
 905						   req_offset(req),
 906						   req->wb_bytes,
 907						   IOMODE_RW,
 908						   false,
 909						   GFP_NOFS);
 910		if (IS_ERR(pgio->pg_lseg)) {
 911			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 912			pgio->pg_lseg = NULL;
 913			return;
 914		}
 915	}
 916	/* If no lseg, fall back to write through mds */
 917	if (pgio->pg_lseg == NULL)
 918		goto out_mds;
 919
 920	/* Use a direct mapping of ds_idx to pgio mirror_idx */
 921	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
 922		goto out_eagain;
 923
 924	for (i = 0; i < pgio->pg_mirror_count; i++) {
 925		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
 926		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
 927		if (!ds) {
 928			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
 929				goto out_mds;
 930			pnfs_generic_pg_cleanup(pgio);
 931			/* Sleep for 1 second before retrying */
 932			ssleep(1);
 933			goto retry;
 934		}
 935		pgm = &pgio->pg_mirrors[i];
 936		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
 937	}
 938
 939	if (NFS_SERVER(pgio->pg_inode)->flags &
 940			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
 941		pgio->pg_maxretrans = io_maxretrans;
 942	return;
 943out_eagain:
 944	pnfs_generic_pg_cleanup(pgio);
 945	pgio->pg_error = -EAGAIN;
 946	return;
 947out_mds:
 948	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
 949			0, NFS4_MAX_UINT64, IOMODE_RW,
 950			NFS_I(pgio->pg_inode)->layout,
 951			pgio->pg_lseg);
 952	pgio->pg_maxretrans = 0;
 953	nfs_pageio_reset_write_mds(pgio);
 954	pgio->pg_error = -EAGAIN;
 955}
 956
 957static unsigned int
 958ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
 959				    struct nfs_page *req)
 960{
 961	if (!pgio->pg_lseg) {
 962		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 963						   nfs_req_openctx(req),
 964						   req_offset(req),
 965						   req->wb_bytes,
 966						   IOMODE_RW,
 967						   false,
 968						   GFP_NOFS);
 969		if (IS_ERR(pgio->pg_lseg)) {
 970			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 971			pgio->pg_lseg = NULL;
 972			goto out;
 973		}
 974	}
 975	if (pgio->pg_lseg)
 976		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
 977
 978	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
 979			0, NFS4_MAX_UINT64, IOMODE_RW,
 980			NFS_I(pgio->pg_inode)->layout,
 981			pgio->pg_lseg);
 982	/* no lseg means that pnfs is not in use, so no mirroring here */
 983	nfs_pageio_reset_write_mds(pgio);
 984out:
 985	return 1;
 986}
 987
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 988static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
 989	.pg_init = ff_layout_pg_init_read,
 990	.pg_test = pnfs_generic_pg_test,
 991	.pg_doio = pnfs_generic_pg_readpages,
 992	.pg_cleanup = pnfs_generic_pg_cleanup,
 993};
 994
 995static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
 996	.pg_init = ff_layout_pg_init_write,
 997	.pg_test = pnfs_generic_pg_test,
 998	.pg_doio = pnfs_generic_pg_writepages,
 999	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1000	.pg_cleanup = pnfs_generic_pg_cleanup,
 
 
1001};
1002
1003static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1004{
1005	struct rpc_task *task = &hdr->task;
1006
1007	pnfs_layoutcommit_inode(hdr->inode, false);
1008
1009	if (retry_pnfs) {
1010		dprintk("%s Reset task %5u for i/o through pNFS "
1011			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1012			hdr->task.tk_pid,
1013			hdr->inode->i_sb->s_id,
1014			(unsigned long long)NFS_FILEID(hdr->inode),
1015			hdr->args.count,
1016			(unsigned long long)hdr->args.offset);
1017
1018		hdr->completion_ops->reschedule_io(hdr);
1019		return;
1020	}
1021
1022	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1023		dprintk("%s Reset task %5u for i/o through MDS "
1024			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1025			hdr->task.tk_pid,
1026			hdr->inode->i_sb->s_id,
1027			(unsigned long long)NFS_FILEID(hdr->inode),
1028			hdr->args.count,
1029			(unsigned long long)hdr->args.offset);
1030
1031		trace_pnfs_mds_fallback_write_done(hdr->inode,
1032				hdr->args.offset, hdr->args.count,
1033				IOMODE_RW, NFS_I(hdr->inode)->layout,
1034				hdr->lseg);
1035		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1036	}
1037}
1038
1039static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
1040{
1041	u32 idx = hdr->pgio_mirror_idx + 1;
1042	u32 new_idx = 0;
1043
1044	if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx + 1, &new_idx))
1045		ff_layout_send_layouterror(hdr->lseg);
1046	else
1047		pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1048	pnfs_read_resend_pnfs(hdr, new_idx);
1049}
1050
1051static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1052{
1053	struct rpc_task *task = &hdr->task;
1054
1055	pnfs_layoutcommit_inode(hdr->inode, false);
1056	pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1057
1058	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1059		dprintk("%s Reset task %5u for i/o through MDS "
1060			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1061			hdr->task.tk_pid,
1062			hdr->inode->i_sb->s_id,
1063			(unsigned long long)NFS_FILEID(hdr->inode),
1064			hdr->args.count,
1065			(unsigned long long)hdr->args.offset);
1066
1067		trace_pnfs_mds_fallback_read_done(hdr->inode,
1068				hdr->args.offset, hdr->args.count,
1069				IOMODE_READ, NFS_I(hdr->inode)->layout,
1070				hdr->lseg);
1071		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1072	}
1073}
1074
1075static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1076					   struct nfs4_state *state,
1077					   struct nfs_client *clp,
1078					   struct pnfs_layout_segment *lseg,
1079					   u32 idx)
1080{
1081	struct pnfs_layout_hdr *lo = lseg->pls_layout;
1082	struct inode *inode = lo->plh_inode;
1083	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1084	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1085
1086	switch (task->tk_status) {
1087	case -NFS4ERR_BADSESSION:
1088	case -NFS4ERR_BADSLOT:
1089	case -NFS4ERR_BAD_HIGH_SLOT:
1090	case -NFS4ERR_DEADSESSION:
1091	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1092	case -NFS4ERR_SEQ_FALSE_RETRY:
1093	case -NFS4ERR_SEQ_MISORDERED:
1094		dprintk("%s ERROR %d, Reset session. Exchangeid "
1095			"flags 0x%x\n", __func__, task->tk_status,
1096			clp->cl_exchange_flags);
1097		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1098		break;
1099	case -NFS4ERR_DELAY:
1100	case -NFS4ERR_GRACE:
1101		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1102		break;
1103	case -NFS4ERR_RETRY_UNCACHED_REP:
1104		break;
1105	/* Invalidate Layout errors */
1106	case -NFS4ERR_PNFS_NO_LAYOUT:
1107	case -ESTALE:           /* mapped NFS4ERR_STALE */
1108	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
1109	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
1110	case -NFS4ERR_FHEXPIRED:
1111	case -NFS4ERR_WRONG_TYPE:
1112		dprintk("%s Invalid layout error %d\n", __func__,
1113			task->tk_status);
1114		/*
1115		 * Destroy layout so new i/o will get a new layout.
1116		 * Layout will not be destroyed until all current lseg
1117		 * references are put. Mark layout as invalid to resend failed
1118		 * i/o and all i/o waiting on the slot table to the MDS until
1119		 * layout is destroyed and a new valid layout is obtained.
1120		 */
1121		pnfs_destroy_layout(NFS_I(inode));
1122		rpc_wake_up(&tbl->slot_tbl_waitq);
1123		goto reset;
1124	/* RPC connection errors */
1125	case -ECONNREFUSED:
1126	case -EHOSTDOWN:
1127	case -EHOSTUNREACH:
1128	case -ENETUNREACH:
1129	case -EIO:
1130	case -ETIMEDOUT:
1131	case -EPIPE:
1132		dprintk("%s DS connection error %d\n", __func__,
1133			task->tk_status);
1134		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1135				&devid->deviceid);
1136		rpc_wake_up(&tbl->slot_tbl_waitq);
1137		fallthrough;
1138	default:
1139		if (ff_layout_avoid_mds_available_ds(lseg))
1140			return -NFS4ERR_RESET_TO_PNFS;
1141reset:
1142		dprintk("%s Retry through MDS. Error %d\n", __func__,
1143			task->tk_status);
1144		return -NFS4ERR_RESET_TO_MDS;
1145	}
1146	task->tk_status = 0;
1147	return -EAGAIN;
1148}
1149
1150/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1151static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1152					   struct pnfs_layout_segment *lseg,
1153					   u32 idx)
1154{
1155	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1156
1157	switch (task->tk_status) {
1158	/* File access problems. Don't mark the device as unavailable */
1159	case -EACCES:
1160	case -ESTALE:
1161	case -EISDIR:
1162	case -EBADHANDLE:
1163	case -ELOOP:
1164	case -ENOSPC:
1165		break;
1166	case -EJUKEBOX:
1167		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1168		goto out_retry;
1169	default:
1170		dprintk("%s DS connection error %d\n", __func__,
1171			task->tk_status);
1172		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1173				&devid->deviceid);
1174	}
1175	/* FIXME: Need to prevent infinite looping here. */
1176	return -NFS4ERR_RESET_TO_PNFS;
1177out_retry:
1178	task->tk_status = 0;
1179	rpc_restart_call_prepare(task);
1180	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1181	return -EAGAIN;
1182}
1183
1184static int ff_layout_async_handle_error(struct rpc_task *task,
1185					struct nfs4_state *state,
1186					struct nfs_client *clp,
1187					struct pnfs_layout_segment *lseg,
1188					u32 idx)
1189{
1190	int vers = clp->cl_nfs_mod->rpc_vers->number;
1191
1192	if (task->tk_status >= 0) {
1193		ff_layout_mark_ds_reachable(lseg, idx);
1194		return 0;
1195	}
1196
1197	/* Handle the case of an invalid layout segment */
1198	if (!pnfs_is_valid_lseg(lseg))
1199		return -NFS4ERR_RESET_TO_PNFS;
1200
1201	switch (vers) {
1202	case 3:
1203		return ff_layout_async_handle_error_v3(task, lseg, idx);
1204	case 4:
1205		return ff_layout_async_handle_error_v4(task, state, clp,
1206						       lseg, idx);
1207	default:
1208		/* should never happen */
1209		WARN_ON_ONCE(1);
1210		return 0;
1211	}
1212}
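
/*
 * A note on the return-value contract shared by the handlers above (a
 * summary of this file's behaviour, not a new interface): 0 means there is
 * no error to handle, -NFS4ERR_RESET_TO_PNFS asks for a resend through
 * another mirror, -NFS4ERR_RESET_TO_MDS asks for a resend through the MDS,
 * and -EAGAIN means the rpc_task itself should be retried. The done
 * callbacks below dispatch on it roughly as:
 *
 *	switch (err) {
 *	case -NFS4ERR_RESET_TO_PNFS:
 *		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
 *		break;
 *	case -NFS4ERR_RESET_TO_MDS:
 *		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
 *		break;
 *	case -EAGAIN:
 *		rpc_restart_call_prepare(task);
 *		break;
 *	}
 */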
1213
1214static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1215					u32 idx, u64 offset, u64 length,
1216					u32 *op_status, int opnum, int error)
1217{
1218	struct nfs4_ff_layout_mirror *mirror;
1219	u32 status = *op_status;
1220	int err;
1221
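	/*
	 * If the operation failed before the DS returned an NFS status
	 * (*op_status == 0), map the local errno onto an NFS4ERR code so
	 * the failure can be recorded and later reported via
	 * LAYOUTERROR/LAYOUTRETURN.
	 */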
1222	if (status == 0) {
1223		switch (error) {
1224		case -ETIMEDOUT:
1225		case -EPFNOSUPPORT:
1226		case -EPROTONOSUPPORT:
1227		case -EOPNOTSUPP:
1228		case -ECONNREFUSED:
1229		case -ECONNRESET:
1230		case -EHOSTDOWN:
1231		case -EHOSTUNREACH:
1232		case -ENETUNREACH:
1233		case -EADDRINUSE:
1234		case -ENOBUFS:
1235		case -EPIPE:
1236		case -EPERM:
1237			*op_status = status = NFS4ERR_NXIO;
1238			break;
1239		case -EACCES:
1240			*op_status = status = NFS4ERR_ACCESS;
1241			break;
1242		default:
1243			return;
1244		}
1245	}
1246
1247	mirror = FF_LAYOUT_COMP(lseg, idx);
1248	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1249				       mirror, offset, length, status, opnum,
1250				       GFP_NOIO);
1251
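	/*
	 * DELAY and GRACE are transient, so nothing more to do here. NXIO
	 * marks the DS unreachable; reads can still be satisfied from
	 * another mirror, but writes (and any other status) cause the
	 * layout to be marked for return.
	 */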
1252	switch (status) {
1253	case NFS4ERR_DELAY:
1254	case NFS4ERR_GRACE:
1255		break;
1256	case NFS4ERR_NXIO:
1257		ff_layout_mark_ds_unreachable(lseg, idx);
1258		/*
1259		 * Don't return the layout if this is a read and we still
1260		 * have layouts to try
1261		 */
1262		if (opnum == OP_READ)
1263			break;
1264		fallthrough;
1265	default:
1266		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
1267						  lseg);
1268	}
1269
1270	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1271}
1272
1273/* NFS_PROTO call done callback routines */
1274static int ff_layout_read_done_cb(struct rpc_task *task,
1275				struct nfs_pgio_header *hdr)
1276{
1277	int err;
1278
1279	if (task->tk_status < 0) {
1280		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1281					    hdr->args.offset, hdr->args.count,
1282					    &hdr->res.op_status, OP_READ,
1283					    task->tk_status);
1284		trace_ff_layout_read_error(hdr);
1285	}
1286
1287	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1288					   hdr->ds_clp, hdr->lseg,
1289					   hdr->pgio_mirror_idx);
1290
1291	trace_nfs4_pnfs_read(hdr, err);
1292	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1293	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1294	switch (err) {
1295	case -NFS4ERR_RESET_TO_PNFS:
1296		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1297		return task->tk_status;
1298	case -NFS4ERR_RESET_TO_MDS:
1299		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1300		return task->tk_status;
1301	case -EAGAIN:
1302		goto out_eagain;
1303	}
1304
1305	return 0;
1306out_eagain:
1307	rpc_restart_call_prepare(task);
1308	return -EAGAIN;
1309}
1310
1311static bool
1312ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1313{
1314	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1315}
1316
1317/*
1318 * We reference the rpc_cred of the first WRITE that triggers the need for
1319 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1320 * RFC 5661 is not clear about which credential should be used.
1321 *
1322 * The flexfiles client should treat a FILE_SYNC reply from the DS as
1323 * DATA_SYNC; per http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751,
1324 * we always send a layoutcommit after DS writes.
1325 */
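/*
 * Example (illustrative): a stable WRITE reply covering @count bytes at
 * @offset advances the layoutcommit range with
 * end_offset == offset + count, which is how ff_layout_write_done_cb()
 * below computes it from hdr->mds_offset and hdr->res.count.
 */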
1326static void
1327ff_layout_set_layoutcommit(struct inode *inode,
1328		struct pnfs_layout_segment *lseg,
1329		loff_t end_offset)
1330{
1331	if (!ff_layout_need_layoutcommit(lseg))
1332		return;
1333
1334	pnfs_set_layoutcommit(inode, lseg, end_offset);
1335	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1336		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
1337}
1338
1339static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1340		struct nfs_pgio_header *hdr)
1341{
1342	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1343		return;
1344	nfs4_ff_layout_stat_io_start_read(hdr->inode,
1345			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1346			hdr->args.count,
1347			task->tk_start);
1348}
1349
1350static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1351		struct nfs_pgio_header *hdr)
1352{
1353	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1354		return;
1355	nfs4_ff_layout_stat_io_end_read(task,
1356			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1357			hdr->args.count,
1358			hdr->res.count);
1359	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1360}
1361
1362static int ff_layout_read_prepare_common(struct rpc_task *task,
1363					 struct nfs_pgio_header *hdr)
1364{
1365	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1366		rpc_exit(task, -EIO);
1367		return -EIO;
1368	}
1369
1370	ff_layout_read_record_layoutstats_start(task, hdr);
1371	return 0;
1372}
1373
1374/*
1375 * Call ops for the async read/write cases.
1376 * In the case of dense layouts, the offset needs to be reset to its
1377 * original value.
1378 */
1379static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1380{
1381	struct nfs_pgio_header *hdr = data;
1382
1383	if (ff_layout_read_prepare_common(task, hdr))
1384		return;
1385
1386	rpc_call_start(task);
1387}
1388
1389static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1390{
1391	struct nfs_pgio_header *hdr = data;
1392
1393	if (nfs4_setup_sequence(hdr->ds_clp,
1394				&hdr->args.seq_args,
1395				&hdr->res.seq_res,
1396				task))
1397		return;
1398
1399	ff_layout_read_prepare_common(task, hdr);
1400}
1401
1402static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1403{
1404	struct nfs_pgio_header *hdr = data;
1405
1406	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
1407
1408	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1409	    task->tk_status == 0) {
1410		nfs4_sequence_done(task, &hdr->res.seq_res);
1411		return;
1412	}
1413
1414	/* Note this may cause RPC to be resent */
1415	hdr->mds_ops->rpc_call_done(task, hdr);
1416}
1417
1418static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1419{
1420	struct nfs_pgio_header *hdr = data;
1421
1422	ff_layout_read_record_layoutstats_done(task, hdr);
1423	rpc_count_iostats_metrics(task,
1424	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1425}
1426
1427static void ff_layout_read_release(void *data)
1428{
1429	struct nfs_pgio_header *hdr = data;
1430
1431	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1432	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1433		ff_layout_resend_pnfs_read(hdr);
1434	else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1435		ff_layout_reset_read(hdr);
1436	pnfs_generic_rw_release(data);
1437}
1438
1439
1440static int ff_layout_write_done_cb(struct rpc_task *task,
1441				struct nfs_pgio_header *hdr)
1442{
1443	loff_t end_offs = 0;
1444	int err;
1445
1446	if (task->tk_status < 0) {
1447		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1448					    hdr->args.offset, hdr->args.count,
1449					    &hdr->res.op_status, OP_WRITE,
1450					    task->tk_status);
1451		trace_ff_layout_write_error(hdr);
1452	}
1453
1454	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1455					   hdr->ds_clp, hdr->lseg,
1456					   hdr->pgio_mirror_idx);
1457
1458	trace_nfs4_pnfs_write(hdr, err);
1459	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1460	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1461	switch (err) {
1462	case -NFS4ERR_RESET_TO_PNFS:
1463		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1464		return task->tk_status;
1465	case -NFS4ERR_RESET_TO_MDS:
1466		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1467		return task->tk_status;
1468	case -EAGAIN:
1469		return -EAGAIN;
1470	}
1471
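	/*
	 * Note that a FILE_SYNC reply is deliberately treated like
	 * DATA_SYNC here: per the RFC 5661 errata referenced above
	 * ff_layout_set_layoutcommit(), a layoutcommit is still sent.
	 */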
1472	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1473	    hdr->res.verf->committed == NFS_DATA_SYNC)
1474		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1475
1476	/* Note: if the write is unstable, don't set end_offs until commit */
1477	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1478
1479	/* zero out fattr since we don't care about DS attributes at all */
1480	hdr->fattr.valid = 0;
1481	if (task->tk_status >= 0)
1482		nfs_writeback_update_inode(hdr);
1483
1484	return 0;
1485}
1486
1487static int ff_layout_commit_done_cb(struct rpc_task *task,
1488				     struct nfs_commit_data *data)
1489{
1490	int err;
1491
1492	if (task->tk_status < 0) {
1493		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1494					    data->args.offset, data->args.count,
1495					    &data->res.op_status, OP_COMMIT,
1496					    task->tk_status);
1497		trace_ff_layout_commit_error(data);
1498	}
1499
1500	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1501					   data->lseg, data->ds_commit_index);
1502
1503	trace_nfs4_pnfs_commit_ds(data, err);
1504	switch (err) {
1505	case -NFS4ERR_RESET_TO_PNFS:
1506		pnfs_generic_prepare_to_resend_writes(data);
1507		return -EAGAIN;
1508	case -NFS4ERR_RESET_TO_MDS:
1509		pnfs_generic_prepare_to_resend_writes(data);
1510		return -EAGAIN;
1511	case -EAGAIN:
1512		rpc_restart_call_prepare(task);
1513		return -EAGAIN;
1514	}
1515
1516	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1517
1518	return 0;
1519}
1520
1521static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1522		struct nfs_pgio_header *hdr)
1523{
1524	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1525		return;
1526	nfs4_ff_layout_stat_io_start_write(hdr->inode,
1527			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1528			hdr->args.count,
1529			task->tk_start);
1530}
1531
1532static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1533		struct nfs_pgio_header *hdr)
1534{
1535	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1536		return;
1537	nfs4_ff_layout_stat_io_end_write(task,
1538			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1539			hdr->args.count, hdr->res.count,
1540			hdr->res.verf->committed);
1541	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1542}
1543
1544static int ff_layout_write_prepare_common(struct rpc_task *task,
1545					  struct nfs_pgio_header *hdr)
1546{
1547	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1548		rpc_exit(task, -EIO);
1549		return -EIO;
1550	}
1551
1552	ff_layout_write_record_layoutstats_start(task, hdr);
1553	return 0;
1554}
1555
1556static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1557{
1558	struct nfs_pgio_header *hdr = data;
1559
1560	if (ff_layout_write_prepare_common(task, hdr))
1561		return;
1562
1563	rpc_call_start(task);
1564}
1565
1566static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1567{
1568	struct nfs_pgio_header *hdr = data;
1569
1570	if (nfs4_setup_sequence(hdr->ds_clp,
1571				&hdr->args.seq_args,
1572				&hdr->res.seq_res,
1573				task))
1574		return;
1575
1576	ff_layout_write_prepare_common(task, hdr);
1577}
1578
1579static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1580{
1581	struct nfs_pgio_header *hdr = data;
1582
1583	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1584	    task->tk_status == 0) {
1585		nfs4_sequence_done(task, &hdr->res.seq_res);
1586		return;
1587	}
1588
1589	/* Note this may cause RPC to be resent */
1590	hdr->mds_ops->rpc_call_done(task, hdr);
1591}
1592
1593static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1594{
1595	struct nfs_pgio_header *hdr = data;
1596
1597	ff_layout_write_record_layoutstats_done(task, hdr);
1598	rpc_count_iostats_metrics(task,
1599	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1600}
1601
1602static void ff_layout_write_release(void *data)
1603{
1604	struct nfs_pgio_header *hdr = data;
1605
1606	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1607	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1608		ff_layout_send_layouterror(hdr->lseg);
1609		ff_layout_reset_write(hdr, true);
1610	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1611		ff_layout_reset_write(hdr, false);
1612	pnfs_generic_rw_release(data);
1613}
1614
1615static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1616		struct nfs_commit_data *cdata)
1617{
1618	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1619		return;
1620	nfs4_ff_layout_stat_io_start_write(cdata->inode,
1621			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1622			0, task->tk_start);
1623}
1624
1625static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1626		struct nfs_commit_data *cdata)
1627{
1628	struct nfs_page *req;
1629	__u64 count = 0;
1630
1631	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1632		return;
1633
1634	if (task->tk_status == 0) {
1635		list_for_each_entry(req, &cdata->pages, wb_list)
1636			count += req->wb_bytes;
1637	}
1638	nfs4_ff_layout_stat_io_end_write(task,
1639			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1640			count, count, NFS_FILE_SYNC);
1641	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1642}
1643
1644static void ff_layout_commit_prepare_common(struct rpc_task *task,
1645		struct nfs_commit_data *cdata)
1646{
1647	ff_layout_commit_record_layoutstats_start(task, cdata);
1648}
1649
1650static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1651{
1652	ff_layout_commit_prepare_common(task, data);
1653	rpc_call_start(task);
1654}
1655
1656static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1657{
1658	struct nfs_commit_data *wdata = data;
1659
1660	if (nfs4_setup_sequence(wdata->ds_clp,
1661				&wdata->args.seq_args,
1662				&wdata->res.seq_res,
1663				task))
1664		return;
1665	ff_layout_commit_prepare_common(task, data);
1666}
1667
1668static void ff_layout_commit_done(struct rpc_task *task, void *data)
1669{
1670	pnfs_generic_write_commit_done(task, data);
1671}
1672
1673static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1674{
1675	struct nfs_commit_data *cdata = data;
1676
1677	ff_layout_commit_record_layoutstats_done(task, cdata);
1678	rpc_count_iostats_metrics(task,
1679	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1680}
1681
1682static void ff_layout_commit_release(void *data)
1683{
1684	struct nfs_commit_data *cdata = data;
1685
1686	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1687	pnfs_generic_commit_release(data);
1688}
1689
1690static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1691	.rpc_call_prepare = ff_layout_read_prepare_v3,
1692	.rpc_call_done = ff_layout_read_call_done,
1693	.rpc_count_stats = ff_layout_read_count_stats,
1694	.rpc_release = ff_layout_read_release,
1695};
1696
1697static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1698	.rpc_call_prepare = ff_layout_read_prepare_v4,
1699	.rpc_call_done = ff_layout_read_call_done,
1700	.rpc_count_stats = ff_layout_read_count_stats,
1701	.rpc_release = ff_layout_read_release,
1702};
1703
1704static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1705	.rpc_call_prepare = ff_layout_write_prepare_v3,
1706	.rpc_call_done = ff_layout_write_call_done,
1707	.rpc_count_stats = ff_layout_write_count_stats,
1708	.rpc_release = ff_layout_write_release,
1709};
1710
1711static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1712	.rpc_call_prepare = ff_layout_write_prepare_v4,
1713	.rpc_call_done = ff_layout_write_call_done,
1714	.rpc_count_stats = ff_layout_write_count_stats,
1715	.rpc_release = ff_layout_write_release,
1716};
1717
1718static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1719	.rpc_call_prepare = ff_layout_commit_prepare_v3,
1720	.rpc_call_done = ff_layout_commit_done,
1721	.rpc_count_stats = ff_layout_commit_count_stats,
1722	.rpc_release = ff_layout_commit_release,
1723};
1724
1725static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1726	.rpc_call_prepare = ff_layout_commit_prepare_v4,
1727	.rpc_call_done = ff_layout_commit_done,
1728	.rpc_count_stats = ff_layout_commit_count_stats,
1729	.rpc_release = ff_layout_commit_release,
1730};
1731
1732static enum pnfs_try_status
1733ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1734{
1735	struct pnfs_layout_segment *lseg = hdr->lseg;
1736	struct nfs4_pnfs_ds *ds;
1737	struct rpc_clnt *ds_clnt;
1738	struct nfs4_ff_layout_mirror *mirror;
1739	const struct cred *ds_cred;
1740	loff_t offset = hdr->args.offset;
1741	u32 idx = hdr->pgio_mirror_idx;
1742	int vers;
1743	struct nfs_fh *fh;
1744
1745	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1746		__func__, hdr->inode->i_ino,
1747		hdr->args.pgbase, (size_t)hdr->args.count, offset);
1748
1749	mirror = FF_LAYOUT_COMP(lseg, idx);
1750	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1751	if (!ds)
1752		goto out_failed;
1753
1754	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1755						   hdr->inode);
1756	if (IS_ERR(ds_clnt))
1757		goto out_failed;
1758
1759	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1760	if (!ds_cred)
1761		goto out_failed;
1762
1763	vers = nfs4_ff_layout_ds_version(mirror);
1764
1765	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1766		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1767
1768	hdr->pgio_done_cb = ff_layout_read_done_cb;
1769	refcount_inc(&ds->ds_clp->cl_count);
1770	hdr->ds_clp = ds->ds_clp;
1771	fh = nfs4_ff_layout_select_ds_fh(mirror);
1772	if (fh)
1773		hdr->args.fh = fh;
1774
1775	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1776
1777	/*
1778	 * Note that if we ever decide to split across DSes,
1779	 * then we may need to handle dense-like offsets.
1780	 */
1781	hdr->args.offset = offset;
1782	hdr->mds_offset = offset;
1783
1784	/* Perform an asynchronous read to ds */
1785	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1786			  vers == 3 ? &ff_layout_read_call_ops_v3 :
1787				      &ff_layout_read_call_ops_v4,
1788			  0, RPC_TASK_SOFTCONN);
1789	put_cred(ds_cred);
1790	return PNFS_ATTEMPTED;
1791
1792out_failed:
1793	if (ff_layout_avoid_mds_available_ds(lseg))
1794		return PNFS_TRY_AGAIN;
1795	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
1796			hdr->args.offset, hdr->args.count,
1797			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
1798	return PNFS_NOT_ATTEMPTED;
1799}
1800
1801/* Perform async writes. */
1802static enum pnfs_try_status
1803ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1804{
1805	struct pnfs_layout_segment *lseg = hdr->lseg;
1806	struct nfs4_pnfs_ds *ds;
1807	struct rpc_clnt *ds_clnt;
1808	struct nfs4_ff_layout_mirror *mirror;
1809	const struct cred *ds_cred;
1810	loff_t offset = hdr->args.offset;
1811	int vers;
1812	struct nfs_fh *fh;
1813	u32 idx = hdr->pgio_mirror_idx;
1814
1815	mirror = FF_LAYOUT_COMP(lseg, idx);
1816	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1817	if (!ds)
1818		goto out_failed;
1819
1820	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1821						   hdr->inode);
1822	if (IS_ERR(ds_clnt))
1823		goto out_failed;
1824
1825	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1826	if (!ds_cred)
1827		goto out_failed;
1828
1829	vers = nfs4_ff_layout_ds_version(mirror);
1830
1831	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1832		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1833		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1834		vers);
1835
1836	hdr->pgio_done_cb = ff_layout_write_done_cb;
1837	refcount_inc(&ds->ds_clp->cl_count);
1838	hdr->ds_clp = ds->ds_clp;
1839	hdr->ds_commit_idx = idx;
1840	fh = nfs4_ff_layout_select_ds_fh(mirror);
1841	if (fh)
1842		hdr->args.fh = fh;
1843
1844	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1845
1846	/*
1847	 * Note that if we ever decide to split across DSes,
1848	 * then we may need to handle dense-like offsets.
1849	 */
1850	hdr->args.offset = offset;
1851
1852	/* Perform an asynchronous write */
1853	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1854			  vers == 3 ? &ff_layout_write_call_ops_v3 :
1855				      &ff_layout_write_call_ops_v4,
1856			  sync, RPC_TASK_SOFTCONN);
1857	put_cred(ds_cred);
1858	return PNFS_ATTEMPTED;
1859
1860out_failed:
1861	if (ff_layout_avoid_mds_available_ds(lseg))
1862		return PNFS_TRY_AGAIN;
1863	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
1864			hdr->args.offset, hdr->args.count,
1865			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
1866	return PNFS_NOT_ATTEMPTED;
1867}
1868
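/*
 * Flexfiles keeps one commit bucket per mirror, so the commit index maps
 * directly onto the DS (mirror) index.
 */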
1869static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1870{
1871	return i;
1872}
1873
1874static struct nfs_fh *
1875select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1876{
1877	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1878
1879	/* FIXME: Assumes that there is only one NFS version available
1880	 * for the DS.
1881	 */
1882	return &flseg->mirror_array[i]->fh_versions[0];
1883}
1884
1885static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1886{
1887	struct pnfs_layout_segment *lseg = data->lseg;
1888	struct nfs4_pnfs_ds *ds;
1889	struct rpc_clnt *ds_clnt;
1890	struct nfs4_ff_layout_mirror *mirror;
1891	const struct cred *ds_cred;
1892	u32 idx;
1893	int vers, ret;
1894	struct nfs_fh *fh;
1895
1896	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1897	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1898		goto out_err;
1899
1900	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1901	mirror = FF_LAYOUT_COMP(lseg, idx);
1902	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1903	if (!ds)
1904		goto out_err;
1905
1906	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1907						   data->inode);
1908	if (IS_ERR(ds_clnt))
1909		goto out_err;
1910
1911	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
1912	if (!ds_cred)
1913		goto out_err;
1914
1915	vers = nfs4_ff_layout_ds_version(mirror);
1916
1917	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1918		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
1919		vers);
1920	data->commit_done_cb = ff_layout_commit_done_cb;
1921	data->cred = ds_cred;
1922	refcount_inc(&ds->ds_clp->cl_count);
1923	data->ds_clp = ds->ds_clp;
1924	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1925	if (fh)
1926		data->args.fh = fh;
1927
1928	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1929				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
1930					       &ff_layout_commit_call_ops_v4,
1931				   how, RPC_TASK_SOFTCONN);
1932	put_cred(ds_cred);
1933	return ret;
1934out_err:
1935	pnfs_generic_prepare_to_resend_writes(data);
1936	pnfs_generic_commit_release(data);
1937	return -EAGAIN;
1938}
1939
1940static int
1941ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1942			   int how, struct nfs_commit_info *cinfo)
1943{
1944	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1945					    ff_layout_initiate_commit);
1946}
1947
1948static struct pnfs_ds_commit_info *
1949ff_layout_get_ds_info(struct inode *inode)
1950{
1951	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1952
1953	if (layout == NULL)
1954		return NULL;
1955
1956	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
1957}
1958
1959static void
1960ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
1961		struct pnfs_layout_segment *lseg)
1962{
1963	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1964	struct inode *inode = lseg->pls_layout->plh_inode;
1965	struct pnfs_commit_array *array, *new;
1966
1967	new = pnfs_alloc_commit_array(flseg->mirror_array_cnt, GFP_NOIO);
1968	if (new) {
1969		spin_lock(&inode->i_lock);
1970		array = pnfs_add_commit_array(fl_cinfo, new, lseg);
1971		spin_unlock(&inode->i_lock);
1972		if (array != new)
1973			pnfs_free_commit_array(new);
1974	}
1975}
1976
1977static void
1978ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
1979		struct inode *inode)
1980{
1981	spin_lock(&inode->i_lock);
1982	pnfs_generic_ds_cinfo_destroy(fl_cinfo);
1983	spin_unlock(&inode->i_lock);
1984}
1985
1986static void
1987ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
1988{
1989	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
1990						  id_node));
1991}
1992
1993static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
1994				  const struct nfs4_layoutreturn_args *args,
1995				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
1996{
1997	__be32 *start;
1998
1999	start = xdr_reserve_space(xdr, 4);
2000	if (unlikely(!start))
2001		return -E2BIG;
2002
2003	*start = cpu_to_be32(ff_args->num_errors);
2004	/* This assumes we always return _ALL_ layouts */
2005	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2006}
2007
2008static void
2009encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
2010{
2011	WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
2012}
2013
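/*
 * Encode the fixed head of a per-device iostats entry. A rough sketch of
 * the XDR emitted below (field names are descriptive, not from the spec):
 *
 *	offset       hyper
 *	length       hyper
 *	stateid      opaque[NFS4_STATEID_SIZE]
 *	read_count   hyper
 *	read_bytes   hyper
 *	write_count  hyper
 *	write_bytes  hyper
 *	deviceid     opaque[NFS4_DEVICEID4_SIZE]
 */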
2014static void
2015ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2016			    const nfs4_stateid *stateid,
2017			    const struct nfs42_layoutstat_devinfo *devinfo)
2018{
2019	__be32 *p;
2020
2021	p = xdr_reserve_space(xdr, 8 + 8);
2022	p = xdr_encode_hyper(p, devinfo->offset);
2023	p = xdr_encode_hyper(p, devinfo->length);
2024	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2025	p = xdr_reserve_space(xdr, 4*8);
2026	p = xdr_encode_hyper(p, devinfo->read_count);
2027	p = xdr_encode_hyper(p, devinfo->read_bytes);
2028	p = xdr_encode_hyper(p, devinfo->write_count);
2029	p = xdr_encode_hyper(p, devinfo->write_bytes);
2030	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2031}
2032
2033static void
2034ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2035			    const nfs4_stateid *stateid,
2036			    const struct nfs42_layoutstat_devinfo *devinfo)
2037{
2038	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2039	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2040			devinfo->ld_private.data);
2041}
2042
2043/* encode the iostats collected for each device at layoutreturn time */
2044static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2045		const struct nfs4_layoutreturn_args *args,
2046		struct nfs4_flexfile_layoutreturn_args *ff_args)
2047{
2048	__be32 *p;
2049	int i;
2050
2051	p = xdr_reserve_space(xdr, 4);
2052	*p = cpu_to_be32(ff_args->num_dev);
2053	for (i = 0; i < ff_args->num_dev; i++)
2054		ff_layout_encode_ff_iostat(xdr,
2055				&args->layout->plh_stateid,
2056				&ff_args->devinfo[i]);
2057}
2058
2059static void
2060ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2061		unsigned int num_entries)
2062{
2063	unsigned int i;
2064
2065	for (i = 0; i < num_entries; i++) {
2066		if (!devinfo[i].ld_private.ops)
2067			continue;
2068		if (!devinfo[i].ld_private.ops->free)
2069			continue;
2070		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2071	}
2072}
2073
2074static struct nfs4_deviceid_node *
2075ff_layout_alloc_deviceid_node(struct nfs_server *server,
2076			      struct pnfs_device *pdev, gfp_t gfp_flags)
2077{
2078	struct nfs4_ff_layout_ds *dsaddr;
2079
2080	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2081	if (!dsaddr)
2082		return NULL;
2083	return &dsaddr->id_node;
2084}
2085
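/*
 * The layoutreturn body is an XDR opaque whose length is not known up
 * front, so the ioerr and iostats arrays are first encoded into a
 * scratch page (ff_args->pages[0]) and then spliced into the main
 * stream as <length, page data>.
 */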
2086static void
2087ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2088		const void *voidargs,
2089		const struct nfs4_xdr_opaque_data *ff_opaque)
2090{
2091	const struct nfs4_layoutreturn_args *args = voidargs;
2092	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2093	struct xdr_buf tmp_buf = {
2094		.head = {
2095			[0] = {
2096				.iov_base = page_address(ff_args->pages[0]),
2097			},
2098		},
2099		.buflen = PAGE_SIZE,
2100	};
2101	struct xdr_stream tmp_xdr;
2102	__be32 *start;
2103
2104	dprintk("%s: Begin\n", __func__);
2105
2106	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2107
2108	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2109	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2110
2111	start = xdr_reserve_space(xdr, 4);
2112	*start = cpu_to_be32(tmp_buf.len);
2113	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2114
2115	dprintk("%s: Return\n", __func__);
2116}
2117
2118static void
2119ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2120{
2121	struct nfs4_flexfile_layoutreturn_args *ff_args;
2122
2123	if (!args->data)
2124		return;
2125	ff_args = args->data;
2126	args->data = NULL;
2127
2128	ff_layout_free_ds_ioerr(&ff_args->errors);
2129	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2130
2131	put_page(ff_args->pages[0]);
2132	kfree(ff_args);
2133}
2134
2135static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2136	.encode = ff_layout_encode_layoutreturn,
2137	.free = ff_layout_free_layoutreturn,
2138};
2139
2140static int
2141ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2142{
2143	struct nfs4_flexfile_layoutreturn_args *ff_args;
2144	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2145
2146	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
2147	if (!ff_args)
2148		goto out_nomem;
2149	ff_args->pages[0] = alloc_page(GFP_KERNEL);
2150	if (!ff_args->pages[0])
2151		goto out_nomem_free;
2152
2153	INIT_LIST_HEAD(&ff_args->errors);
2154	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2155			&args->range, &ff_args->errors,
2156			FF_LAYOUTRETURN_MAXERR);
2157
2158	spin_lock(&args->inode->i_lock);
2159	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2160			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
2161	spin_unlock(&args->inode->i_lock);
2162
2163	args->ld_private->ops = &layoutreturn_ops;
2164	args->ld_private->data = ff_args;
2165	return 0;
2166out_nomem_free:
2167	kfree(ff_args);
2168out_nomem:
2169	return -ENOMEM;
2170}
2171
2172#ifdef CONFIG_NFS_V4_2
2173void
2174ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2175{
2176	struct pnfs_layout_hdr *lo = lseg->pls_layout;
2177	struct nfs42_layout_error *errors;
2178	LIST_HEAD(head);
2179
2180	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2181		return;
2182	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2183	if (list_empty(&head))
2184		return;
2185
2186	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX,
2187			sizeof(*errors), GFP_NOFS);
2188	if (errors != NULL) {
2189		const struct nfs4_ff_layout_ds_err *pos;
2190		size_t n = 0;
2191
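		/*
		 * Batch the queued DS errors into LAYOUTERROR calls,
		 * sending at most NFS42_LAYOUTERROR_MAX entries per call.
		 */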
2192		list_for_each_entry(pos, &head, list) {
2193			errors[n].offset = pos->offset;
2194			errors[n].length = pos->length;
2195			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2196			errors[n].errors[0].dev_id = pos->deviceid;
2197			errors[n].errors[0].status = pos->status;
2198			errors[n].errors[0].opnum = pos->opnum;
2199			n++;
2200			if (!list_is_last(&pos->list, &head) &&
2201			    n < NFS42_LAYOUTERROR_MAX)
2202				continue;
2203			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2204				break;
2205			n = 0;
2206		}
2207		kfree(errors);
2208	}
2209	ff_layout_free_ds_ioerr(&head);
2210}
2211#else
2212void
2213ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2214{
2215}
2216#endif
2217
2218static int
2219ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2220{
2221	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2222
2223	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2224}
2225
2226static size_t
2227ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2228			  const int buflen)
2229{
2230	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2231	const struct in6_addr *addr = &sin6->sin6_addr;
2232
2233	/*
2234	 * RFC 4291, Section 2.2.2
2235	 *
2236	 * Shorthanded ANY address
2237	 */
2238	if (ipv6_addr_any(addr))
2239		return snprintf(buf, buflen, "::");
2240
2241	/*
2242	 * RFC 4291, Section 2.2.2
2243	 *
2244	 * Shorthanded loopback address
2245	 */
2246	if (ipv6_addr_loopback(addr))
2247		return snprintf(buf, buflen, "::1");
2248
2249	/*
2250	 * RFC 4291, Section 2.2.3
2251	 *
2252	 * Special presentation address format for mapped v4
2253	 * addresses.
2254	 */
2255	if (ipv6_addr_v4mapped(addr))
2256		return snprintf(buf, buflen, "::ffff:%pI4",
2257					&addr->s6_addr32[3]);
2258
2259	/*
2260	 * RFC 4291, Section 2.2.1
2261	 */
2262	return snprintf(buf, buflen, "%pI6c", addr);
2263}
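
/*
 * Examples (illustrative): the unspecified address encodes as "::",
 * loopback as "::1", an IPv4-mapped address as "::ffff:192.0.2.1", and
 * anything else in the usual compressed form, e.g. "2001:db8::1".
 */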
2264
2265/* Derived from rpc_sockaddr2uaddr */
2266static void
2267ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2268{
2269	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2270	char portbuf[RPCBIND_MAXUADDRPLEN];
2271	char addrbuf[RPCBIND_MAXUADDRLEN];
2272	char *netid;
2273	unsigned short port;
2274	int len, netid_len;
2275	__be32 *p;
2276
2277	switch (sap->sa_family) {
2278	case AF_INET:
2279		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2280			return;
2281		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2282		netid = "tcp";
2283		netid_len = 3;
2284		break;
2285	case AF_INET6:
2286		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2287			return;
2288		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2289		netid = "tcp6";
2290		netid_len = 4;
2291		break;
2292	default:
2293		/* we only support tcp and tcp6 */
2294		WARN_ON_ONCE(1);
2295		return;
2296	}
2297
2298	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2299	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2300
2301	p = xdr_reserve_space(xdr, 4 + netid_len);
2302	xdr_encode_opaque(p, netid, netid_len);
2303
2304	p = xdr_reserve_space(xdr, 4 + len);
2305	xdr_encode_opaque(p, addrbuf, len);
2306}
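
/*
 * Example (illustrative): for 192.0.2.53 port 2049 the above encodes the
 * netid "tcp" and the universal address "192.0.2.53.8.1"
 * (2049 == 8 * 256 + 1).
 */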
2307
2308static void
2309ff_layout_encode_nfstime(struct xdr_stream *xdr,
2310			 ktime_t t)
2311{
2312	struct timespec64 ts;
2313	__be32 *p;
2314
2315	p = xdr_reserve_space(xdr, 12);
2316	ts = ktime_to_timespec64(t);
2317	p = xdr_encode_hyper(p, ts.tv_sec);
2318	*p++ = cpu_to_be32(ts.tv_nsec);
2319}
2320
2321static void
2322ff_layout_encode_io_latency(struct xdr_stream *xdr,
2323			    struct nfs4_ff_io_stat *stat)
2324{
2325	__be32 *p;
2326
2327	p = xdr_reserve_space(xdr, 5 * 8);
2328	p = xdr_encode_hyper(p, stat->ops_requested);
2329	p = xdr_encode_hyper(p, stat->bytes_requested);
2330	p = xdr_encode_hyper(p, stat->ops_completed);
2331	p = xdr_encode_hyper(p, stat->bytes_completed);
2332	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2333	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2334	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2335}
2336
2337static void
2338ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2339			      const struct nfs42_layoutstat_devinfo *devinfo,
2340			      struct nfs4_ff_layout_mirror *mirror)
2341{
2342	struct nfs4_pnfs_ds_addr *da;
2343	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2344	struct nfs_fh *fh = &mirror->fh_versions[0];
2345	__be32 *p;
2346
2347	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2348	dprintk("%s: DS %s: encoding address %s\n",
2349		__func__, ds->ds_remotestr, da->da_remotestr);
2350	/* netaddr4 */
2351	ff_layout_encode_netaddr(xdr, da);
2352	/* nfs_fh4 */
2353	p = xdr_reserve_space(xdr, 4 + fh->size);
2354	xdr_encode_opaque(p, fh->data, fh->size);
2355	/* ff_io_latency4 read */
2356	spin_lock(&mirror->lock);
2357	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2358	/* ff_io_latency4 write */
2359	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2360	spin_unlock(&mirror->lock);
2361	/* nfstime4 */
2362	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2363	/* bool */
2364	p = xdr_reserve_space(xdr, 4);
2365	*p = cpu_to_be32(false);
2366}
2367
2368static void
2369ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2370			     const struct nfs4_xdr_opaque_data *opaque)
2371{
2372	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2373			struct nfs42_layoutstat_devinfo, ld_private);
2374	__be32 *start;
2375
2376	/* layoutupdate length */
2377	start = xdr_reserve_space(xdr, 4);
2378	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2379
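	/*
	 * Back-patch the opaque length: xdr->p now points one XDR word past
	 * the encoded body, so the byte length is the word distance from
	 * just after the reserved length word, times 4.
	 */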
2380	*start = cpu_to_be32((xdr->p - start - 1) * 4);
2381}
2382
2383static void
2384ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2385{
2386	struct nfs4_ff_layout_mirror *mirror = opaque->data;
2387
2388	ff_layout_put_mirror(mirror);
2389}
2390
2391static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2392	.encode = ff_layout_encode_layoutstats,
2393	.free	= ff_layout_free_layoutstats,
2394};
2395
2396static int
2397ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2398			       struct nfs42_layoutstat_devinfo *devinfo,
2399			       int dev_limit)
2400{
2401	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2402	struct nfs4_ff_layout_mirror *mirror;
2403	struct nfs4_deviceid_node *dev;
2404	int i = 0;
2405
2406	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2407		if (i >= dev_limit)
2408			break;
2409		if (IS_ERR_OR_NULL(mirror->mirror_ds))
2410			continue;
2411		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
2412			continue;
2413		/* mirror refcount is put in ff_layout_free_layoutstats() */
2414		if (!refcount_inc_not_zero(&mirror->ref))
2415			continue;
2416		dev = &mirror->mirror_ds->id_node;
2417		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2418		devinfo->offset = 0;
2419		devinfo->length = NFS4_MAX_UINT64;
2420		spin_lock(&mirror->lock);
2421		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2422		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2423		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2424		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2425		spin_unlock(&mirror->lock);
2426		devinfo->layout_type = LAYOUT_FLEX_FILES;
2427		devinfo->ld_private.ops = &layoutstat_ops;
2428		devinfo->ld_private.data = mirror;
2429
2430		devinfo++;
2431		i++;
2432	}
2433	return i;
2434}
2435
2436static int
2437ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2438{
2439	struct nfs4_flexfile_layout *ff_layout;
2440	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2441
2442	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2443	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
2444	if (!args->devinfo)
2445		return -ENOMEM;
2446
2447	spin_lock(&args->inode->i_lock);
2448	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
2449	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2450			&args->devinfo[0], dev_count);
2451	spin_unlock(&args->inode->i_lock);
2452	if (!args->num_dev) {
2453		kfree(args->devinfo);
2454		args->devinfo = NULL;
2455		return -ENOENT;
2456	}
2457
2458	return 0;
2459}
2460
2461static int
2462ff_layout_set_layoutdriver(struct nfs_server *server,
2463		const struct nfs_fh *dummy)
2464{
2465#if IS_ENABLED(CONFIG_NFS_V4_2)
2466	server->caps |= NFS_CAP_LAYOUTSTATS;
2467#endif
2468	return 0;
2469}
2470
2471static const struct pnfs_commit_ops ff_layout_commit_ops = {
2472	.setup_ds_info		= ff_layout_setup_ds_info,
2473	.release_ds_info	= ff_layout_release_ds_info,
2474	.mark_request_commit	= pnfs_layout_mark_request_commit,
2475	.clear_request_commit	= pnfs_generic_clear_request_commit,
2476	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
2477	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
2478	.commit_pagelist	= ff_layout_commit_pagelist,
2479};
2480
2481static struct pnfs_layoutdriver_type flexfilelayout_type = {
2482	.id			= LAYOUT_FLEX_FILES,
2483	.name			= "LAYOUT_FLEX_FILES",
2484	.owner			= THIS_MODULE,
2485	.flags			= PNFS_LAYOUTGET_ON_OPEN,
2486	.max_layoutget_response	= 4096, /* 1 page or so... */
2487	.set_layoutdriver	= ff_layout_set_layoutdriver,
2488	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
2489	.free_layout_hdr	= ff_layout_free_layout_hdr,
2490	.alloc_lseg		= ff_layout_alloc_lseg,
2491	.free_lseg		= ff_layout_free_lseg,
2492	.add_lseg		= ff_layout_add_lseg,
2493	.pg_read_ops		= &ff_layout_pg_read_ops,
2494	.pg_write_ops		= &ff_layout_pg_write_ops,
2495	.get_ds_info		= ff_layout_get_ds_info,
2496	.free_deviceid_node	= ff_layout_free_deviceid_node,
2497	.read_pagelist		= ff_layout_read_pagelist,
2498	.write_pagelist		= ff_layout_write_pagelist,
2499	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
2500	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
2501	.sync			= pnfs_nfs_generic_sync,
2502	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
2503};
2504
2505static int __init nfs4flexfilelayout_init(void)
2506{
2507	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2508	       __func__);
2509	return pnfs_register_layoutdriver(&flexfilelayout_type);
2510}
2511
2512static void __exit nfs4flexfilelayout_exit(void)
2513{
2514	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2515	       __func__);
2516	pnfs_unregister_layoutdriver(&flexfilelayout_type);
2517}
2518
2519MODULE_ALIAS("nfs-layouttype4-4");
2520
2521MODULE_LICENSE("GPL");
2522MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2523
2524module_init(nfs4flexfilelayout_init);
2525module_exit(nfs4flexfilelayout_exit);
2526
2527module_param(io_maxretrans, ushort, 0644);
2528MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
2529			"retries an I/O request before returning an error.");