   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Module for pnfs flexfile layout driver.
   4 *
   5 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
   6 *
   7 * Tao Peng <bergwolf@primarydata.com>
   8 */
   9
  10#include <linux/nfs_fs.h>
  11#include <linux/nfs_mount.h>
  12#include <linux/nfs_page.h>
  13#include <linux/module.h>
  14#include <linux/file.h>
  15#include <linux/sched/mm.h>
  16
  17#include <linux/sunrpc/metrics.h>
  18
  19#include "flexfilelayout.h"
  20#include "../nfs4session.h"
  21#include "../nfs4idmap.h"
  22#include "../internal.h"
  23#include "../delegation.h"
  24#include "../nfs4trace.h"
  25#include "../iostat.h"
  26#include "../nfs.h"
  27#include "../nfs42.h"
  28
  29#define NFSDBG_FACILITY         NFSDBG_PNFS_LD
  30
  31#define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
  32#define FF_LAYOUTRETURN_MAXERR 20
  33
  34enum nfs4_ff_op_type {
  35	NFS4_FF_OP_LAYOUTSTATS,
  36	NFS4_FF_OP_LAYOUTRETURN,
  37};
  38
  39static unsigned short io_maxretrans;
  40
  41static const struct pnfs_commit_ops ff_layout_commit_ops;
  42static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
  43		struct nfs_pgio_header *hdr);
  44static int
  45ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
  46			       struct nfs42_layoutstat_devinfo *devinfo,
  47			       int dev_limit, enum nfs4_ff_op_type type);
  48static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
  49			      const struct nfs42_layoutstat_devinfo *devinfo,
  50			      struct nfs4_ff_layout_mirror *mirror);
  51
  52static struct pnfs_layout_hdr *
  53ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
  54{
  55	struct nfs4_flexfile_layout *ffl;
  56
  57	ffl = kzalloc(sizeof(*ffl), gfp_flags);
  58	if (ffl) {
  59		pnfs_init_ds_commit_info(&ffl->commit_info);
  60		INIT_LIST_HEAD(&ffl->error_list);
  61		INIT_LIST_HEAD(&ffl->mirrors);
  62		ffl->last_report_time = ktime_get();
  63		ffl->commit_info.ops = &ff_layout_commit_ops;
  64		return &ffl->generic_hdr;
  65	} else
  66		return NULL;
  67}
  68
  69static void
  70ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
  71{
  72	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
  73	struct nfs4_ff_layout_ds_err *err, *n;
  74
  75	list_for_each_entry_safe(err, n, &ffl->error_list, list) {
  76		list_del(&err->list);
  77		kfree(err);
  78	}
  79	kfree_rcu(ffl, generic_hdr.plh_rcu);
  80}
  81
  82static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
  83{
  84	__be32 *p;
  85
  86	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
  87	if (unlikely(p == NULL))
  88		return -ENOBUFS;
  89	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
  90	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
  91	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
  92		p[0], p[1], p[2], p[3]);
  93	return 0;
  94}
  95
  96static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
  97{
  98	__be32 *p;
  99
 100	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
 101	if (unlikely(!p))
 102		return -ENOBUFS;
 103	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
 104	nfs4_print_deviceid(devid);
 105	return 0;
 106}
 107
 108static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
 109{
 110	__be32 *p;
 111
 112	p = xdr_inline_decode(xdr, 4);
 113	if (unlikely(!p))
 114		return -ENOBUFS;
 115	fh->size = be32_to_cpup(p++);
 116	if (fh->size > NFS_MAXFHSIZE) {
 117		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
 118		       fh->size);
 119		return -EOVERFLOW;
 120	}
 121	/* fh.data */
 122	p = xdr_inline_decode(xdr, fh->size);
 123	if (unlikely(!p))
 124		return -ENOBUFS;
 125	memcpy(&fh->data, p, fh->size);
 126	dprintk("%s: fh len %d\n", __func__, fh->size);
 127
 128	return 0;
 129}
 130
 131/*
 132 * Currently only stringified uids and gids are accepted.
  133 * I.e., Kerberos is not supported for the DSes, so no principals.
 134 *
 135 * That means that one common function will suffice, but when
  136 * principals are added, this should be split to accommodate
 137 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 138 */
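     /*
      * For example, an owner of uid 1000 arrives as the XDR opaque
      * <length = 4, bytes "1000">: decode_name() reads the 4-byte length
      * and the body, and nfs_map_string_to_numeric() converts the string
      * into the numeric id 1000.
      */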
 139static int
 140decode_name(struct xdr_stream *xdr, u32 *id)
 141{
 142	__be32 *p;
 143	int len;
 144
 145	/* opaque_length(4)*/
 146	p = xdr_inline_decode(xdr, 4);
 147	if (unlikely(!p))
 148		return -ENOBUFS;
 149	len = be32_to_cpup(p++);
 150	if (len < 0)
 151		return -EINVAL;
 152
 153	dprintk("%s: len %u\n", __func__, len);
 154
 155	/* opaque body */
 156	p = xdr_inline_decode(xdr, len);
 157	if (unlikely(!p))
 158		return -ENOBUFS;
 159
 160	if (!nfs_map_string_to_numeric((char *)p, len, id))
 161		return -EINVAL;
 162
 163	return 0;
 164}
 165
 166static struct nfsd_file *
 167ff_local_open_fh(struct nfs_client *clp, const struct cred *cred,
 168		 struct nfs_fh *fh, fmode_t mode)
 169{
 170	if (mode & FMODE_WRITE) {
 171		/*
 172		 * Always request read and write access since this corresponds
 173		 * to a rw layout.
 174		 */
 175		mode |= FMODE_READ;
 176	}
 177
 178	return nfs_local_open_fh(clp, cred, fh, mode);
 179}
 180
 181static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
 182		const struct nfs4_ff_layout_mirror *m2)
 183{
 184	int i, j;
 185
 186	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
 187		return false;
 188	for (i = 0; i < m1->fh_versions_cnt; i++) {
 189		bool found_fh = false;
 190		for (j = 0; j < m2->fh_versions_cnt; j++) {
 191			if (nfs_compare_fh(&m1->fh_versions[i],
 192					&m2->fh_versions[j]) == 0) {
 193				found_fh = true;
 194				break;
 195			}
 196		}
 197		if (!found_fh)
 198			return false;
 199	}
 200	return true;
 201}
 202
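     /*
      * Mirrors are shared between the segments of a layout: under the
      * inode lock, look for an existing mirror with the same deviceid and
      * filehandle set and take a reference on it; otherwise add the new
      * mirror to the per-layout list and return it unchanged.
      */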
 203static struct nfs4_ff_layout_mirror *
 204ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
 205		struct nfs4_ff_layout_mirror *mirror)
 206{
 207	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
 208	struct nfs4_ff_layout_mirror *pos;
 209	struct inode *inode = lo->plh_inode;
 210
 211	spin_lock(&inode->i_lock);
 212	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
 213		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
 214			continue;
 215		if (!ff_mirror_match_fh(mirror, pos))
 216			continue;
 217		if (refcount_inc_not_zero(&pos->ref)) {
 218			spin_unlock(&inode->i_lock);
 219			return pos;
 220		}
 221	}
 222	list_add(&mirror->mirrors, &ff_layout->mirrors);
 223	mirror->layout = lo;
 224	spin_unlock(&inode->i_lock);
 225	return mirror;
 226}
 227
 228static void
 229ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
 230{
 231	struct inode *inode;
 232	if (mirror->layout == NULL)
 233		return;
 234	inode = mirror->layout->plh_inode;
 235	spin_lock(&inode->i_lock);
 236	list_del(&mirror->mirrors);
 237	spin_unlock(&inode->i_lock);
 238	mirror->layout = NULL;
 239}
 240
 241static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
 242{
 243	struct nfs4_ff_layout_mirror *mirror;
 244
 245	mirror = kzalloc(sizeof(*mirror), gfp_flags);
 246	if (mirror != NULL) {
 247		spin_lock_init(&mirror->lock);
 248		refcount_set(&mirror->ref, 1);
 249		INIT_LIST_HEAD(&mirror->mirrors);
 250	}
 251	return mirror;
 252}
 253
 254static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
 255{
 256	const struct cred *cred;
 257
 258	ff_layout_remove_mirror(mirror);
 259	kfree(mirror->fh_versions);
 260	cred = rcu_access_pointer(mirror->ro_cred);
 261	put_cred(cred);
 262	cred = rcu_access_pointer(mirror->rw_cred);
 263	put_cred(cred);
 264	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
 265	kfree(mirror);
 266}
 267
 268static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
 269{
 270	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
 271		ff_layout_free_mirror(mirror);
 272}
 273
 274static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
 275{
 276	u32 i;
 277
 278	for (i = 0; i < fls->mirror_array_cnt; i++)
 279		ff_layout_put_mirror(fls->mirror_array[i]);
 280}
 281
 282static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
 283{
 284	if (fls) {
 285		ff_layout_free_mirror_array(fls);
 286		kfree(fls);
 287	}
 288}
 289
 290static bool
 291ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
 292		struct pnfs_layout_segment *l2)
 293{
 294	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
  295	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
 296	u32 i;
 297
 298	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
 299		return false;
 300	for (i = 0; i < fl1->mirror_array_cnt; i++) {
 301		if (fl1->mirror_array[i] != fl2->mirror_array[i])
 302			return false;
 303	}
 304	return true;
 305}
 306
 307static bool
 308ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
 309		const struct pnfs_layout_range *l2)
 310{
 311	u64 end1, end2;
 312
 313	if (l1->iomode != l2->iomode)
 314		return l1->iomode != IOMODE_READ;
 315	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
 316	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
 317	if (end1 < l2->offset)
 318		return false;
 319	if (end2 < l1->offset)
 320		return true;
 321	return l2->offset <= l1->offset;
 322}
 323
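     /*
      * Two segments are merged only if the old one is not being returned,
      * both have the same iomode, their byte ranges overlap, and they
      * reference the same mirrors. On success the ranges are combined and
      * the NFS_LSEG_ROC flag is propagated from the old segment.
      */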
 324static bool
 325ff_lseg_merge(struct pnfs_layout_segment *new,
 326		struct pnfs_layout_segment *old)
 327{
 328	u64 new_end, old_end;
 329
 330	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
 331		return false;
 332	if (new->pls_range.iomode != old->pls_range.iomode)
 333		return false;
 334	old_end = pnfs_calc_offset_end(old->pls_range.offset,
 335			old->pls_range.length);
 336	if (old_end < new->pls_range.offset)
 337		return false;
 338	new_end = pnfs_calc_offset_end(new->pls_range.offset,
 339			new->pls_range.length);
 340	if (new_end < old->pls_range.offset)
 341		return false;
 342	if (!ff_lseg_match_mirrors(new, old))
 343		return false;
 344
 345	/* Mergeable: copy info from 'old' to 'new' */
 346	if (new_end < old_end)
 347		new_end = old_end;
 348	if (new->pls_range.offset < old->pls_range.offset)
 349		new->pls_range.offset = old->pls_range.offset;
 350	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
 351			new_end);
 352	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
 353		set_bit(NFS_LSEG_ROC, &new->pls_flags);
 354	return true;
 355}
 356
 357static void
 358ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
 359		struct pnfs_layout_segment *lseg,
 360		struct list_head *free_me)
 361{
 362	pnfs_generic_layout_insert_lseg(lo, lseg,
 363			ff_lseg_range_is_after,
 364			ff_lseg_merge,
 365			free_me);
 366}
 367
 368static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
 369{
 370	int i, j;
 371
 372	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
 373		for (j = i + 1; j < fls->mirror_array_cnt; j++)
 374			if (fls->mirror_array[i]->efficiency <
 375			    fls->mirror_array[j]->efficiency)
 376				swap(fls->mirror_array[i],
 377				     fls->mirror_array[j]);
 378	}
 379}
 380
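     /*
      * Wire layout of the flexfiles layout body decoded below:
      *
      *   stripe_unit (8 bytes), mirror_array_cnt (4 bytes)
      *   for each mirror:
      *     ds_count (4 bytes, must currently be 1)
      *     deviceid, efficiency (4 bytes), stateid
      *     fh_count (4 bytes) followed by fh_count filehandles
      *     user and group, each as an XDR opaque string
      *   optionally: flags (4 bytes) and a layoutstats report interval
      *   (4 bytes) applied to every mirror
      */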
 381static struct pnfs_layout_segment *
 382ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
 383		     struct nfs4_layoutget_res *lgr,
 384		     gfp_t gfp_flags)
 385{
 386	struct pnfs_layout_segment *ret;
 387	struct nfs4_ff_layout_segment *fls = NULL;
 388	struct xdr_stream stream;
 389	struct xdr_buf buf;
 390	struct page *scratch;
 391	u64 stripe_unit;
 392	u32 mirror_array_cnt;
 393	__be32 *p;
 394	int i, rc;
 395
 396	dprintk("--> %s\n", __func__);
 397	scratch = alloc_page(gfp_flags);
 398	if (!scratch)
 399		return ERR_PTR(-ENOMEM);
 400
 401	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
 402			      lgr->layoutp->len);
 403	xdr_set_scratch_page(&stream, scratch);
 404
 405	/* stripe unit and mirror_array_cnt */
 406	rc = -EIO;
 407	p = xdr_inline_decode(&stream, 8 + 4);
 408	if (!p)
 409		goto out_err_free;
 410
 411	p = xdr_decode_hyper(p, &stripe_unit);
 412	mirror_array_cnt = be32_to_cpup(p++);
 413	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
 414		stripe_unit, mirror_array_cnt);
 415
 416	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
 417	    mirror_array_cnt == 0)
 418		goto out_err_free;
 419
 420	rc = -ENOMEM;
 421	fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
 422			gfp_flags);
 423	if (!fls)
 424		goto out_err_free;
 425
 426	fls->mirror_array_cnt = mirror_array_cnt;
 427	fls->stripe_unit = stripe_unit;
 428
 429	for (i = 0; i < fls->mirror_array_cnt; i++) {
 430		struct nfs4_ff_layout_mirror *mirror;
 431		struct cred *kcred;
 432		const struct cred __rcu *cred;
 433		kuid_t uid;
 434		kgid_t gid;
 435		u32 ds_count, fh_count, id;
 436		int j;
 437
 438		rc = -EIO;
 439		p = xdr_inline_decode(&stream, 4);
 440		if (!p)
 441			goto out_err_free;
 442		ds_count = be32_to_cpup(p);
 443
 444		/* FIXME: allow for striping? */
 445		if (ds_count != 1)
 446			goto out_err_free;
 447
 448		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
 449		if (fls->mirror_array[i] == NULL) {
 450			rc = -ENOMEM;
 451			goto out_err_free;
 452		}
 453
 454		fls->mirror_array[i]->ds_count = ds_count;
 455
 456		/* deviceid */
 457		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
 458		if (rc)
 459			goto out_err_free;
 460
 461		/* efficiency */
 462		rc = -EIO;
 463		p = xdr_inline_decode(&stream, 4);
 464		if (!p)
 465			goto out_err_free;
 466		fls->mirror_array[i]->efficiency = be32_to_cpup(p);
 467
 468		/* stateid */
 469		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
 470		if (rc)
 471			goto out_err_free;
 472
 473		/* fh */
 474		rc = -EIO;
 475		p = xdr_inline_decode(&stream, 4);
 476		if (!p)
 477			goto out_err_free;
 478		fh_count = be32_to_cpup(p);
 479
 480		fls->mirror_array[i]->fh_versions =
 481			kcalloc(fh_count, sizeof(struct nfs_fh),
 482				gfp_flags);
 483		if (fls->mirror_array[i]->fh_versions == NULL) {
 484			rc = -ENOMEM;
 485			goto out_err_free;
 486		}
 487
 488		for (j = 0; j < fh_count; j++) {
 489			rc = decode_nfs_fh(&stream,
 490					   &fls->mirror_array[i]->fh_versions[j]);
 491			if (rc)
 492				goto out_err_free;
 493		}
 494
 495		fls->mirror_array[i]->fh_versions_cnt = fh_count;
 496
 497		/* user */
 498		rc = decode_name(&stream, &id);
 499		if (rc)
 500			goto out_err_free;
 501
 502		uid = make_kuid(&init_user_ns, id);
 503
 504		/* group */
 505		rc = decode_name(&stream, &id);
 506		if (rc)
 507			goto out_err_free;
 508
 509		gid = make_kgid(&init_user_ns, id);
 510
 511		if (gfp_flags & __GFP_FS)
 512			kcred = prepare_kernel_cred(&init_task);
 513		else {
 514			unsigned int nofs_flags = memalloc_nofs_save();
 515			kcred = prepare_kernel_cred(&init_task);
 516			memalloc_nofs_restore(nofs_flags);
 517		}
 518		rc = -ENOMEM;
 519		if (!kcred)
 520			goto out_err_free;
 521		kcred->fsuid = uid;
 522		kcred->fsgid = gid;
 523		cred = RCU_INITIALIZER(kcred);
 524
 525		if (lgr->range.iomode == IOMODE_READ)
 526			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
 527		else
 528			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
 529
 530		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
 531		if (mirror != fls->mirror_array[i]) {
 532			/* swap cred ptrs so free_mirror will clean up old */
 533			if (lgr->range.iomode == IOMODE_READ) {
 534				cred = xchg(&mirror->ro_cred, cred);
 535				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
 536			} else {
 537				cred = xchg(&mirror->rw_cred, cred);
 538				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
 539			}
 540			ff_layout_free_mirror(fls->mirror_array[i]);
 541			fls->mirror_array[i] = mirror;
 542		}
 543
 544		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
 545			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
 546			from_kuid(&init_user_ns, uid),
 547			from_kgid(&init_user_ns, gid));
 548	}
 549
 550	p = xdr_inline_decode(&stream, 4);
 551	if (!p)
 552		goto out_sort_mirrors;
 553	fls->flags = be32_to_cpup(p);
 554
 555	p = xdr_inline_decode(&stream, 4);
 556	if (!p)
 557		goto out_sort_mirrors;
  558	for (i = 0; i < fls->mirror_array_cnt; i++)
 559		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
 560
 561out_sort_mirrors:
 562	ff_layout_sort_mirrors(fls);
 563	ret = &fls->generic_hdr;
 564	dprintk("<-- %s (success)\n", __func__);
 565out_free_page:
 566	__free_page(scratch);
 567	return ret;
 568out_err_free:
 569	_ff_layout_free_lseg(fls);
 570	ret = ERR_PTR(rc);
 571	dprintk("<-- %s (%d)\n", __func__, rc);
 572	goto out_free_page;
 573}
 574
 575static void
 576ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
 577{
 578	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
 579
 580	dprintk("--> %s\n", __func__);
 581
 582	if (lseg->pls_range.iomode == IOMODE_RW) {
 583		struct nfs4_flexfile_layout *ffl;
 584		struct inode *inode;
 585
 586		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
 587		inode = ffl->generic_hdr.plh_inode;
 588		spin_lock(&inode->i_lock);
 589		pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
 590		spin_unlock(&inode->i_lock);
 591	}
 592	_ff_layout_free_lseg(fls);
 593}
 594
 595static void
 596nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
 597{
 598	/* first IO request? */
 599	if (atomic_inc_return(&timer->n_ops) == 1) {
 600		timer->start_time = now;
 601	}
 602}
 603
 604static ktime_t
 605nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
 606{
 607	ktime_t start;
 608
 609	if (atomic_dec_return(&timer->n_ops) < 0)
 610		WARN_ON_ONCE(1);
 611
 612	start = timer->start_time;
 613	timer->start_time = now;
 614	return ktime_sub(now, start);
 615}
 616
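     /*
      * Start accounting for an I/O and decide whether a LAYOUTSTATS report
      * is due. The report interval comes from the mirror if the server
      * supplied one, else from the global layoutstats_timer setting, else
      * from FF_LAYOUTSTATS_REPORT_INTERVAL; return true once that much
      * time has elapsed since the last report.
      */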
 617static bool
 618nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
 619			    struct nfs4_ff_layoutstat *layoutstat,
 620			    ktime_t now)
 621{
 622	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
 623	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
 624
 625	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
 626	if (!mirror->start_time)
 627		mirror->start_time = now;
 628	if (mirror->report_interval != 0)
 629		report_interval = (s64)mirror->report_interval * 1000LL;
 630	else if (layoutstats_timer != 0)
 631		report_interval = (s64)layoutstats_timer * 1000LL;
 632	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
 633			report_interval) {
 634		ffl->last_report_time = now;
 635		return true;
 636	}
 637
 638	return false;
 639}
 640
 641static void
 642nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
 643		__u64 requested)
 644{
 645	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
 646
 647	iostat->ops_requested++;
 648	iostat->bytes_requested += requested;
 649}
 650
 651static void
 652nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
 653		__u64 requested,
 654		__u64 completed,
 655		ktime_t time_completed,
 656		ktime_t time_started)
 657{
 658	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
 659	ktime_t completion_time = ktime_sub(time_completed, time_started);
 660	ktime_t timer;
 661
 662	iostat->ops_completed++;
 663	iostat->bytes_completed += completed;
 664	iostat->bytes_not_delivered += requested - completed;
 665
 666	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
 667	iostat->total_busy_time =
 668			ktime_add(iostat->total_busy_time, timer);
 669	iostat->aggregate_completion_time =
 670			ktime_add(iostat->aggregate_completion_time,
 671					completion_time);
 672}
 673
 674static void
 675nfs4_ff_layout_stat_io_start_read(struct inode *inode,
 676		struct nfs4_ff_layout_mirror *mirror,
 677		__u64 requested, ktime_t now)
 678{
 679	bool report;
 680
 681	spin_lock(&mirror->lock);
 682	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
 683	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
 684	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 685	spin_unlock(&mirror->lock);
 686
 687	if (report)
 688		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
 689}
 690
 691static void
 692nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
 693		struct nfs4_ff_layout_mirror *mirror,
 694		__u64 requested,
 695		__u64 completed)
 696{
 697	spin_lock(&mirror->lock);
 698	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
 699			requested, completed,
 700			ktime_get(), task->tk_start);
 701	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 702	spin_unlock(&mirror->lock);
 703}
 704
 705static void
 706nfs4_ff_layout_stat_io_start_write(struct inode *inode,
 707		struct nfs4_ff_layout_mirror *mirror,
 708		__u64 requested, ktime_t now)
 709{
 710	bool report;
 711
 712	spin_lock(&mirror->lock);
  713	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
 714	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
 715	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 716	spin_unlock(&mirror->lock);
 717
 718	if (report)
 719		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
 720}
 721
 722static void
 723nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
 724		struct nfs4_ff_layout_mirror *mirror,
 725		__u64 requested,
 726		__u64 completed,
 727		enum nfs3_stable_how committed)
 728{
 729	if (committed == NFS_UNSTABLE)
 730		requested = completed = 0;
 731
 732	spin_lock(&mirror->lock);
 733	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
 734			requested, completed, ktime_get(), task->tk_start);
 735	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 736	spin_unlock(&mirror->lock);
 737}
 738
 739static void
 740ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
 741{
 742	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
 743
 744	if (devid)
 745		nfs4_mark_deviceid_unavailable(devid);
 746}
 747
 748static void
 749ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
 750{
 751	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
 752
 753	if (devid)
 754		nfs4_mark_deviceid_available(devid);
 755}
 756
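     /*
      * Walk the mirrors (sorted by efficiency) starting at start_idx and
      * return the first one whose DS can be prepared; when check_device is
      * set, mirrors whose device is marked unavailable are skipped. The
      * "best" wrapper below tries the strict pass first and then falls
      * back to any DS that can be prepared.
      */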
 757static struct nfs4_pnfs_ds *
 758ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
 759			     u32 start_idx, u32 *best_idx,
 760			     bool check_device)
 761{
 762	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
 763	struct nfs4_ff_layout_mirror *mirror;
 764	struct nfs4_pnfs_ds *ds;
 765	u32 idx;
 766
 767	/* mirrors are initially sorted by efficiency */
 768	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
 769		mirror = FF_LAYOUT_COMP(lseg, idx);
 770		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
 771		if (!ds)
 772			continue;
 773
 774		if (check_device &&
 775		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
 776			continue;
 777
 778		*best_idx = idx;
 779		return ds;
 780	}
 781
 782	return NULL;
 783}
 784
 785static struct nfs4_pnfs_ds *
 786ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
 787				 u32 start_idx, u32 *best_idx)
 788{
 789	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
 790}
 791
 792static struct nfs4_pnfs_ds *
 793ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
 794				   u32 start_idx, u32 *best_idx)
 795{
 796	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
 797}
 798
 799static struct nfs4_pnfs_ds *
 800ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
 801				  u32 start_idx, u32 *best_idx)
 802{
 803	struct nfs4_pnfs_ds *ds;
 804
 805	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
 806	if (ds)
 807		return ds;
 808	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
 809}
 810
 811static struct nfs4_pnfs_ds *
 812ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
 813			  u32 *best_idx)
 814{
 815	struct pnfs_layout_segment *lseg = pgio->pg_lseg;
 816	struct nfs4_pnfs_ds *ds;
 817
 818	ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
 819					       best_idx);
 820	if (ds || !pgio->pg_mirror_idx)
 821		return ds;
 822	return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
 823}
 824
 825static void
 826ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
 827		      struct nfs_page *req,
 828		      bool strict_iomode)
 829{
 830	pnfs_put_lseg(pgio->pg_lseg);
 831	pgio->pg_lseg =
 832		pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
 833				   req_offset(req), req->wb_bytes, IOMODE_READ,
 834				   strict_iomode, nfs_io_gfp_mask());
 835	if (IS_ERR(pgio->pg_lseg)) {
 836		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 837		pgio->pg_lseg = NULL;
 838	}
 839}
 840
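     /*
      * Prepare the pageio descriptor for a pNFS read: obtain a layout
      * segment (re-requesting with a strict IOMODE_READ if the rw layout
      * should not be used for reads), pick a DS, and point the single read
      * mirror at it. If no DS is usable, either fall back to the MDS or,
      * when the layout forbids that, wait a second and retry.
      */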
 841static void
 842ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
 843			struct nfs_page *req)
 844{
 845	struct nfs_pgio_mirror *pgm;
 846	struct nfs4_ff_layout_mirror *mirror;
 847	struct nfs4_pnfs_ds *ds;
 848	u32 ds_idx;
 849
 850	if (NFS_SERVER(pgio->pg_inode)->flags &
 851			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
 852		pgio->pg_maxretrans = io_maxretrans;
 853retry:
 854	pnfs_generic_pg_check_layout(pgio, req);
 855	/* Use full layout for now */
 856	if (!pgio->pg_lseg) {
 857		ff_layout_pg_get_read(pgio, req, false);
 858		if (!pgio->pg_lseg)
 859			goto out_nolseg;
 860	}
 861	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
 862		ff_layout_pg_get_read(pgio, req, true);
 863		if (!pgio->pg_lseg)
 864			goto out_nolseg;
 865	}
 866	/* Reset wb_nio, since getting layout segment was successful */
 867	req->wb_nio = 0;
 868
 869	ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
 870	if (!ds) {
 871		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
 872			goto out_mds;
 873		pnfs_generic_pg_cleanup(pgio);
 874		/* Sleep for 1 second before retrying */
 875		ssleep(1);
 876		goto retry;
 877	}
 878
 879	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
 880	pgm = &pgio->pg_mirrors[0];
 881	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
 882
 883	pgio->pg_mirror_idx = ds_idx;
 884	return;
 885out_nolseg:
 886	if (pgio->pg_error < 0) {
 887		if (pgio->pg_error != -EAGAIN)
 888			return;
 889		/* Retry getting layout segment if lower layer returned -EAGAIN */
 890		if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
 891			if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
 892				pgio->pg_error = -ETIMEDOUT;
 893			else
 894				pgio->pg_error = -EIO;
 895			return;
 896		}
 897		pgio->pg_error = 0;
 898		/* Sleep for 1 second before retrying */
 899		ssleep(1);
 900		goto retry;
 901	}
 902out_mds:
 903	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
 904			0, NFS4_MAX_UINT64, IOMODE_READ,
 905			NFS_I(pgio->pg_inode)->layout,
 906			pgio->pg_lseg);
 907	pgio->pg_maxretrans = 0;
 908	nfs_pageio_reset_read_mds(pgio);
 909}
 910
 911static void
 912ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
 913			struct nfs_page *req)
 914{
 915	struct nfs4_ff_layout_mirror *mirror;
 916	struct nfs_pgio_mirror *pgm;
 917	struct nfs4_pnfs_ds *ds;
 918	u32 i;
 919
 920retry:
 921	pnfs_generic_pg_check_layout(pgio, req);
 922	if (!pgio->pg_lseg) {
 923		pgio->pg_lseg =
 924			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
 925					   req_offset(req), req->wb_bytes,
 926					   IOMODE_RW, false, nfs_io_gfp_mask());
 927		if (IS_ERR(pgio->pg_lseg)) {
 928			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 929			pgio->pg_lseg = NULL;
 930			return;
 931		}
 932	}
 933	/* If no lseg, fall back to write through mds */
 934	if (pgio->pg_lseg == NULL)
 935		goto out_mds;
 936
 937	/* Use a direct mapping of ds_idx to pgio mirror_idx */
 938	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
 939		goto out_eagain;
 940
 941	for (i = 0; i < pgio->pg_mirror_count; i++) {
 942		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
 943		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
 944		if (!ds) {
 945			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
 946				goto out_mds;
 947			pnfs_generic_pg_cleanup(pgio);
 948			/* Sleep for 1 second before retrying */
 949			ssleep(1);
 950			goto retry;
 951		}
 952		pgm = &pgio->pg_mirrors[i];
 953		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
 954	}
 955
 956	if (NFS_SERVER(pgio->pg_inode)->flags &
 957			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
 958		pgio->pg_maxretrans = io_maxretrans;
 959	return;
 960out_eagain:
 961	pnfs_generic_pg_cleanup(pgio);
 962	pgio->pg_error = -EAGAIN;
 963	return;
 964out_mds:
 965	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
 966			0, NFS4_MAX_UINT64, IOMODE_RW,
 967			NFS_I(pgio->pg_inode)->layout,
 968			pgio->pg_lseg);
 969	pgio->pg_maxretrans = 0;
 970	nfs_pageio_reset_write_mds(pgio);
 971	pgio->pg_error = -EAGAIN;
 972}
 973
 974static unsigned int
 975ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
 976				    struct nfs_page *req)
 977{
 978	if (!pgio->pg_lseg) {
 979		pgio->pg_lseg =
 980			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
 981					   req_offset(req), req->wb_bytes,
 982					   IOMODE_RW, false, nfs_io_gfp_mask());
 983		if (IS_ERR(pgio->pg_lseg)) {
 984			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 985			pgio->pg_lseg = NULL;
 986			goto out;
 987		}
 988	}
 989	if (pgio->pg_lseg)
 990		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
 991
 992	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
 993			0, NFS4_MAX_UINT64, IOMODE_RW,
 994			NFS_I(pgio->pg_inode)->layout,
 995			pgio->pg_lseg);
 996	/* no lseg means that pnfs is not in use, so no mirroring here */
 997	nfs_pageio_reset_write_mds(pgio);
 998out:
 999	return 1;
1000}
1001
1002static u32
1003ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
1004{
1005	u32 old = desc->pg_mirror_idx;
1006
1007	desc->pg_mirror_idx = idx;
1008	return old;
1009}
1010
1011static struct nfs_pgio_mirror *
1012ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
1013{
1014	return &desc->pg_mirrors[idx];
1015}
1016
1017static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
1018	.pg_init = ff_layout_pg_init_read,
1019	.pg_test = pnfs_generic_pg_test,
1020	.pg_doio = pnfs_generic_pg_readpages,
1021	.pg_cleanup = pnfs_generic_pg_cleanup,
1022};
1023
1024static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
1025	.pg_init = ff_layout_pg_init_write,
1026	.pg_test = pnfs_generic_pg_test,
1027	.pg_doio = pnfs_generic_pg_writepages,
1028	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1029	.pg_cleanup = pnfs_generic_pg_cleanup,
1030	.pg_get_mirror = ff_layout_pg_get_mirror_write,
1031	.pg_set_mirror = ff_layout_pg_set_mirror_write,
1032};
1033
1034static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1035{
1036	struct rpc_task *task = &hdr->task;
1037
1038	pnfs_layoutcommit_inode(hdr->inode, false);
1039
1040	if (retry_pnfs) {
1041		dprintk("%s Reset task %5u for i/o through pNFS "
1042			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1043			hdr->task.tk_pid,
1044			hdr->inode->i_sb->s_id,
1045			(unsigned long long)NFS_FILEID(hdr->inode),
1046			hdr->args.count,
1047			(unsigned long long)hdr->args.offset);
1048
1049		hdr->completion_ops->reschedule_io(hdr);
1050		return;
1051	}
1052
1053	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1054		dprintk("%s Reset task %5u for i/o through MDS "
1055			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1056			hdr->task.tk_pid,
1057			hdr->inode->i_sb->s_id,
1058			(unsigned long long)NFS_FILEID(hdr->inode),
1059			hdr->args.count,
1060			(unsigned long long)hdr->args.offset);
1061
1062		trace_pnfs_mds_fallback_write_done(hdr->inode,
1063				hdr->args.offset, hdr->args.count,
1064				IOMODE_RW, NFS_I(hdr->inode)->layout,
1065				hdr->lseg);
1066		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1067	}
1068}
1069
1070static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
1071{
1072	u32 idx = hdr->pgio_mirror_idx + 1;
1073	u32 new_idx = 0;
1074
1075	if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
1076		ff_layout_send_layouterror(hdr->lseg);
1077	else
1078		pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1079	pnfs_read_resend_pnfs(hdr, new_idx);
1080}
1081
1082static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1083{
1084	struct rpc_task *task = &hdr->task;
1085
1086	pnfs_layoutcommit_inode(hdr->inode, false);
1087	pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1088
1089	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1090		dprintk("%s Reset task %5u for i/o through MDS "
1091			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1092			hdr->task.tk_pid,
1093			hdr->inode->i_sb->s_id,
1094			(unsigned long long)NFS_FILEID(hdr->inode),
1095			hdr->args.count,
1096			(unsigned long long)hdr->args.offset);
1097
1098		trace_pnfs_mds_fallback_read_done(hdr->inode,
1099				hdr->args.offset, hdr->args.count,
1100				IOMODE_READ, NFS_I(hdr->inode)->layout,
1101				hdr->lseg);
1102		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1103	}
1104}
1105
1106static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1107					   struct nfs4_state *state,
1108					   struct nfs_client *clp,
1109					   struct pnfs_layout_segment *lseg,
1110					   u32 idx)
1111{
1112	struct pnfs_layout_hdr *lo = lseg->pls_layout;
1113	struct inode *inode = lo->plh_inode;
1114	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1115	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1116
1117	switch (task->tk_status) {
1118	case -NFS4ERR_BADSESSION:
1119	case -NFS4ERR_BADSLOT:
1120	case -NFS4ERR_BAD_HIGH_SLOT:
1121	case -NFS4ERR_DEADSESSION:
1122	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1123	case -NFS4ERR_SEQ_FALSE_RETRY:
1124	case -NFS4ERR_SEQ_MISORDERED:
1125		dprintk("%s ERROR %d, Reset session. Exchangeid "
1126			"flags 0x%x\n", __func__, task->tk_status,
1127			clp->cl_exchange_flags);
1128		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1129		break;
1130	case -NFS4ERR_DELAY:
1131	case -NFS4ERR_GRACE:
1132		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1133		break;
1134	case -NFS4ERR_RETRY_UNCACHED_REP:
1135		break;
1136	/* Invalidate Layout errors */
1137	case -NFS4ERR_PNFS_NO_LAYOUT:
1138	case -ESTALE:           /* mapped NFS4ERR_STALE */
1139	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
1140	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
1141	case -NFS4ERR_FHEXPIRED:
1142	case -NFS4ERR_WRONG_TYPE:
1143		dprintk("%s Invalid layout error %d\n", __func__,
1144			task->tk_status);
1145		/*
1146		 * Destroy layout so new i/o will get a new layout.
1147		 * Layout will not be destroyed until all current lseg
1148		 * references are put. Mark layout as invalid to resend failed
1149		 * i/o and all i/o waiting on the slot table to the MDS until
1150		 * layout is destroyed and a new valid layout is obtained.
1151		 */
1152		pnfs_destroy_layout(NFS_I(inode));
1153		rpc_wake_up(&tbl->slot_tbl_waitq);
1154		goto reset;
1155	/* RPC connection errors */
1156	case -ECONNREFUSED:
1157	case -EHOSTDOWN:
1158	case -EHOSTUNREACH:
1159	case -ENETUNREACH:
1160	case -EIO:
1161	case -ETIMEDOUT:
1162	case -EPIPE:
1163	case -EPROTO:
1164	case -ENODEV:
1165		dprintk("%s DS connection error %d\n", __func__,
1166			task->tk_status);
1167		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1168				&devid->deviceid);
1169		rpc_wake_up(&tbl->slot_tbl_waitq);
1170		fallthrough;
1171	default:
1172		if (ff_layout_avoid_mds_available_ds(lseg))
1173			return -NFS4ERR_RESET_TO_PNFS;
1174reset:
1175		dprintk("%s Retry through MDS. Error %d\n", __func__,
1176			task->tk_status);
1177		return -NFS4ERR_RESET_TO_MDS;
1178	}
1179	task->tk_status = 0;
1180	return -EAGAIN;
1181}
1182
1183/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1184static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1185					   struct pnfs_layout_segment *lseg,
1186					   u32 idx)
1187{
1188	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1189
1190	switch (task->tk_status) {
1191	/* File access problems. Don't mark the device as unavailable */
1192	case -EACCES:
1193	case -ESTALE:
1194	case -EISDIR:
1195	case -EBADHANDLE:
1196	case -ELOOP:
1197	case -ENOSPC:
1198		break;
1199	case -EJUKEBOX:
1200		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1201		goto out_retry;
1202	default:
1203		dprintk("%s DS connection error %d\n", __func__,
1204			task->tk_status);
1205		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1206				&devid->deviceid);
1207	}
1208	/* FIXME: Need to prevent infinite looping here. */
1209	return -NFS4ERR_RESET_TO_PNFS;
1210out_retry:
1211	task->tk_status = 0;
1212	rpc_restart_call_prepare(task);
1213	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1214	return -EAGAIN;
1215}
1216
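     /*
      * Common error dispatcher for DS I/O: returns 0 when the RPC
      * succeeded, -NFS4ERR_RESET_TO_PNFS to retry via another mirror,
      * -NFS4ERR_RESET_TO_MDS to resend through the MDS, or -EAGAIN when
      * the RPC itself should simply be retried.
      */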
1217static int ff_layout_async_handle_error(struct rpc_task *task,
1218					struct nfs4_state *state,
1219					struct nfs_client *clp,
1220					struct pnfs_layout_segment *lseg,
1221					u32 idx)
1222{
1223	int vers = clp->cl_nfs_mod->rpc_vers->number;
1224
1225	if (task->tk_status >= 0) {
1226		ff_layout_mark_ds_reachable(lseg, idx);
1227		return 0;
1228	}
1229
1230	/* Handle the case of an invalid layout segment */
1231	if (!pnfs_is_valid_lseg(lseg))
1232		return -NFS4ERR_RESET_TO_PNFS;
1233
1234	switch (vers) {
1235	case 3:
1236		return ff_layout_async_handle_error_v3(task, lseg, idx);
1237	case 4:
1238		return ff_layout_async_handle_error_v4(task, state, clp,
1239						       lseg, idx);
1240	default:
1241		/* should never happen */
1242		WARN_ON_ONCE(1);
1243		return 0;
1244	}
1245}
1246
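     /*
      * Record a DS error against the mirror so it can be reported back to
      * the server. Local transport errnos are first mapped to NFS4ERR_NXIO
      * (or NFS4ERR_ACCESS for -EACCES); NXIO also marks the device
      * unreachable. Errors other than DELAY/GRACE mark the layout for
      * return, except that NXIO on a READ leaves the layout alone so the
      * remaining mirrors can still be tried.
      */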
1247static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1248					u32 idx, u64 offset, u64 length,
1249					u32 *op_status, int opnum, int error)
1250{
1251	struct nfs4_ff_layout_mirror *mirror;
1252	u32 status = *op_status;
1253	int err;
1254
1255	if (status == 0) {
1256		switch (error) {
1257		case -ETIMEDOUT:
1258		case -EPFNOSUPPORT:
1259		case -EPROTONOSUPPORT:
1260		case -EOPNOTSUPP:
1261		case -EINVAL:
1262		case -ECONNREFUSED:
1263		case -ECONNRESET:
1264		case -EHOSTDOWN:
1265		case -EHOSTUNREACH:
1266		case -ENETUNREACH:
1267		case -EADDRINUSE:
1268		case -ENOBUFS:
1269		case -EPIPE:
1270		case -EPERM:
1271		case -EPROTO:
1272		case -ENODEV:
1273			*op_status = status = NFS4ERR_NXIO;
1274			break;
1275		case -EACCES:
1276			*op_status = status = NFS4ERR_ACCESS;
1277			break;
1278		default:
1279			return;
1280		}
1281	}
1282
1283	mirror = FF_LAYOUT_COMP(lseg, idx);
1284	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1285				       mirror, offset, length, status, opnum,
1286				       nfs_io_gfp_mask());
1287
1288	switch (status) {
1289	case NFS4ERR_DELAY:
1290	case NFS4ERR_GRACE:
1291		break;
1292	case NFS4ERR_NXIO:
1293		ff_layout_mark_ds_unreachable(lseg, idx);
1294		/*
1295		 * Don't return the layout if this is a read and we still
1296		 * have layouts to try
1297		 */
1298		if (opnum == OP_READ)
1299			break;
1300		fallthrough;
1301	default:
1302		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
1303						  lseg);
1304	}
1305
1306	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1307}
1308
1309/* NFS_PROTO call done callback routines */
1310static int ff_layout_read_done_cb(struct rpc_task *task,
1311				struct nfs_pgio_header *hdr)
1312{
1313	int err;
1314
1315	if (task->tk_status < 0) {
1316		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1317					    hdr->args.offset, hdr->args.count,
1318					    &hdr->res.op_status, OP_READ,
1319					    task->tk_status);
1320		trace_ff_layout_read_error(hdr);
1321	}
1322
1323	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1324					   hdr->ds_clp, hdr->lseg,
1325					   hdr->pgio_mirror_idx);
1326
1327	trace_nfs4_pnfs_read(hdr, err);
1328	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1329	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1330	switch (err) {
1331	case -NFS4ERR_RESET_TO_PNFS:
1332		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1333		return task->tk_status;
1334	case -NFS4ERR_RESET_TO_MDS:
1335		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1336		return task->tk_status;
1337	case -EAGAIN:
1338		goto out_eagain;
1339	}
1340
1341	return 0;
1342out_eagain:
1343	rpc_restart_call_prepare(task);
1344	return -EAGAIN;
1345}
1346
1347static bool
1348ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1349{
1350	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1351}
1352
1353/*
1354 * We reference the rpc_cred of the first WRITE that triggers the need for
1355 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1356 * rfc5661 is not clear about which credential should be used.
1357 *
 1358 * The flexfiles client should treat a FILE_SYNC reply from the DS as
 1359 * DATA_SYNC, so to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 1360 * we always send a layoutcommit after DS writes.
1361 */
1362static void
1363ff_layout_set_layoutcommit(struct inode *inode,
1364		struct pnfs_layout_segment *lseg,
1365		loff_t end_offset)
1366{
1367	if (!ff_layout_need_layoutcommit(lseg))
1368		return;
1369
1370	pnfs_set_layoutcommit(inode, lseg, end_offset);
1371	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1372		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
1373}
1374
1375static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1376		struct nfs_pgio_header *hdr)
1377{
1378	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1379		return;
1380	nfs4_ff_layout_stat_io_start_read(hdr->inode,
1381			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1382			hdr->args.count,
1383			task->tk_start);
1384}
1385
1386static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1387		struct nfs_pgio_header *hdr)
1388{
1389	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1390		return;
1391	nfs4_ff_layout_stat_io_end_read(task,
1392			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1393			hdr->args.count,
1394			hdr->res.count);
1395	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1396}
1397
1398static int ff_layout_read_prepare_common(struct rpc_task *task,
1399					 struct nfs_pgio_header *hdr)
1400{
1401	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1402		rpc_exit(task, -EIO);
1403		return -EIO;
1404	}
1405
1406	if (!pnfs_is_valid_lseg(hdr->lseg)) {
1407		rpc_exit(task, -EAGAIN);
1408		return -EAGAIN;
1409	}
1410
1411	ff_layout_read_record_layoutstats_start(task, hdr);
1412	return 0;
1413}
1414
1415/*
1416 * Call ops for the async read/write cases
1417 * In the case of dense layouts, the offset needs to be reset to its
1418 * original value.
1419 */
1420static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1421{
1422	struct nfs_pgio_header *hdr = data;
1423
1424	if (ff_layout_read_prepare_common(task, hdr))
1425		return;
1426
1427	rpc_call_start(task);
1428}
1429
1430static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1431{
1432	struct nfs_pgio_header *hdr = data;
1433
1434	if (nfs4_setup_sequence(hdr->ds_clp,
1435				&hdr->args.seq_args,
1436				&hdr->res.seq_res,
1437				task))
1438		return;
1439
1440	ff_layout_read_prepare_common(task, hdr);
1441}
1442
1443static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1444{
1445	struct nfs_pgio_header *hdr = data;
1446
1447	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1448	    task->tk_status == 0) {
1449		nfs4_sequence_done(task, &hdr->res.seq_res);
1450		return;
1451	}
1452
1453	/* Note this may cause RPC to be resent */
1454	hdr->mds_ops->rpc_call_done(task, hdr);
1455}
1456
1457static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1458{
1459	struct nfs_pgio_header *hdr = data;
1460
1461	ff_layout_read_record_layoutstats_done(task, hdr);
1462	rpc_count_iostats_metrics(task,
1463	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1464}
1465
1466static void ff_layout_read_release(void *data)
1467{
1468	struct nfs_pgio_header *hdr = data;
1469
1470	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1471	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1472		ff_layout_resend_pnfs_read(hdr);
1473	else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1474		ff_layout_reset_read(hdr);
1475	pnfs_generic_rw_release(data);
1476}
1477
1478
1479static int ff_layout_write_done_cb(struct rpc_task *task,
1480				struct nfs_pgio_header *hdr)
1481{
1482	loff_t end_offs = 0;
1483	int err;
1484
1485	if (task->tk_status < 0) {
1486		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1487					    hdr->args.offset, hdr->args.count,
1488					    &hdr->res.op_status, OP_WRITE,
1489					    task->tk_status);
1490		trace_ff_layout_write_error(hdr);
1491	}
1492
1493	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1494					   hdr->ds_clp, hdr->lseg,
1495					   hdr->pgio_mirror_idx);
1496
1497	trace_nfs4_pnfs_write(hdr, err);
1498	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1499	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1500	switch (err) {
1501	case -NFS4ERR_RESET_TO_PNFS:
1502		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1503		return task->tk_status;
1504	case -NFS4ERR_RESET_TO_MDS:
1505		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1506		return task->tk_status;
1507	case -EAGAIN:
1508		return -EAGAIN;
1509	}
1510
1511	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1512	    hdr->res.verf->committed == NFS_DATA_SYNC)
1513		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1514
1515	/* Note: if the write is unstable, don't set end_offs until commit */
1516	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1517
 1518	/* zero out fattr since we don't care about DS attrs at all */
1519	hdr->fattr.valid = 0;
1520	if (task->tk_status >= 0)
1521		nfs_writeback_update_inode(hdr);
1522
1523	return 0;
1524}
1525
1526static int ff_layout_commit_done_cb(struct rpc_task *task,
1527				     struct nfs_commit_data *data)
1528{
1529	int err;
1530
1531	if (task->tk_status < 0) {
1532		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1533					    data->args.offset, data->args.count,
1534					    &data->res.op_status, OP_COMMIT,
1535					    task->tk_status);
1536		trace_ff_layout_commit_error(data);
1537	}
1538
1539	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1540					   data->lseg, data->ds_commit_index);
1541
1542	trace_nfs4_pnfs_commit_ds(data, err);
1543	switch (err) {
1544	case -NFS4ERR_RESET_TO_PNFS:
1545		pnfs_generic_prepare_to_resend_writes(data);
1546		return -EAGAIN;
1547	case -NFS4ERR_RESET_TO_MDS:
1548		pnfs_generic_prepare_to_resend_writes(data);
1549		return -EAGAIN;
1550	case -EAGAIN:
1551		rpc_restart_call_prepare(task);
1552		return -EAGAIN;
1553	}
1554
1555	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1556
1557	return 0;
1558}
1559
1560static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1561		struct nfs_pgio_header *hdr)
1562{
1563	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1564		return;
1565	nfs4_ff_layout_stat_io_start_write(hdr->inode,
1566			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1567			hdr->args.count,
1568			task->tk_start);
1569}
1570
1571static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1572		struct nfs_pgio_header *hdr)
1573{
1574	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1575		return;
1576	nfs4_ff_layout_stat_io_end_write(task,
1577			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1578			hdr->args.count, hdr->res.count,
1579			hdr->res.verf->committed);
1580	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1581}
1582
1583static int ff_layout_write_prepare_common(struct rpc_task *task,
1584					  struct nfs_pgio_header *hdr)
1585{
1586	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1587		rpc_exit(task, -EIO);
1588		return -EIO;
1589	}
1590
1591	if (!pnfs_is_valid_lseg(hdr->lseg)) {
1592		rpc_exit(task, -EAGAIN);
1593		return -EAGAIN;
1594	}
1595
1596	ff_layout_write_record_layoutstats_start(task, hdr);
1597	return 0;
1598}
1599
1600static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1601{
1602	struct nfs_pgio_header *hdr = data;
1603
1604	if (ff_layout_write_prepare_common(task, hdr))
1605		return;
1606
1607	rpc_call_start(task);
1608}
1609
1610static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1611{
1612	struct nfs_pgio_header *hdr = data;
1613
1614	if (nfs4_setup_sequence(hdr->ds_clp,
1615				&hdr->args.seq_args,
1616				&hdr->res.seq_res,
1617				task))
1618		return;
1619
1620	ff_layout_write_prepare_common(task, hdr);
1621}
1622
1623static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1624{
1625	struct nfs_pgio_header *hdr = data;
1626
1627	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1628	    task->tk_status == 0) {
1629		nfs4_sequence_done(task, &hdr->res.seq_res);
1630		return;
1631	}
1632
1633	/* Note this may cause RPC to be resent */
1634	hdr->mds_ops->rpc_call_done(task, hdr);
1635}
1636
1637static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1638{
1639	struct nfs_pgio_header *hdr = data;
1640
1641	ff_layout_write_record_layoutstats_done(task, hdr);
1642	rpc_count_iostats_metrics(task,
1643	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1644}
1645
1646static void ff_layout_write_release(void *data)
1647{
1648	struct nfs_pgio_header *hdr = data;
1649
1650	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1651	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1652		ff_layout_send_layouterror(hdr->lseg);
1653		ff_layout_reset_write(hdr, true);
1654	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1655		ff_layout_reset_write(hdr, false);
1656	pnfs_generic_rw_release(data);
1657}
1658
1659static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1660		struct nfs_commit_data *cdata)
1661{
1662	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1663		return;
1664	nfs4_ff_layout_stat_io_start_write(cdata->inode,
1665			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1666			0, task->tk_start);
1667}
1668
1669static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1670		struct nfs_commit_data *cdata)
1671{
1672	struct nfs_page *req;
1673	__u64 count = 0;
1674
1675	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1676		return;
1677
1678	if (task->tk_status == 0) {
1679		list_for_each_entry(req, &cdata->pages, wb_list)
1680			count += req->wb_bytes;
1681	}
1682	nfs4_ff_layout_stat_io_end_write(task,
1683			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1684			count, count, NFS_FILE_SYNC);
1685	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1686}
1687
1688static int ff_layout_commit_prepare_common(struct rpc_task *task,
1689					   struct nfs_commit_data *cdata)
1690{
1691	if (!pnfs_is_valid_lseg(cdata->lseg)) {
1692		rpc_exit(task, -EAGAIN);
1693		return -EAGAIN;
1694	}
1695
1696	ff_layout_commit_record_layoutstats_start(task, cdata);
1697	return 0;
1698}
1699
1700static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1701{
1702	if (ff_layout_commit_prepare_common(task, data))
1703		return;
1704
1705	rpc_call_start(task);
1706}
1707
1708static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1709{
1710	struct nfs_commit_data *wdata = data;
1711
1712	if (nfs4_setup_sequence(wdata->ds_clp,
1713				&wdata->args.seq_args,
1714				&wdata->res.seq_res,
1715				task))
1716		return;
1717	ff_layout_commit_prepare_common(task, data);
1718}
1719
1720static void ff_layout_commit_done(struct rpc_task *task, void *data)
1721{
1722	pnfs_generic_write_commit_done(task, data);
1723}
1724
1725static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1726{
1727	struct nfs_commit_data *cdata = data;
1728
1729	ff_layout_commit_record_layoutstats_done(task, cdata);
1730	rpc_count_iostats_metrics(task,
1731	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1732}
1733
1734static void ff_layout_commit_release(void *data)
1735{
1736	struct nfs_commit_data *cdata = data;
1737
1738	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1739	pnfs_generic_commit_release(data);
1740}
1741
1742static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1743	.rpc_call_prepare = ff_layout_read_prepare_v3,
1744	.rpc_call_done = ff_layout_read_call_done,
1745	.rpc_count_stats = ff_layout_read_count_stats,
1746	.rpc_release = ff_layout_read_release,
1747};
1748
1749static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1750	.rpc_call_prepare = ff_layout_read_prepare_v4,
1751	.rpc_call_done = ff_layout_read_call_done,
1752	.rpc_count_stats = ff_layout_read_count_stats,
1753	.rpc_release = ff_layout_read_release,
1754};
1755
1756static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1757	.rpc_call_prepare = ff_layout_write_prepare_v3,
1758	.rpc_call_done = ff_layout_write_call_done,
1759	.rpc_count_stats = ff_layout_write_count_stats,
1760	.rpc_release = ff_layout_write_release,
1761};
1762
1763static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1764	.rpc_call_prepare = ff_layout_write_prepare_v4,
1765	.rpc_call_done = ff_layout_write_call_done,
1766	.rpc_count_stats = ff_layout_write_count_stats,
1767	.rpc_release = ff_layout_write_release,
1768};
1769
1770static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1771	.rpc_call_prepare = ff_layout_commit_prepare_v3,
1772	.rpc_call_done = ff_layout_commit_done,
1773	.rpc_count_stats = ff_layout_commit_count_stats,
1774	.rpc_release = ff_layout_commit_release,
1775};
1776
1777static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1778	.rpc_call_prepare = ff_layout_commit_prepare_v4,
1779	.rpc_call_done = ff_layout_commit_done,
1780	.rpc_count_stats = ff_layout_commit_count_stats,
1781	.rpc_release = ff_layout_commit_release,
1782};
1783
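     /*
      * Dispatch a read to the data server backing the chosen mirror:
      * substitute the DS filehandle, stateid and credential into the pgio
      * header and fire the RPC over the DS client (or via LOCALIO when the
      * DS is the local host). If the DS cannot be set up, fall back to the
      * MDS unless the layout insists on pNFS, in which case ask the caller
      * to try again.
      */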
1784static enum pnfs_try_status
1785ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1786{
1787	struct pnfs_layout_segment *lseg = hdr->lseg;
1788	struct nfs4_pnfs_ds *ds;
1789	struct rpc_clnt *ds_clnt;
1790	struct nfsd_file *localio;
1791	struct nfs4_ff_layout_mirror *mirror;
1792	const struct cred *ds_cred;
1793	loff_t offset = hdr->args.offset;
1794	u32 idx = hdr->pgio_mirror_idx;
1795	int vers;
1796	struct nfs_fh *fh;
1797
1798	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1799		__func__, hdr->inode->i_ino,
1800		hdr->args.pgbase, (size_t)hdr->args.count, offset);
1801
1802	mirror = FF_LAYOUT_COMP(lseg, idx);
1803	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1804	if (!ds)
1805		goto out_failed;
1806
1807	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1808						   hdr->inode);
1809	if (IS_ERR(ds_clnt))
1810		goto out_failed;
1811
1812	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1813	if (!ds_cred)
1814		goto out_failed;
1815
1816	vers = nfs4_ff_layout_ds_version(mirror);
1817
1818	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1819		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1820
1821	hdr->pgio_done_cb = ff_layout_read_done_cb;
1822	refcount_inc(&ds->ds_clp->cl_count);
1823	hdr->ds_clp = ds->ds_clp;
1824	fh = nfs4_ff_layout_select_ds_fh(mirror);
1825	if (fh)
1826		hdr->args.fh = fh;
1827
1828	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1829
1830	/*
1831	 * Note that if we ever decide to split across DSes,
1832	 * then we may need to handle dense-like offsets.
1833	 */
1834	hdr->args.offset = offset;
1835	hdr->mds_offset = offset;
1836
1837	/* Start IO accounting for local read */
1838	localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, FMODE_READ);
1839	if (localio) {
1840		hdr->task.tk_start = ktime_get();
1841		ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
1842	}
1843
1844	/* Perform an asynchronous read to ds */
1845	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1846			  vers == 3 ? &ff_layout_read_call_ops_v3 :
1847				      &ff_layout_read_call_ops_v4,
1848			  0, RPC_TASK_SOFTCONN, localio);
1849	put_cred(ds_cred);
1850	return PNFS_ATTEMPTED;
1851
1852out_failed:
1853	if (ff_layout_avoid_mds_available_ds(lseg))
1854		return PNFS_TRY_AGAIN;
1855	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
1856			hdr->args.offset, hdr->args.count,
1857			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
1858	return PNFS_NOT_ATTEMPTED;
1859}
1860
1861/* Perform async writes. */
1862static enum pnfs_try_status
1863ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1864{
1865	struct pnfs_layout_segment *lseg = hdr->lseg;
1866	struct nfs4_pnfs_ds *ds;
1867	struct rpc_clnt *ds_clnt;
1868	struct nfsd_file *localio;
1869	struct nfs4_ff_layout_mirror *mirror;
1870	const struct cred *ds_cred;
1871	loff_t offset = hdr->args.offset;
1872	int vers;
1873	struct nfs_fh *fh;
1874	u32 idx = hdr->pgio_mirror_idx;
1875
1876	mirror = FF_LAYOUT_COMP(lseg, idx);
1877	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1878	if (!ds)
1879		goto out_failed;
1880
1881	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1882						   hdr->inode);
1883	if (IS_ERR(ds_clnt))
1884		goto out_failed;
1885
1886	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1887	if (!ds_cred)
1888		goto out_failed;
1889
1890	vers = nfs4_ff_layout_ds_version(mirror);
1891
1892	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1893		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1894		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1895		vers);
1896
1897	hdr->pgio_done_cb = ff_layout_write_done_cb;
1898	refcount_inc(&ds->ds_clp->cl_count);
1899	hdr->ds_clp = ds->ds_clp;
1900	hdr->ds_commit_idx = idx;
1901	fh = nfs4_ff_layout_select_ds_fh(mirror);
1902	if (fh)
1903		hdr->args.fh = fh;
1904
1905	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1906
1907	/*
1908	 * Note that if we ever decide to split across DSes,
1909	 * then we may need to handle dense-like offsets.
1910	 */
1911	hdr->args.offset = offset;
1912
1913	/* Start IO accounting for local write */
1914	localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
1915				   FMODE_READ|FMODE_WRITE);
1916	if (localio) {
1917		hdr->task.tk_start = ktime_get();
1918		ff_layout_write_record_layoutstats_start(&hdr->task, hdr);
1919	}
1920
1921	/* Perform an asynchronous write */
1922	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1923			  vers == 3 ? &ff_layout_write_call_ops_v3 :
1924				      &ff_layout_write_call_ops_v4,
1925			  sync, RPC_TASK_SOFTCONN, localio);
1926	put_cred(ds_cred);
1927	return PNFS_ATTEMPTED;
1928
1929out_failed:
1930	if (ff_layout_avoid_mds_available_ds(lseg))
1931		return PNFS_TRY_AGAIN;
1932	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
1933			hdr->args.offset, hdr->args.count,
1934			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
1935	return PNFS_NOT_ATTEMPTED;
1936}
1937
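/* Commit buckets map 1:1 onto the mirror array, so the index is unchanged. */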
1938static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1939{
1940	return i;
1941}
1942
1943static struct nfs_fh *
1944select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1945{
1946	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1947
1948	/* FIXME: Assume that there is only one NFS version available
1949	 * for the DS.
1950	 */
1951	return &flseg->mirror_array[i]->fh_versions[0];
1952}
1953
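/*
 * Send a COMMIT to the data server backing the given commit bucket.
 * On any setup failure the queued writes are marked for resend through
 * the MDS and -EAGAIN is returned.
 */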
1954static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1955{
1956	struct pnfs_layout_segment *lseg = data->lseg;
1957	struct nfs4_pnfs_ds *ds;
1958	struct rpc_clnt *ds_clnt;
1959	struct nfsd_file *localio;
1960	struct nfs4_ff_layout_mirror *mirror;
1961	const struct cred *ds_cred;
1962	u32 idx;
1963	int vers, ret;
1964	struct nfs_fh *fh;
1965
1966	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1967	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1968		goto out_err;
1969
1970	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1971	mirror = FF_LAYOUT_COMP(lseg, idx);
1972	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1973	if (!ds)
1974		goto out_err;
1975
1976	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1977						   data->inode);
1978	if (IS_ERR(ds_clnt))
1979		goto out_err;
1980
1981	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
1982	if (!ds_cred)
1983		goto out_err;
1984
1985	vers = nfs4_ff_layout_ds_version(mirror);
1986
1987	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1988		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
1989		vers);
1990	data->commit_done_cb = ff_layout_commit_done_cb;
1991	data->cred = ds_cred;
1992	refcount_inc(&ds->ds_clp->cl_count);
1993	data->ds_clp = ds->ds_clp;
1994	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1995	if (fh)
1996		data->args.fh = fh;
1997
1998	/* Start IO accounting for local commit */
1999	localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
2000				   FMODE_READ|FMODE_WRITE);
2001	if (localio) {
2002		data->task.tk_start = ktime_get();
2003		ff_layout_commit_record_layoutstats_start(&data->task, data);
2004	}
2005
2006	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
2007				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
2008					       &ff_layout_commit_call_ops_v4,
2009				   how, RPC_TASK_SOFTCONN, localio);
2010	put_cred(ds_cred);
2011	return ret;
2012out_err:
2013	pnfs_generic_prepare_to_resend_writes(data);
2014	pnfs_generic_commit_release(data);
2015	return -EAGAIN;
2016}
2017
2018static int
2019ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
2020			   int how, struct nfs_commit_info *cinfo)
2021{
2022	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
2023					    ff_layout_initiate_commit);
2024}
2025
2026static bool ff_layout_match_rw(const struct rpc_task *task,
2027			       const struct nfs_pgio_header *hdr,
2028			       const struct pnfs_layout_segment *lseg)
2029{
2030	return hdr->lseg == lseg;
2031}
2032
2033static bool ff_layout_match_commit(const struct rpc_task *task,
2034				   const struct nfs_commit_data *cdata,
2035				   const struct pnfs_layout_segment *lseg)
2036{
2037	return cdata->lseg == lseg;
2038}
2039
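/*
 * rpc_cancel_tasks() callback: match in-flight flexfiles READ, WRITE and
 * COMMIT RPCs that belong to the layout segment being cancelled.
 */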
2040static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
2041{
2042	const struct rpc_call_ops *ops = task->tk_ops;
2043
2044	if (ops == &ff_layout_read_call_ops_v3 ||
2045	    ops == &ff_layout_read_call_ops_v4 ||
2046	    ops == &ff_layout_write_call_ops_v3 ||
2047	    ops == &ff_layout_write_call_ops_v4)
2048		return ff_layout_match_rw(task, task->tk_calldata, data);
2049	if (ops == &ff_layout_commit_call_ops_v3 ||
2050	    ops == &ff_layout_commit_call_ops_v4)
2051		return ff_layout_match_commit(task, task->tk_calldata, data);
2052	return false;
2053}
2054
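/*
 * Cancel outstanding I/O to every data server referenced by this layout
 * segment, and disconnect the transport if anything was cancelled.
 */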
2055static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
2056{
2057	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2058	struct nfs4_ff_layout_mirror *mirror;
2059	struct nfs4_ff_layout_ds *mirror_ds;
2060	struct nfs4_pnfs_ds *ds;
2061	struct nfs_client *ds_clp;
2062	struct rpc_clnt *clnt;
2063	u32 idx;
2064
2065	for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
2066		mirror = flseg->mirror_array[idx];
2067		mirror_ds = mirror->mirror_ds;
2068		if (IS_ERR_OR_NULL(mirror_ds))
2069			continue;
2070		ds = mirror->mirror_ds->ds;
2071		if (!ds)
2072			continue;
2073		ds_clp = ds->ds_clp;
2074		if (!ds_clp)
2075			continue;
2076		clnt = ds_clp->cl_rpcclient;
2077		if (!clnt)
2078			continue;
2079		if (!rpc_cancel_tasks(clnt, -EAGAIN, ff_layout_match_io, lseg))
2080			continue;
2081		rpc_clnt_disconnect(clnt);
2082	}
2083}
2084
2085static struct pnfs_ds_commit_info *
2086ff_layout_get_ds_info(struct inode *inode)
2087{
2088	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
2089
2090	if (layout == NULL)
2091		return NULL;
2092
2093	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
2094}
2095
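/*
 * Attach a commit array (one bucket per mirror) to the per-inode commit
 * info.  If another thread installed an array first, free the new one.
 */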
2096static void
2097ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2098		struct pnfs_layout_segment *lseg)
2099{
2100	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2101	struct inode *inode = lseg->pls_layout->plh_inode;
2102	struct pnfs_commit_array *array, *new;
2103
2104	new = pnfs_alloc_commit_array(flseg->mirror_array_cnt,
2105				      nfs_io_gfp_mask());
2106	if (new) {
2107		spin_lock(&inode->i_lock);
2108		array = pnfs_add_commit_array(fl_cinfo, new, lseg);
2109		spin_unlock(&inode->i_lock);
2110		if (array != new)
2111			pnfs_free_commit_array(new);
2112	}
2113}
2114
2115static void
2116ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2117		struct inode *inode)
2118{
2119	spin_lock(&inode->i_lock);
2120	pnfs_generic_ds_cinfo_destroy(fl_cinfo);
2121	spin_unlock(&inode->i_lock);
2122}
2123
2124static void
2125ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
2126{
2127	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
2128						  id_node));
2129}
2130
2131static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
2132				  const struct nfs4_layoutreturn_args *args,
2133				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
2134{
2135	__be32 *start;
2136
2137	start = xdr_reserve_space(xdr, 4);
2138	if (unlikely(!start))
2139		return -E2BIG;
2140
2141	*start = cpu_to_be32(ff_args->num_errors);
2142	/* This assumes we always return _ALL_ layouts */
2143	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2144}
2145
2146static void
2147ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2148			    const nfs4_stateid *stateid,
2149			    const struct nfs42_layoutstat_devinfo *devinfo)
2150{
2151	__be32 *p;
2152
2153	p = xdr_reserve_space(xdr, 8 + 8);
2154	p = xdr_encode_hyper(p, devinfo->offset);
2155	p = xdr_encode_hyper(p, devinfo->length);
2156	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2157	p = xdr_reserve_space(xdr, 4*8);
2158	p = xdr_encode_hyper(p, devinfo->read_count);
2159	p = xdr_encode_hyper(p, devinfo->read_bytes);
2160	p = xdr_encode_hyper(p, devinfo->write_count);
2161	p = xdr_encode_hyper(p, devinfo->write_bytes);
2162	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2163}
2164
2165static void
2166ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2167			    const nfs4_stateid *stateid,
2168			    const struct nfs42_layoutstat_devinfo *devinfo)
2169{
2170	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2171	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2172			devinfo->ld_private.data);
2173}
2174
2175/* Encode the per-device iostats array returned with LAYOUTRETURN */
2176static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2177		const struct nfs4_layoutreturn_args *args,
2178		struct nfs4_flexfile_layoutreturn_args *ff_args)
2179{
2180	__be32 *p;
2181	int i;
2182
2183	p = xdr_reserve_space(xdr, 4);
2184	*p = cpu_to_be32(ff_args->num_dev);
2185	for (i = 0; i < ff_args->num_dev; i++)
2186		ff_layout_encode_ff_iostat(xdr,
2187				&args->layout->plh_stateid,
2188				&ff_args->devinfo[i]);
2189}
2190
2191static void
2192ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2193		unsigned int num_entries)
2194{
2195	unsigned int i;
2196
2197	for (i = 0; i < num_entries; i++) {
2198		if (!devinfo[i].ld_private.ops)
2199			continue;
2200		if (!devinfo[i].ld_private.ops->free)
2201			continue;
2202		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2203	}
2204}
2205
2206static struct nfs4_deviceid_node *
2207ff_layout_alloc_deviceid_node(struct nfs_server *server,
2208			      struct pnfs_device *pdev, gfp_t gfp_flags)
2209{
2210	struct nfs4_ff_layout_ds *dsaddr;
2211
2212	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2213	if (!dsaddr)
2214		return NULL;
2215	return &dsaddr->id_node;
2216}
2217
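/*
 * Encode the flexfiles layoutreturn body (I/O errors followed by the
 * iostats array) into a scratch page, then emit it as a length-prefixed
 * opaque within the LAYOUTRETURN request.
 */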
2218static void
2219ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2220		const void *voidargs,
2221		const struct nfs4_xdr_opaque_data *ff_opaque)
2222{
2223	const struct nfs4_layoutreturn_args *args = voidargs;
2224	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2225	struct xdr_buf tmp_buf = {
2226		.head = {
2227			[0] = {
2228				.iov_base = page_address(ff_args->pages[0]),
2229			},
2230		},
2231		.buflen = PAGE_SIZE,
2232	};
2233	struct xdr_stream tmp_xdr;
2234	__be32 *start;
2235
2236	dprintk("%s: Begin\n", __func__);
2237
2238	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2239
2240	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2241	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2242
2243	start = xdr_reserve_space(xdr, 4);
2244	*start = cpu_to_be32(tmp_buf.len);
2245	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2246
2247	dprintk("%s: Return\n", __func__);
2248}
2249
2250static void
2251ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2252{
2253	struct nfs4_flexfile_layoutreturn_args *ff_args;
2254
2255	if (!args->data)
2256		return;
2257	ff_args = args->data;
2258	args->data = NULL;
2259
2260	ff_layout_free_ds_ioerr(&ff_args->errors);
2261	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2262
2263	put_page(ff_args->pages[0]);
2264	kfree(ff_args);
2265}
2266
2267static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2268	.encode = ff_layout_encode_layoutreturn,
2269	.free = ff_layout_free_layoutreturn,
2270};
2271
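/*
 * Build the private layoutreturn payload: collect queued DS errors for
 * the returned range and snapshot per-mirror statistics, then attach the
 * result to args->ld_private for encoding by layoutreturn_ops.
 */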
2272static int
2273ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2274{
2275	struct nfs4_flexfile_layoutreturn_args *ff_args;
2276	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2277
2278	ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask());
2279	if (!ff_args)
2280		goto out_nomem;
2281	ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
2282	if (!ff_args->pages[0])
2283		goto out_nomem_free;
2284
2285	INIT_LIST_HEAD(&ff_args->errors);
2286	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2287			&args->range, &ff_args->errors,
2288			FF_LAYOUTRETURN_MAXERR);
2289
2290	spin_lock(&args->inode->i_lock);
2291	ff_args->num_dev = ff_layout_mirror_prepare_stats(
2292		&ff_layout->generic_hdr, &ff_args->devinfo[0],
2293		ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
2294	spin_unlock(&args->inode->i_lock);
2295
2296	args->ld_private->ops = &layoutreturn_ops;
2297	args->ld_private->data = ff_args;
2298	return 0;
2299out_nomem_free:
2300	kfree(ff_args);
2301out_nomem:
2302	return -ENOMEM;
2303}
2304
2305#ifdef CONFIG_NFS_V4_2
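/*
 * Report queued data-server errors for this segment via LAYOUTERROR,
 * batching at most NFS42_LAYOUTERROR_MAX entries per call.
 */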
2306void
2307ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2308{
2309	struct pnfs_layout_hdr *lo = lseg->pls_layout;
2310	struct nfs42_layout_error *errors;
2311	LIST_HEAD(head);
2312
2313	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2314		return;
2315	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2316	if (list_empty(&head))
2317		return;
2318
2319	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors),
2320			       nfs_io_gfp_mask());
2321	if (errors != NULL) {
2322		const struct nfs4_ff_layout_ds_err *pos;
2323		size_t n = 0;
2324
2325		list_for_each_entry(pos, &head, list) {
2326			errors[n].offset = pos->offset;
2327			errors[n].length = pos->length;
2328			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2329			errors[n].errors[0].dev_id = pos->deviceid;
2330			errors[n].errors[0].status = pos->status;
2331			errors[n].errors[0].opnum = pos->opnum;
2332			n++;
2333			if (!list_is_last(&pos->list, &head) &&
2334			    n < NFS42_LAYOUTERROR_MAX)
2335				continue;
2336			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2337				break;
2338			n = 0;
2339		}
2340		kfree(errors);
2341	}
2342	ff_layout_free_ds_ioerr(&head);
2343}
2344#else
2345void
2346ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2347{
2348}
2349#endif
2350
2351static int
2352ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2353{
2354	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2355
2356	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2357}
2358
2359static size_t
2360ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2361			  const int buflen)
2362{
2363	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2364	const struct in6_addr *addr = &sin6->sin6_addr;
2365
2366	/*
2367	 * RFC 4291, Section 2.2.2
2368	 *
2369	 * Shorthanded ANY address
2370	 */
2371	if (ipv6_addr_any(addr))
2372		return snprintf(buf, buflen, "::");
2373
2374	/*
2375	 * RFC 4291, Section 2.2.2
2376	 *
2377	 * Shorthanded loopback address
2378	 */
2379	if (ipv6_addr_loopback(addr))
2380		return snprintf(buf, buflen, "::1");
2381
2382	/*
2383	 * RFC 4291, Section 2.2.3
2384	 *
2385	 * Special presentation address format for mapped v4
2386	 * addresses.
2387	 */
2388	if (ipv6_addr_v4mapped(addr))
2389		return snprintf(buf, buflen, "::ffff:%pI4",
2390					&addr->s6_addr32[3]);
2391
2392	/*
2393	 * RFC 4291, Section 2.2.1
2394	 */
2395	return snprintf(buf, buflen, "%pI6c", addr);
2396}
2397
2398/* Derived from rpc_sockaddr2uaddr */
2399static void
2400ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2401{
2402	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2403	char portbuf[RPCBIND_MAXUADDRPLEN];
2404	char addrbuf[RPCBIND_MAXUADDRLEN];
2405	unsigned short port;
2406	int len, netid_len;
2407	__be32 *p;
2408
2409	switch (sap->sa_family) {
2410	case AF_INET:
2411		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2412			return;
2413		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2414		break;
2415	case AF_INET6:
2416		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2417			return;
2418		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2419		break;
2420	default:
2421		WARN_ON_ONCE(1);
2422		return;
2423	}
2424
2425	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2426	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2427
2428	netid_len = strlen(da->da_netid);
2429	p = xdr_reserve_space(xdr, 4 + netid_len);
2430	xdr_encode_opaque(p, da->da_netid, netid_len);
2431
2432	p = xdr_reserve_space(xdr, 4 + len);
2433	xdr_encode_opaque(p, addrbuf, len);
2434}
2435
2436static void
2437ff_layout_encode_nfstime(struct xdr_stream *xdr,
2438			 ktime_t t)
2439{
2440	struct timespec64 ts;
2441	__be32 *p;
2442
2443	p = xdr_reserve_space(xdr, 12);
2444	ts = ktime_to_timespec64(t);
2445	p = xdr_encode_hyper(p, ts.tv_sec);
2446	*p++ = cpu_to_be32(ts.tv_nsec);
2447}
2448
2449static void
2450ff_layout_encode_io_latency(struct xdr_stream *xdr,
2451			    struct nfs4_ff_io_stat *stat)
2452{
2453	__be32 *p;
2454
2455	p = xdr_reserve_space(xdr, 5 * 8);
2456	p = xdr_encode_hyper(p, stat->ops_requested);
2457	p = xdr_encode_hyper(p, stat->bytes_requested);
2458	p = xdr_encode_hyper(p, stat->ops_completed);
2459	p = xdr_encode_hyper(p, stat->bytes_completed);
2460	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2461	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2462	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2463}
2464
2465static void
2466ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2467			      const struct nfs42_layoutstat_devinfo *devinfo,
2468			      struct nfs4_ff_layout_mirror *mirror)
2469{
2470	struct nfs4_pnfs_ds_addr *da;
2471	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2472	struct nfs_fh *fh = &mirror->fh_versions[0];
2473	__be32 *p;
2474
2475	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2476	dprintk("%s: DS %s: encoding address %s\n",
2477		__func__, ds->ds_remotestr, da->da_remotestr);
2478	/* netaddr4 */
2479	ff_layout_encode_netaddr(xdr, da);
2480	/* nfs_fh4 */
2481	p = xdr_reserve_space(xdr, 4 + fh->size);
2482	xdr_encode_opaque(p, fh->data, fh->size);
2483	/* ff_io_latency4 read */
2484	spin_lock(&mirror->lock);
2485	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2486	/* ff_io_latency4 write */
2487	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2488	spin_unlock(&mirror->lock);
2489	/* nfstime4 */
2490	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2491	/* bool */
2492	p = xdr_reserve_space(xdr, 4);
2493	*p = cpu_to_be32(false);
2494}
2495
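/*
 * Encode the ff_layoutupdate4 body and back-patch the preceding opaque
 * length once the number of 32-bit words written is known.
 */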
2496static void
2497ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2498			     const struct nfs4_xdr_opaque_data *opaque)
2499{
2500	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2501			struct nfs42_layoutstat_devinfo, ld_private);
2502	__be32 *start;
2503
2504	/* layoutupdate length */
2505	start = xdr_reserve_space(xdr, 4);
2506	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2507
2508	*start = cpu_to_be32((xdr->p - start - 1) * 4);
2509}
2510
2511static void
2512ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2513{
2514	struct nfs4_ff_layout_mirror *mirror = opaque->data;
2515
2516	ff_layout_put_mirror(mirror);
2517}
2518
2519static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2520	.encode = ff_layout_encode_layoutstats,
2521	.free	= ff_layout_free_layoutstats,
2522};
2523
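/*
 * Fill in up to @dev_limit layoutstats entries, one per mirror.  Each
 * entry holds a mirror reference that is dropped by layoutstat_ops->free.
 * For LAYOUTRETURN all mirrors are reported; otherwise only those with
 * freshly collected statistics.
 */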
2524static int
2525ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2526			       struct nfs42_layoutstat_devinfo *devinfo,
2527			       int dev_limit, enum nfs4_ff_op_type type)
2528{
2529	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2530	struct nfs4_ff_layout_mirror *mirror;
2531	struct nfs4_deviceid_node *dev;
2532	int i = 0;
2533
2534	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2535		if (i >= dev_limit)
2536			break;
2537		if (IS_ERR_OR_NULL(mirror->mirror_ds))
2538			continue;
2539		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
2540					&mirror->flags) &&
2541		    type != NFS4_FF_OP_LAYOUTRETURN)
2542			continue;
2543		/* mirror refcount put in cleanup_layoutstats */
2544		if (!refcount_inc_not_zero(&mirror->ref))
2545			continue;
2546		dev = &mirror->mirror_ds->id_node;
2547		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2548		devinfo->offset = 0;
2549		devinfo->length = NFS4_MAX_UINT64;
2550		spin_lock(&mirror->lock);
2551		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2552		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2553		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2554		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2555		spin_unlock(&mirror->lock);
2556		devinfo->layout_type = LAYOUT_FLEX_FILES;
2557		devinfo->ld_private.ops = &layoutstat_ops;
2558		devinfo->ld_private.data = mirror;
2559
2560		devinfo++;
2561		i++;
2562	}
2563	return i;
2564}
2565
2566static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2567{
2568	struct pnfs_layout_hdr *lo;
2569	struct nfs4_flexfile_layout *ff_layout;
2570	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2571
2572	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2573	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo),
2574				      nfs_io_gfp_mask());
2575	if (!args->devinfo)
2576		return -ENOMEM;
2577
2578	spin_lock(&args->inode->i_lock);
2579	lo = NFS_I(args->inode)->layout;
2580	if (lo && pnfs_layout_is_valid(lo)) {
2581		ff_layout = FF_LAYOUT_FROM_HDR(lo);
2582		args->num_dev = ff_layout_mirror_prepare_stats(
2583			&ff_layout->generic_hdr, &args->devinfo[0], dev_count,
2584			NFS4_FF_OP_LAYOUTSTATS);
2585	} else
2586		args->num_dev = 0;
2587	spin_unlock(&args->inode->i_lock);
2588	if (!args->num_dev) {
2589		kfree(args->devinfo);
2590		args->devinfo = NULL;
2591		return -ENOENT;
2592	}
2593
2594	return 0;
2595}
2596
2597static int
2598ff_layout_set_layoutdriver(struct nfs_server *server,
2599		const struct nfs_fh *dummy)
2600{
2601#if IS_ENABLED(CONFIG_NFS_V4_2)
2602	server->caps |= NFS_CAP_LAYOUTSTATS | NFS_CAP_REBOOT_LAYOUTRETURN;
2603#endif
2604	return 0;
2605}
2606
2607static const struct pnfs_commit_ops ff_layout_commit_ops = {
2608	.setup_ds_info		= ff_layout_setup_ds_info,
2609	.release_ds_info	= ff_layout_release_ds_info,
2610	.mark_request_commit	= pnfs_layout_mark_request_commit,
2611	.clear_request_commit	= pnfs_generic_clear_request_commit,
2612	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
2613	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
2614	.commit_pagelist	= ff_layout_commit_pagelist,
2615};
2616
2617static struct pnfs_layoutdriver_type flexfilelayout_type = {
2618	.id			= LAYOUT_FLEX_FILES,
2619	.name			= "LAYOUT_FLEX_FILES",
2620	.owner			= THIS_MODULE,
2621	.flags			= PNFS_LAYOUTGET_ON_OPEN,
2622	.max_layoutget_response	= 4096, /* 1 page or so... */
2623	.set_layoutdriver	= ff_layout_set_layoutdriver,
2624	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
2625	.free_layout_hdr	= ff_layout_free_layout_hdr,
2626	.alloc_lseg		= ff_layout_alloc_lseg,
2627	.free_lseg		= ff_layout_free_lseg,
2628	.add_lseg		= ff_layout_add_lseg,
2629	.pg_read_ops		= &ff_layout_pg_read_ops,
2630	.pg_write_ops		= &ff_layout_pg_write_ops,
2631	.get_ds_info		= ff_layout_get_ds_info,
2632	.free_deviceid_node	= ff_layout_free_deviceid_node,
2633	.read_pagelist		= ff_layout_read_pagelist,
2634	.write_pagelist		= ff_layout_write_pagelist,
2635	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
2636	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
2637	.sync			= pnfs_nfs_generic_sync,
2638	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
2639	.cancel_io		= ff_layout_cancel_io,
2640};
2641
2642static int __init nfs4flexfilelayout_init(void)
2643{
2644	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2645	       __func__);
2646	return pnfs_register_layoutdriver(&flexfilelayout_type);
2647}
2648
2649static void __exit nfs4flexfilelayout_exit(void)
2650{
2651	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2652	       __func__);
2653	pnfs_unregister_layoutdriver(&flexfilelayout_type);
2654}
2655
2656MODULE_ALIAS("nfs-layouttype4-4");
2657
2658MODULE_LICENSE("GPL");
2659MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2660
2661module_init(nfs4flexfilelayout_init);
2662module_exit(nfs4flexfilelayout_exit);
2663
2664module_param(io_maxretrans, ushort, 0644);
2665MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
2666			"retries an I/O request before returning an error.");
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Module for pnfs flexfile layout driver.
   4 *
   5 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
   6 *
   7 * Tao Peng <bergwolf@primarydata.com>
   8 */
   9
  10#include <linux/nfs_fs.h>
  11#include <linux/nfs_mount.h>
  12#include <linux/nfs_page.h>
  13#include <linux/module.h>
 
  14#include <linux/sched/mm.h>
  15
  16#include <linux/sunrpc/metrics.h>
  17
  18#include "flexfilelayout.h"
  19#include "../nfs4session.h"
  20#include "../nfs4idmap.h"
  21#include "../internal.h"
  22#include "../delegation.h"
  23#include "../nfs4trace.h"
  24#include "../iostat.h"
  25#include "../nfs.h"
  26#include "../nfs42.h"
  27
  28#define NFSDBG_FACILITY         NFSDBG_PNFS_LD
  29
  30#define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
  31#define FF_LAYOUTRETURN_MAXERR 20
  32
 
 
 
 
 
  33static unsigned short io_maxretrans;
  34
 
  35static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
  36		struct nfs_pgio_header *hdr);
  37static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
 
  38			       struct nfs42_layoutstat_devinfo *devinfo,
  39			       int dev_limit);
  40static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
  41			      const struct nfs42_layoutstat_devinfo *devinfo,
  42			      struct nfs4_ff_layout_mirror *mirror);
  43
  44static struct pnfs_layout_hdr *
  45ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
  46{
  47	struct nfs4_flexfile_layout *ffl;
  48
  49	ffl = kzalloc(sizeof(*ffl), gfp_flags);
  50	if (ffl) {
 
  51		INIT_LIST_HEAD(&ffl->error_list);
  52		INIT_LIST_HEAD(&ffl->mirrors);
  53		ffl->last_report_time = ktime_get();
 
  54		return &ffl->generic_hdr;
  55	} else
  56		return NULL;
  57}
  58
  59static void
  60ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
  61{
 
  62	struct nfs4_ff_layout_ds_err *err, *n;
  63
  64	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
  65				 list) {
  66		list_del(&err->list);
  67		kfree(err);
  68	}
  69	kfree(FF_LAYOUT_FROM_HDR(lo));
  70}
  71
  72static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
  73{
  74	__be32 *p;
  75
  76	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
  77	if (unlikely(p == NULL))
  78		return -ENOBUFS;
  79	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
  80	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
  81	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
  82		p[0], p[1], p[2], p[3]);
  83	return 0;
  84}
  85
  86static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
  87{
  88	__be32 *p;
  89
  90	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
  91	if (unlikely(!p))
  92		return -ENOBUFS;
  93	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
  94	nfs4_print_deviceid(devid);
  95	return 0;
  96}
  97
  98static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
  99{
 100	__be32 *p;
 101
 102	p = xdr_inline_decode(xdr, 4);
 103	if (unlikely(!p))
 104		return -ENOBUFS;
 105	fh->size = be32_to_cpup(p++);
 106	if (fh->size > sizeof(struct nfs_fh)) {
 107		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
 108		       fh->size);
 109		return -EOVERFLOW;
 110	}
 111	/* fh.data */
 112	p = xdr_inline_decode(xdr, fh->size);
 113	if (unlikely(!p))
 114		return -ENOBUFS;
 115	memcpy(&fh->data, p, fh->size);
 116	dprintk("%s: fh len %d\n", __func__, fh->size);
 117
 118	return 0;
 119}
 120
 121/*
 122 * Currently only stringified uids and gids are accepted.
 123 * I.e., kerberos is not supported to the DSes, so no pricipals.
 124 *
 125 * That means that one common function will suffice, but when
 126 * principals are added, this should be split to accomodate
 127 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 128 */
 129static int
 130decode_name(struct xdr_stream *xdr, u32 *id)
 131{
 132	__be32 *p;
 133	int len;
 134
 135	/* opaque_length(4)*/
 136	p = xdr_inline_decode(xdr, 4);
 137	if (unlikely(!p))
 138		return -ENOBUFS;
 139	len = be32_to_cpup(p++);
 140	if (len < 0)
 141		return -EINVAL;
 142
 143	dprintk("%s: len %u\n", __func__, len);
 144
 145	/* opaque body */
 146	p = xdr_inline_decode(xdr, len);
 147	if (unlikely(!p))
 148		return -ENOBUFS;
 149
 150	if (!nfs_map_string_to_numeric((char *)p, len, id))
 151		return -EINVAL;
 152
 153	return 0;
 154}
 155
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 156static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
 157		const struct nfs4_ff_layout_mirror *m2)
 158{
 159	int i, j;
 160
 161	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
 162		return false;
 163	for (i = 0; i < m1->fh_versions_cnt; i++) {
 164		bool found_fh = false;
 165		for (j = 0; j < m2->fh_versions_cnt; j++) {
 166			if (nfs_compare_fh(&m1->fh_versions[i],
 167					&m2->fh_versions[j]) == 0) {
 168				found_fh = true;
 169				break;
 170			}
 171		}
 172		if (!found_fh)
 173			return false;
 174	}
 175	return true;
 176}
 177
 178static struct nfs4_ff_layout_mirror *
 179ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
 180		struct nfs4_ff_layout_mirror *mirror)
 181{
 182	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
 183	struct nfs4_ff_layout_mirror *pos;
 184	struct inode *inode = lo->plh_inode;
 185
 186	spin_lock(&inode->i_lock);
 187	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
 188		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
 189			continue;
 190		if (!ff_mirror_match_fh(mirror, pos))
 191			continue;
 192		if (refcount_inc_not_zero(&pos->ref)) {
 193			spin_unlock(&inode->i_lock);
 194			return pos;
 195		}
 196	}
 197	list_add(&mirror->mirrors, &ff_layout->mirrors);
 198	mirror->layout = lo;
 199	spin_unlock(&inode->i_lock);
 200	return mirror;
 201}
 202
 203static void
 204ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
 205{
 206	struct inode *inode;
 207	if (mirror->layout == NULL)
 208		return;
 209	inode = mirror->layout->plh_inode;
 210	spin_lock(&inode->i_lock);
 211	list_del(&mirror->mirrors);
 212	spin_unlock(&inode->i_lock);
 213	mirror->layout = NULL;
 214}
 215
 216static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
 217{
 218	struct nfs4_ff_layout_mirror *mirror;
 219
 220	mirror = kzalloc(sizeof(*mirror), gfp_flags);
 221	if (mirror != NULL) {
 222		spin_lock_init(&mirror->lock);
 223		refcount_set(&mirror->ref, 1);
 224		INIT_LIST_HEAD(&mirror->mirrors);
 225	}
 226	return mirror;
 227}
 228
 229static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
 230{
 231	const struct cred	*cred;
 232
 233	ff_layout_remove_mirror(mirror);
 234	kfree(mirror->fh_versions);
 235	cred = rcu_access_pointer(mirror->ro_cred);
 236	put_cred(cred);
 237	cred = rcu_access_pointer(mirror->rw_cred);
 238	put_cred(cred);
 239	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
 240	kfree(mirror);
 241}
 242
 243static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
 244{
 245	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
 246		ff_layout_free_mirror(mirror);
 247}
 248
 249static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
 250{
 251	int i;
 252
 253	if (fls->mirror_array) {
 254		for (i = 0; i < fls->mirror_array_cnt; i++) {
 255			/* normally mirror_ds is freed in
 256			 * .free_deviceid_node but we still do it here
 257			 * for .alloc_lseg error path */
 258			ff_layout_put_mirror(fls->mirror_array[i]);
 259		}
 260		kfree(fls->mirror_array);
 261		fls->mirror_array = NULL;
 262	}
 263}
 264
 265static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
 266{
 267	int ret = 0;
 268
 269	dprintk("--> %s\n", __func__);
 270
 271	/* FIXME: remove this check when layout segment support is added */
 272	if (lgr->range.offset != 0 ||
 273	    lgr->range.length != NFS4_MAX_UINT64) {
 274		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
 275			__func__);
 276		ret = -EINVAL;
 277	}
 278
 279	dprintk("--> %s returns %d\n", __func__, ret);
 280	return ret;
 281}
 282
 283static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
 
 
 284{
 285	if (fls) {
 286		ff_layout_free_mirror_array(fls);
 287		kfree(fls);
 
 
 
 
 
 
 288	}
 
 289}
 290
 291static bool
 292ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
 293		const struct pnfs_layout_range *l2)
 294{
 295	u64 end1, end2;
 296
 297	if (l1->iomode != l2->iomode)
 298		return l1->iomode != IOMODE_READ;
 299	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
 300	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
 301	if (end1 < l2->offset)
 302		return false;
 303	if (end2 < l1->offset)
 304		return true;
 305	return l2->offset <= l1->offset;
 306}
 307
 308static bool
 309ff_lseg_merge(struct pnfs_layout_segment *new,
 310		struct pnfs_layout_segment *old)
 311{
 312	u64 new_end, old_end;
 313
 314	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
 315		return false;
 316	if (new->pls_range.iomode != old->pls_range.iomode)
 317		return false;
 318	old_end = pnfs_calc_offset_end(old->pls_range.offset,
 319			old->pls_range.length);
 320	if (old_end < new->pls_range.offset)
 321		return false;
 322	new_end = pnfs_calc_offset_end(new->pls_range.offset,
 323			new->pls_range.length);
 324	if (new_end < old->pls_range.offset)
 325		return false;
 
 
 326
 327	/* Mergeable: copy info from 'old' to 'new' */
 328	if (new_end < old_end)
 329		new_end = old_end;
 330	if (new->pls_range.offset < old->pls_range.offset)
 331		new->pls_range.offset = old->pls_range.offset;
 332	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
 333			new_end);
 334	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
 335		set_bit(NFS_LSEG_ROC, &new->pls_flags);
 336	return true;
 337}
 338
 339static void
 340ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
 341		struct pnfs_layout_segment *lseg,
 342		struct list_head *free_me)
 343{
 344	pnfs_generic_layout_insert_lseg(lo, lseg,
 345			ff_lseg_range_is_after,
 346			ff_lseg_merge,
 347			free_me);
 348}
 349
 350static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
 351{
 352	int i, j;
 353
 354	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
 355		for (j = i + 1; j < fls->mirror_array_cnt; j++)
 356			if (fls->mirror_array[i]->efficiency <
 357			    fls->mirror_array[j]->efficiency)
 358				swap(fls->mirror_array[i],
 359				     fls->mirror_array[j]);
 360	}
 361}
 362
 363static struct pnfs_layout_segment *
 364ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
 365		     struct nfs4_layoutget_res *lgr,
 366		     gfp_t gfp_flags)
 367{
 368	struct pnfs_layout_segment *ret;
 369	struct nfs4_ff_layout_segment *fls = NULL;
 370	struct xdr_stream stream;
 371	struct xdr_buf buf;
 372	struct page *scratch;
 373	u64 stripe_unit;
 374	u32 mirror_array_cnt;
 375	__be32 *p;
 376	int i, rc;
 377
 378	dprintk("--> %s\n", __func__);
 379	scratch = alloc_page(gfp_flags);
 380	if (!scratch)
 381		return ERR_PTR(-ENOMEM);
 382
 383	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
 384			      lgr->layoutp->len);
 385	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
 386
 387	/* stripe unit and mirror_array_cnt */
 388	rc = -EIO;
 389	p = xdr_inline_decode(&stream, 8 + 4);
 390	if (!p)
 391		goto out_err_free;
 392
 393	p = xdr_decode_hyper(p, &stripe_unit);
 394	mirror_array_cnt = be32_to_cpup(p++);
 395	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
 396		stripe_unit, mirror_array_cnt);
 397
 398	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
 399	    mirror_array_cnt == 0)
 400		goto out_err_free;
 401
 402	rc = -ENOMEM;
 403	fls = kzalloc(sizeof(*fls), gfp_flags);
 
 404	if (!fls)
 405		goto out_err_free;
 406
 407	fls->mirror_array_cnt = mirror_array_cnt;
 408	fls->stripe_unit = stripe_unit;
 409	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
 410				    sizeof(fls->mirror_array[0]), gfp_flags);
 411	if (fls->mirror_array == NULL)
 412		goto out_err_free;
 413
 414	for (i = 0; i < fls->mirror_array_cnt; i++) {
 415		struct nfs4_ff_layout_mirror *mirror;
 416		struct cred *kcred;
 417		const struct cred __rcu *cred;
 418		kuid_t uid;
 419		kgid_t gid;
 420		u32 ds_count, fh_count, id;
 421		int j;
 422
 423		rc = -EIO;
 424		p = xdr_inline_decode(&stream, 4);
 425		if (!p)
 426			goto out_err_free;
 427		ds_count = be32_to_cpup(p);
 428
 429		/* FIXME: allow for striping? */
 430		if (ds_count != 1)
 431			goto out_err_free;
 432
 433		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
 434		if (fls->mirror_array[i] == NULL) {
 435			rc = -ENOMEM;
 436			goto out_err_free;
 437		}
 438
 439		fls->mirror_array[i]->ds_count = ds_count;
 440
 441		/* deviceid */
 442		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
 443		if (rc)
 444			goto out_err_free;
 445
 446		/* efficiency */
 447		rc = -EIO;
 448		p = xdr_inline_decode(&stream, 4);
 449		if (!p)
 450			goto out_err_free;
 451		fls->mirror_array[i]->efficiency = be32_to_cpup(p);
 452
 453		/* stateid */
 454		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
 455		if (rc)
 456			goto out_err_free;
 457
 458		/* fh */
 459		rc = -EIO;
 460		p = xdr_inline_decode(&stream, 4);
 461		if (!p)
 462			goto out_err_free;
 463		fh_count = be32_to_cpup(p);
 464
 465		fls->mirror_array[i]->fh_versions =
 466			kcalloc(fh_count, sizeof(struct nfs_fh),
 467				gfp_flags);
 468		if (fls->mirror_array[i]->fh_versions == NULL) {
 469			rc = -ENOMEM;
 470			goto out_err_free;
 471		}
 472
 473		for (j = 0; j < fh_count; j++) {
 474			rc = decode_nfs_fh(&stream,
 475					   &fls->mirror_array[i]->fh_versions[j]);
 476			if (rc)
 477				goto out_err_free;
 478		}
 479
 480		fls->mirror_array[i]->fh_versions_cnt = fh_count;
 481
 482		/* user */
 483		rc = decode_name(&stream, &id);
 484		if (rc)
 485			goto out_err_free;
 486
 487		uid = make_kuid(&init_user_ns, id);
 488
 489		/* group */
 490		rc = decode_name(&stream, &id);
 491		if (rc)
 492			goto out_err_free;
 493
 494		gid = make_kgid(&init_user_ns, id);
 495
 496		if (gfp_flags & __GFP_FS)
 497			kcred = prepare_kernel_cred(NULL);
 498		else {
 499			unsigned int nofs_flags = memalloc_nofs_save();
 500			kcred = prepare_kernel_cred(NULL);
 501			memalloc_nofs_restore(nofs_flags);
 502		}
 503		rc = -ENOMEM;
 504		if (!kcred)
 505			goto out_err_free;
 506		kcred->fsuid = uid;
 507		kcred->fsgid = gid;
 508		cred = RCU_INITIALIZER(kcred);
 509
 510		if (lgr->range.iomode == IOMODE_READ)
 511			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
 512		else
 513			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
 514
 515		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
 516		if (mirror != fls->mirror_array[i]) {
 517			/* swap cred ptrs so free_mirror will clean up old */
 518			if (lgr->range.iomode == IOMODE_READ) {
 519				cred = xchg(&mirror->ro_cred, cred);
 520				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
 521			} else {
 522				cred = xchg(&mirror->rw_cred, cred);
 523				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
 524			}
 525			ff_layout_free_mirror(fls->mirror_array[i]);
 526			fls->mirror_array[i] = mirror;
 527		}
 528
 529		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
 530			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
 531			from_kuid(&init_user_ns, uid),
 532			from_kgid(&init_user_ns, gid));
 533	}
 534
 535	p = xdr_inline_decode(&stream, 4);
 536	if (!p)
 537		goto out_sort_mirrors;
 538	fls->flags = be32_to_cpup(p);
 539
 540	p = xdr_inline_decode(&stream, 4);
 541	if (!p)
 542		goto out_sort_mirrors;
 543	for (i=0; i < fls->mirror_array_cnt; i++)
 544		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
 545
 546out_sort_mirrors:
 547	ff_layout_sort_mirrors(fls);
 548	rc = ff_layout_check_layout(lgr);
 549	if (rc)
 550		goto out_err_free;
 551	ret = &fls->generic_hdr;
 552	dprintk("<-- %s (success)\n", __func__);
 553out_free_page:
 554	__free_page(scratch);
 555	return ret;
 556out_err_free:
 557	_ff_layout_free_lseg(fls);
 558	ret = ERR_PTR(rc);
 559	dprintk("<-- %s (%d)\n", __func__, rc);
 560	goto out_free_page;
 561}
 562
 563static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
 564{
 565	struct pnfs_layout_segment *lseg;
 566
 567	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
 568		if (lseg->pls_range.iomode == IOMODE_RW)
 569			return true;
 570
 571	return false;
 572}
 573
 574static void
 575ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
 576{
 577	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
 578
 579	dprintk("--> %s\n", __func__);
 580
 581	if (lseg->pls_range.iomode == IOMODE_RW) {
 582		struct nfs4_flexfile_layout *ffl;
 583		struct inode *inode;
 584
 585		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
 586		inode = ffl->generic_hdr.plh_inode;
 587		spin_lock(&inode->i_lock);
 588		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
 589			ffl->commit_info.nbuckets = 0;
 590			kfree(ffl->commit_info.buckets);
 591			ffl->commit_info.buckets = NULL;
 592		}
 593		spin_unlock(&inode->i_lock);
 594	}
 595	_ff_layout_free_lseg(fls);
 596}
 597
 598/* Return 1 until we have multiple lsegs support */
 599static int
 600ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
 601{
 602	return 1;
 603}
 604
 605static void
 606nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
 607{
 608	/* first IO request? */
 609	if (atomic_inc_return(&timer->n_ops) == 1) {
 610		timer->start_time = now;
 611	}
 612}
 613
 614static ktime_t
 615nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
 616{
 617	ktime_t start;
 618
 619	if (atomic_dec_return(&timer->n_ops) < 0)
 620		WARN_ON_ONCE(1);
 621
 622	start = timer->start_time;
 623	timer->start_time = now;
 624	return ktime_sub(now, start);
 625}
 626
 627static bool
 628nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
 629			    struct nfs4_ff_layoutstat *layoutstat,
 630			    ktime_t now)
 631{
 632	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
 633	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
 634
 635	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
 636	if (!mirror->start_time)
 637		mirror->start_time = now;
 638	if (mirror->report_interval != 0)
 639		report_interval = (s64)mirror->report_interval * 1000LL;
 640	else if (layoutstats_timer != 0)
 641		report_interval = (s64)layoutstats_timer * 1000LL;
 642	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
 643			report_interval) {
 644		ffl->last_report_time = now;
 645		return true;
 646	}
 647
 648	return false;
 649}
 650
 651static void
 652nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
 653		__u64 requested)
 654{
 655	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
 656
 657	iostat->ops_requested++;
 658	iostat->bytes_requested += requested;
 659}
 660
 661static void
 662nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
 663		__u64 requested,
 664		__u64 completed,
 665		ktime_t time_completed,
 666		ktime_t time_started)
 667{
 668	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
 669	ktime_t completion_time = ktime_sub(time_completed, time_started);
 670	ktime_t timer;
 671
 672	iostat->ops_completed++;
 673	iostat->bytes_completed += completed;
 674	iostat->bytes_not_delivered += requested - completed;
 675
 676	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
 677	iostat->total_busy_time =
 678			ktime_add(iostat->total_busy_time, timer);
 679	iostat->aggregate_completion_time =
 680			ktime_add(iostat->aggregate_completion_time,
 681					completion_time);
 682}
 683
 684static void
 685nfs4_ff_layout_stat_io_start_read(struct inode *inode,
 686		struct nfs4_ff_layout_mirror *mirror,
 687		__u64 requested, ktime_t now)
 688{
 689	bool report;
 690
 691	spin_lock(&mirror->lock);
 692	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
 693	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
 694	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 695	spin_unlock(&mirror->lock);
 696
 697	if (report)
 698		pnfs_report_layoutstat(inode, GFP_KERNEL);
 699}
 700
 701static void
 702nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
 703		struct nfs4_ff_layout_mirror *mirror,
 704		__u64 requested,
 705		__u64 completed)
 706{
 707	spin_lock(&mirror->lock);
 708	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
 709			requested, completed,
 710			ktime_get(), task->tk_start);
 711	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 712	spin_unlock(&mirror->lock);
 713}
 714
 715static void
 716nfs4_ff_layout_stat_io_start_write(struct inode *inode,
 717		struct nfs4_ff_layout_mirror *mirror,
 718		__u64 requested, ktime_t now)
 719{
 720	bool report;
 721
 722	spin_lock(&mirror->lock);
 723	report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat, now);
 724	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
 725	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 726	spin_unlock(&mirror->lock);
 727
 728	if (report)
 729		pnfs_report_layoutstat(inode, GFP_NOIO);
 730}
 731
 732static void
 733nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
 734		struct nfs4_ff_layout_mirror *mirror,
 735		__u64 requested,
 736		__u64 completed,
 737		enum nfs3_stable_how committed)
 738{
 739	if (committed == NFS_UNSTABLE)
 740		requested = completed = 0;
 741
 742	spin_lock(&mirror->lock);
 743	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
 744			requested, completed, ktime_get(), task->tk_start);
 745	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 746	spin_unlock(&mirror->lock);
 747}
 748
 749static int
 750ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
 751			    struct nfs_commit_info *cinfo,
 752			    gfp_t gfp_flags)
 753{
 754	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
 755	struct pnfs_commit_bucket *buckets;
 756	int size;
 757
 758	if (cinfo->ds->nbuckets != 0) {
 759		/* This assumes there is only one RW lseg per file.
 760		 * To support multiple lseg per file, we need to
 761		 * change struct pnfs_commit_bucket to allow dynamic
 762		 * increasing nbuckets.
 763		 */
 764		return 0;
 765	}
 766
 767	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
 768
 769	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
 770			  gfp_flags);
 771	if (!buckets)
 772		return -ENOMEM;
 773	else {
 774		int i;
 775
 776		spin_lock(&cinfo->inode->i_lock);
 777		if (cinfo->ds->nbuckets != 0)
 778			kfree(buckets);
 779		else {
 780			cinfo->ds->buckets = buckets;
 781			cinfo->ds->nbuckets = size;
 782			for (i = 0; i < size; i++) {
 783				INIT_LIST_HEAD(&buckets[i].written);
 784				INIT_LIST_HEAD(&buckets[i].committing);
 785				/* mark direct verifier as unset */
 786				buckets[i].direct_verf.committed =
 787					NFS_INVALID_STABLE_HOW;
 788			}
 789		}
 790		spin_unlock(&cinfo->inode->i_lock);
 791		return 0;
 792	}
 793}
 794
 795static void
 796ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, int idx)
 797{
 798	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
 799
 800	if (devid)
 801		nfs4_mark_deviceid_unavailable(devid);
 802}
 803
 804static void
 805ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, int idx)
 806{
 807	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
 808
 809	if (devid)
 810		nfs4_mark_deviceid_available(devid);
 811}
 812
 813static struct nfs4_pnfs_ds *
 814ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
 815			     int start_idx, int *best_idx,
 816			     bool check_device)
 817{
 818	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
 819	struct nfs4_ff_layout_mirror *mirror;
 820	struct nfs4_pnfs_ds *ds;
 821	bool fail_return = false;
 822	int idx;
 823
 824	/* mirrors are initially sorted by efficiency */
 825	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
 826		if (idx+1 == fls->mirror_array_cnt)
 827			fail_return = !check_device;
 828
 829		mirror = FF_LAYOUT_COMP(lseg, idx);
 830		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, fail_return);
 831		if (!ds)
 832			continue;
 833
 834		if (check_device &&
 835		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
 836			continue;
 837
 838		*best_idx = idx;
 839		return ds;
 840	}
 841
 842	return NULL;
 843}
 844
 845static struct nfs4_pnfs_ds *
 846ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
 847				 int start_idx, int *best_idx)
 848{
 849	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
 850}
 851
 852static struct nfs4_pnfs_ds *
 853ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
 854				   int start_idx, int *best_idx)
 855{
 856	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
 857}
 858
 859static struct nfs4_pnfs_ds *
 860ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
 861				  int start_idx, int *best_idx)
 862{
 863	struct nfs4_pnfs_ds *ds;
 864
 865	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
 866	if (ds)
 867		return ds;
 868	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
 869}
 870
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 871static void
 872ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
 873		      struct nfs_page *req,
 874		      bool strict_iomode)
 875{
 876	pnfs_put_lseg(pgio->pg_lseg);
 877	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 878					   nfs_req_openctx(req),
 879					   0,
 880					   NFS4_MAX_UINT64,
 881					   IOMODE_READ,
 882					   strict_iomode,
 883					   GFP_KERNEL);
 884	if (IS_ERR(pgio->pg_lseg)) {
 885		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 886		pgio->pg_lseg = NULL;
 887	}
 888}
 889
 890static void
 891ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
 892			struct nfs_page *req)
 893{
 894	struct nfs_pgio_mirror *pgm;
 895	struct nfs4_ff_layout_mirror *mirror;
 896	struct nfs4_pnfs_ds *ds;
 897	int ds_idx;
 898
 
 
 
 899retry:
 900	pnfs_generic_pg_check_layout(pgio);
 901	/* Use full layout for now */
 902	if (!pgio->pg_lseg) {
 903		ff_layout_pg_get_read(pgio, req, false);
 904		if (!pgio->pg_lseg)
 905			goto out_nolseg;
 906	}
 907	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
 908		ff_layout_pg_get_read(pgio, req, true);
 909		if (!pgio->pg_lseg)
 910			goto out_nolseg;
 911	}
 
 
 912
 913	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
 914	if (!ds) {
 915		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
 916			goto out_mds;
 917		pnfs_put_lseg(pgio->pg_lseg);
 918		pgio->pg_lseg = NULL;
 919		/* Sleep for 1 second before retrying */
 920		ssleep(1);
 921		goto retry;
 922	}
 923
 924	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
 925
 926	pgio->pg_mirror_idx = ds_idx;
 927
 928	/* read always uses only one mirror - idx 0 for pgio layer */
 929	pgm = &pgio->pg_mirrors[0];
 930	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
 931
 932	if (NFS_SERVER(pgio->pg_inode)->flags &
 933			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
 934		pgio->pg_maxretrans = io_maxretrans;
 935	return;
 936out_nolseg:
 937	if (pgio->pg_error < 0)
 938		return;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 939out_mds:
 940	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
 941			0, NFS4_MAX_UINT64, IOMODE_READ,
 942			NFS_I(pgio->pg_inode)->layout,
 943			pgio->pg_lseg);
 944	pnfs_put_lseg(pgio->pg_lseg);
 945	pgio->pg_lseg = NULL;
 946	pgio->pg_maxretrans = 0;
 947	nfs_pageio_reset_read_mds(pgio);
 948}
 949
 950static void
 951ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
 952			struct nfs_page *req)
 953{
 954	struct nfs4_ff_layout_mirror *mirror;
 955	struct nfs_pgio_mirror *pgm;
 956	struct nfs_commit_info cinfo;
 957	struct nfs4_pnfs_ds *ds;
 958	int i;
 959	int status;
 960
 961retry:
 962	pnfs_generic_pg_check_layout(pgio);
 963	if (!pgio->pg_lseg) {
 964		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 965						   nfs_req_openctx(req),
 966						   0,
 967						   NFS4_MAX_UINT64,
 968						   IOMODE_RW,
 969						   false,
 970						   GFP_NOFS);
 971		if (IS_ERR(pgio->pg_lseg)) {
 972			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 973			pgio->pg_lseg = NULL;
 974			return;
 975		}
 976	}
 977	/* If no lseg, fall back to write through mds */
 978	if (pgio->pg_lseg == NULL)
 979		goto out_mds;
 980
 981	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
 982	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
 983	if (status < 0)
 984		goto out_mds;
 985
 986	/* Use a direct mapping of ds_idx to pgio mirror_idx */
 987	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
 988	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
 989		goto out_mds;
 990
 991	for (i = 0; i < pgio->pg_mirror_count; i++) {
 992		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
 993		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
 994		if (!ds) {
 995			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
 996				goto out_mds;
 997			pnfs_put_lseg(pgio->pg_lseg);
 998			pgio->pg_lseg = NULL;
 999			/* Sleep for 1 second before retrying */
1000			ssleep(1);
1001			goto retry;
1002		}
1003		pgm = &pgio->pg_mirrors[i];
1004		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
1005	}
1006
1007	if (NFS_SERVER(pgio->pg_inode)->flags &
1008			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
1009		pgio->pg_maxretrans = io_maxretrans;
1010	return;
1011
 
 
 
1012out_mds:
1013	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
1014			0, NFS4_MAX_UINT64, IOMODE_RW,
1015			NFS_I(pgio->pg_inode)->layout,
1016			pgio->pg_lseg);
1017	pnfs_put_lseg(pgio->pg_lseg);
1018	pgio->pg_lseg = NULL;
1019	pgio->pg_maxretrans = 0;
1020	nfs_pageio_reset_write_mds(pgio);
 
1021}
1022
1023static unsigned int
1024ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
1025				    struct nfs_page *req)
1026{
1027	if (!pgio->pg_lseg) {
1028		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1029						   nfs_req_openctx(req),
1030						   0,
1031						   NFS4_MAX_UINT64,
1032						   IOMODE_RW,
1033						   false,
1034						   GFP_NOFS);
1035		if (IS_ERR(pgio->pg_lseg)) {
1036			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
1037			pgio->pg_lseg = NULL;
1038			goto out;
1039		}
1040	}
1041	if (pgio->pg_lseg)
1042		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
1043
1044	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
1045			0, NFS4_MAX_UINT64, IOMODE_RW,
1046			NFS_I(pgio->pg_inode)->layout,
1047			pgio->pg_lseg);
1048	/* no lseg means that pnfs is not in use, so no mirroring here */
1049	nfs_pageio_reset_write_mds(pgio);
1050out:
1051	return 1;
1052}
1053
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1054static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
1055	.pg_init = ff_layout_pg_init_read,
1056	.pg_test = pnfs_generic_pg_test,
1057	.pg_doio = pnfs_generic_pg_readpages,
1058	.pg_cleanup = pnfs_generic_pg_cleanup,
1059};
1060
1061static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
1062	.pg_init = ff_layout_pg_init_write,
1063	.pg_test = pnfs_generic_pg_test,
1064	.pg_doio = pnfs_generic_pg_writepages,
1065	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1066	.pg_cleanup = pnfs_generic_pg_cleanup,
1067};
1068
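/*
 * A write failed on the DS: either hand it back to the pNFS layer to be
 * rescheduled (retry_pnfs), or mark the header NFS_IOHDR_REDO and resend
 * the request through the MDS.
 */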
1069static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1070{
1071	struct rpc_task *task = &hdr->task;
1072
1073	pnfs_layoutcommit_inode(hdr->inode, false);
1074
1075	if (retry_pnfs) {
1076		dprintk("%s Reset task %5u for i/o through pNFS "
1077			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1078			hdr->task.tk_pid,
1079			hdr->inode->i_sb->s_id,
1080			(unsigned long long)NFS_FILEID(hdr->inode),
1081			hdr->args.count,
1082			(unsigned long long)hdr->args.offset);
1083
1084		hdr->completion_ops->reschedule_io(hdr);
1085		return;
1086	}
1087
1088	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1089		dprintk("%s Reset task %5u for i/o through MDS "
1090			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1091			hdr->task.tk_pid,
1092			hdr->inode->i_sb->s_id,
1093			(unsigned long long)NFS_FILEID(hdr->inode),
1094			hdr->args.count,
1095			(unsigned long long)hdr->args.offset);
1096
1097		trace_pnfs_mds_fallback_write_done(hdr->inode,
1098				hdr->args.offset, hdr->args.count,
1099				IOMODE_RW, NFS_I(hdr->inode)->layout,
1100				hdr->lseg);
1101		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1102	}
1103}
1104
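/*
 * A read failed on the DS: mark the header NFS_IOHDR_REDO and resend the
 * request through the MDS.
 */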
1105static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1106{
1107	struct rpc_task *task = &hdr->task;
1108
1109	pnfs_layoutcommit_inode(hdr->inode, false);
1110
1111	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1112		dprintk("%s Reset task %5u for i/o through MDS "
1113			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1114			hdr->task.tk_pid,
1115			hdr->inode->i_sb->s_id,
1116			(unsigned long long)NFS_FILEID(hdr->inode),
1117			hdr->args.count,
1118			(unsigned long long)hdr->args.offset);
1119
1120		trace_pnfs_mds_fallback_read_done(hdr->inode,
1121				hdr->args.offset, hdr->args.count,
1122				IOMODE_READ, NFS_I(hdr->inode)->layout,
1123				hdr->lseg);
1124		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1125	}
1126}
1127
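/*
 * NFSv4 DS error handling.  The return value tells the caller how to
 * recover: -NFS4ERR_RESET_TO_PNFS (retry via the pNFS path, e.g. another
 * mirror), -NFS4ERR_RESET_TO_MDS (resend through the MDS), or -EAGAIN
 * (restart this RPC once any scheduled session/state recovery completes).
 */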
1128static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1129					   struct nfs4_state *state,
1130					   struct nfs_client *clp,
1131					   struct pnfs_layout_segment *lseg,
1132					   int idx)
1133{
1134	struct pnfs_layout_hdr *lo = lseg->pls_layout;
1135	struct inode *inode = lo->plh_inode;
1136	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1137	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1138
1139	switch (task->tk_status) {
1140	case -NFS4ERR_BADSESSION:
1141	case -NFS4ERR_BADSLOT:
1142	case -NFS4ERR_BAD_HIGH_SLOT:
1143	case -NFS4ERR_DEADSESSION:
1144	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1145	case -NFS4ERR_SEQ_FALSE_RETRY:
1146	case -NFS4ERR_SEQ_MISORDERED:
1147		dprintk("%s ERROR %d, Reset session. Exchangeid "
1148			"flags 0x%x\n", __func__, task->tk_status,
1149			clp->cl_exchange_flags);
1150		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1151		break;
1152	case -NFS4ERR_DELAY:
1153	case -NFS4ERR_GRACE:
1154		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1155		break;
1156	case -NFS4ERR_RETRY_UNCACHED_REP:
1157		break;
1158	/* Invalidate Layout errors */
1159	case -NFS4ERR_PNFS_NO_LAYOUT:
1160	case -ESTALE:           /* mapped NFS4ERR_STALE */
1161	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
1162	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
1163	case -NFS4ERR_FHEXPIRED:
1164	case -NFS4ERR_WRONG_TYPE:
1165		dprintk("%s Invalid layout error %d\n", __func__,
1166			task->tk_status);
1167		/*
1168		 * Destroy layout so new i/o will get a new layout.
1169		 * Layout will not be destroyed until all current lseg
1170		 * references are put. Mark layout as invalid to resend failed
1171		 * i/o and all i/o waiting on the slot table to the MDS until
1172		 * layout is destroyed and a new valid layout is obtained.
1173		 */
1174		pnfs_destroy_layout(NFS_I(inode));
1175		rpc_wake_up(&tbl->slot_tbl_waitq);
1176		goto reset;
1177	/* RPC connection errors */
1178	case -ECONNREFUSED:
1179	case -EHOSTDOWN:
1180	case -EHOSTUNREACH:
1181	case -ENETUNREACH:
1182	case -EIO:
1183	case -ETIMEDOUT:
1184	case -EPIPE:
1185		dprintk("%s DS connection error %d\n", __func__,
1186			task->tk_status);
1187		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1188				&devid->deviceid);
1189		rpc_wake_up(&tbl->slot_tbl_waitq);
 1190		fallthrough;
1191	default:
1192		if (ff_layout_avoid_mds_available_ds(lseg))
1193			return -NFS4ERR_RESET_TO_PNFS;
1194reset:
1195		dprintk("%s Retry through MDS. Error %d\n", __func__,
1196			task->tk_status);
1197		return -NFS4ERR_RESET_TO_MDS;
1198	}
1199	task->tk_status = 0;
1200	return -EAGAIN;
1201}
1202
1203/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1204static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1205					   struct pnfs_layout_segment *lseg,
1206					   int idx)
1207{
1208	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1209
1210	switch (task->tk_status) {
1211	/* File access problems. Don't mark the device as unavailable */
1212	case -EACCES:
1213	case -ESTALE:
1214	case -EISDIR:
1215	case -EBADHANDLE:
1216	case -ELOOP:
1217	case -ENOSPC:
1218		break;
1219	case -EJUKEBOX:
1220		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1221		goto out_retry;
1222	default:
1223		dprintk("%s DS connection error %d\n", __func__,
1224			task->tk_status);
1225		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1226				&devid->deviceid);
1227	}
1228	/* FIXME: Need to prevent infinite looping here. */
1229	return -NFS4ERR_RESET_TO_PNFS;
1230out_retry:
1231	task->tk_status = 0;
1232	rpc_restart_call_prepare(task);
1233	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1234	return -EAGAIN;
1235}
1236
1237static int ff_layout_async_handle_error(struct rpc_task *task,
1238					struct nfs4_state *state,
1239					struct nfs_client *clp,
1240					struct pnfs_layout_segment *lseg,
1241					int idx)
1242{
1243	int vers = clp->cl_nfs_mod->rpc_vers->number;
1244
1245	if (task->tk_status >= 0) {
1246		ff_layout_mark_ds_reachable(lseg, idx);
1247		return 0;
1248	}
1249
1250	/* Handle the case of an invalid layout segment */
1251	if (!pnfs_is_valid_lseg(lseg))
1252		return -NFS4ERR_RESET_TO_PNFS;
1253
1254	switch (vers) {
1255	case 3:
1256		return ff_layout_async_handle_error_v3(task, lseg, idx);
1257	case 4:
1258		return ff_layout_async_handle_error_v4(task, state, clp,
1259						       lseg, idx);
1260	default:
1261		/* should never happen */
1262		WARN_ON_ONCE(1);
1263		return 0;
1264	}
1265}
1266
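/*
 * Record a DS I/O error against the mirror so it can later be reported
 * with LAYOUTERROR and/or LAYOUTRETURN.  Local connection and transport
 * errors are mapped to NFS4ERR_NXIO, which also marks the DS unreachable.
 */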
1267static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1268					int idx, u64 offset, u64 length,
1269					u32 status, int opnum, int error)
1270{
1271	struct nfs4_ff_layout_mirror *mirror;
1272	int err;
1273
1274	if (status == 0) {
1275		switch (error) {
1276		case -ETIMEDOUT:
1277		case -EPFNOSUPPORT:
1278		case -EPROTONOSUPPORT:
1279		case -EOPNOTSUPP:
1280		case -ECONNREFUSED:
1281		case -ECONNRESET:
1282		case -EHOSTDOWN:
1283		case -EHOSTUNREACH:
1284		case -ENETUNREACH:
1285		case -EADDRINUSE:
1286		case -ENOBUFS:
1287		case -EPIPE:
1288		case -EPERM:
1289			status = NFS4ERR_NXIO;
1290			break;
1291		case -EACCES:
1292			status = NFS4ERR_ACCESS;
1293			break;
1294		default:
1295			return;
1296		}
1297	}
1298
1299	switch (status) {
1300	case NFS4ERR_DELAY:
1301	case NFS4ERR_GRACE:
1302		return;
1303	default:
1304		break;
1305	}
1306
1307	mirror = FF_LAYOUT_COMP(lseg, idx);
1308	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1309				       mirror, offset, length, status, opnum,
1310				       GFP_NOIO);
1311	if (status == NFS4ERR_NXIO)
1312		ff_layout_mark_ds_unreachable(lseg, idx);
1313	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
1314	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1315}
1316
1317/* NFS_PROTO call done callback routines */
1318static int ff_layout_read_done_cb(struct rpc_task *task,
1319				struct nfs_pgio_header *hdr)
1320{
1321	int new_idx = hdr->pgio_mirror_idx;
1322	int err;
1323
1324	trace_nfs4_pnfs_read(hdr, task->tk_status);
1325	if (task->tk_status < 0)
1326		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1327					    hdr->args.offset, hdr->args.count,
1328					    hdr->res.op_status, OP_READ,
1329					    task->tk_status);
1330	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1331					   hdr->ds_clp, hdr->lseg,
1332					   hdr->pgio_mirror_idx);
1333
1334	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1335	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1336	switch (err) {
1337	case -NFS4ERR_RESET_TO_PNFS:
1338		if (ff_layout_choose_best_ds_for_read(hdr->lseg,
1339					hdr->pgio_mirror_idx + 1,
1340					&new_idx))
1341			goto out_layouterror;
1342		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1343		return task->tk_status;
1344	case -NFS4ERR_RESET_TO_MDS:
1345		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1346		return task->tk_status;
1347	case -EAGAIN:
1348		goto out_eagain;
1349	}
1350
1351	return 0;
1352out_layouterror:
1353	ff_layout_read_record_layoutstats_done(task, hdr);
1354	ff_layout_send_layouterror(hdr->lseg);
1355	hdr->pgio_mirror_idx = new_idx;
1356out_eagain:
1357	rpc_restart_call_prepare(task);
1358	return -EAGAIN;
1359}
1360
1361static bool
1362ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1363{
1364	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1365}
1366
1367/*
1368 * We reference the rpc_cred of the first WRITE that triggers the need for
1369 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1370 * rfc5661 is not clear about which credential should be used.
1371 *
 1372 * The flexfiles client should treat a FILE_SYNC reply from the DS as
 1373 * DATA_SYNC, so per http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 1374 * we always send a layoutcommit after DS writes.
1375 */
1376static void
1377ff_layout_set_layoutcommit(struct inode *inode,
1378		struct pnfs_layout_segment *lseg,
1379		loff_t end_offset)
1380{
1381	if (!ff_layout_need_layoutcommit(lseg))
1382		return;
1383
1384	pnfs_set_layoutcommit(inode, lseg, end_offset);
1385	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1386		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
1387}
1388
1389static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1390		struct nfs_pgio_header *hdr)
1391{
1392	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1393		return;
1394	nfs4_ff_layout_stat_io_start_read(hdr->inode,
1395			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1396			hdr->args.count,
1397			task->tk_start);
1398}
1399
1400static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1401		struct nfs_pgio_header *hdr)
1402{
1403	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1404		return;
1405	nfs4_ff_layout_stat_io_end_read(task,
1406			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1407			hdr->args.count,
1408			hdr->res.count);
1409	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1410}
1411
1412static int ff_layout_read_prepare_common(struct rpc_task *task,
1413					 struct nfs_pgio_header *hdr)
1414{
1415	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1416		rpc_exit(task, -EIO);
1417		return -EIO;
1418	}
1419
1420	ff_layout_read_record_layoutstats_start(task, hdr);
1421	return 0;
1422}
1423
1424/*
1425 * Call ops for the async read/write cases
1426 * In the case of dense layouts, the offset needs to be reset to its
1427 * original value.
1428 */
1429static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1430{
1431	struct nfs_pgio_header *hdr = data;
1432
1433	if (ff_layout_read_prepare_common(task, hdr))
1434		return;
1435
1436	rpc_call_start(task);
1437}
1438
1439static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1440{
1441	struct nfs_pgio_header *hdr = data;
1442
1443	if (nfs4_setup_sequence(hdr->ds_clp,
1444				&hdr->args.seq_args,
1445				&hdr->res.seq_res,
1446				task))
1447		return;
1448
1449	ff_layout_read_prepare_common(task, hdr);
1450}
1451
1452static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1453{
1454	struct nfs_pgio_header *hdr = data;
1455
1456	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
1457
1458	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1459	    task->tk_status == 0) {
1460		nfs4_sequence_done(task, &hdr->res.seq_res);
1461		return;
1462	}
1463
1464	/* Note this may cause RPC to be resent */
1465	hdr->mds_ops->rpc_call_done(task, hdr);
1466}
1467
1468static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1469{
1470	struct nfs_pgio_header *hdr = data;
1471
1472	ff_layout_read_record_layoutstats_done(task, hdr);
1473	rpc_count_iostats_metrics(task,
1474	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1475}
1476
1477static void ff_layout_read_release(void *data)
1478{
1479	struct nfs_pgio_header *hdr = data;
1480
1481	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1482	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1483		ff_layout_send_layouterror(hdr->lseg);
1484		pnfs_read_resend_pnfs(hdr);
1485	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1486		ff_layout_reset_read(hdr);
1487	pnfs_generic_rw_release(data);
1488}
1489
1490
1491static int ff_layout_write_done_cb(struct rpc_task *task,
1492				struct nfs_pgio_header *hdr)
1493{
1494	loff_t end_offs = 0;
1495	int err;
1496
1497	trace_nfs4_pnfs_write(hdr, task->tk_status);
1498	if (task->tk_status < 0)
1499		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1500					    hdr->args.offset, hdr->args.count,
1501					    hdr->res.op_status, OP_WRITE,
1502					    task->tk_status);
1503	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1504					   hdr->ds_clp, hdr->lseg,
1505					   hdr->pgio_mirror_idx);
1506
1507	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1508	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1509	switch (err) {
1510	case -NFS4ERR_RESET_TO_PNFS:
1511		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1512		return task->tk_status;
1513	case -NFS4ERR_RESET_TO_MDS:
1514		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1515		return task->tk_status;
1516	case -EAGAIN:
1517		return -EAGAIN;
1518	}
1519
1520	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1521	    hdr->res.verf->committed == NFS_DATA_SYNC)
1522		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1523
1524	/* Note: if the write is unstable, don't set end_offs until commit */
1525	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1526
 1527	/* Zero out the fattr since we don't care about DS attributes at all */
1528	hdr->fattr.valid = 0;
1529	if (task->tk_status >= 0)
1530		nfs_writeback_update_inode(hdr);
1531
1532	return 0;
1533}
1534
1535static int ff_layout_commit_done_cb(struct rpc_task *task,
1536				     struct nfs_commit_data *data)
1537{
1538	int err;
1539
1540	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
1541	if (task->tk_status < 0)
1542		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1543					    data->args.offset, data->args.count,
1544					    data->res.op_status, OP_COMMIT,
1545					    task->tk_status);
1546	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1547					   data->lseg, data->ds_commit_index);
1548
1549	switch (err) {
1550	case -NFS4ERR_RESET_TO_PNFS:
1551		pnfs_generic_prepare_to_resend_writes(data);
1552		return -EAGAIN;
1553	case -NFS4ERR_RESET_TO_MDS:
1554		pnfs_generic_prepare_to_resend_writes(data);
1555		return -EAGAIN;
1556	case -EAGAIN:
1557		rpc_restart_call_prepare(task);
1558		return -EAGAIN;
1559	}
1560
1561	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1562
1563	return 0;
1564}
1565
1566static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1567		struct nfs_pgio_header *hdr)
1568{
1569	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1570		return;
1571	nfs4_ff_layout_stat_io_start_write(hdr->inode,
1572			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1573			hdr->args.count,
1574			task->tk_start);
1575}
1576
1577static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1578		struct nfs_pgio_header *hdr)
1579{
1580	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1581		return;
1582	nfs4_ff_layout_stat_io_end_write(task,
1583			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1584			hdr->args.count, hdr->res.count,
1585			hdr->res.verf->committed);
1586	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1587}
1588
1589static int ff_layout_write_prepare_common(struct rpc_task *task,
1590					  struct nfs_pgio_header *hdr)
1591{
1592	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1593		rpc_exit(task, -EIO);
1594		return -EIO;
1595	}
1596
1597	ff_layout_write_record_layoutstats_start(task, hdr);
1598	return 0;
1599}
1600
1601static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1602{
1603	struct nfs_pgio_header *hdr = data;
1604
1605	if (ff_layout_write_prepare_common(task, hdr))
1606		return;
1607
1608	rpc_call_start(task);
1609}
1610
1611static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1612{
1613	struct nfs_pgio_header *hdr = data;
1614
1615	if (nfs4_setup_sequence(hdr->ds_clp,
1616				&hdr->args.seq_args,
1617				&hdr->res.seq_res,
1618				task))
1619		return;
1620
1621	ff_layout_write_prepare_common(task, hdr);
1622}
1623
1624static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1625{
1626	struct nfs_pgio_header *hdr = data;
1627
1628	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1629	    task->tk_status == 0) {
1630		nfs4_sequence_done(task, &hdr->res.seq_res);
1631		return;
1632	}
1633
1634	/* Note this may cause RPC to be resent */
1635	hdr->mds_ops->rpc_call_done(task, hdr);
1636}
1637
1638static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1639{
1640	struct nfs_pgio_header *hdr = data;
1641
1642	ff_layout_write_record_layoutstats_done(task, hdr);
1643	rpc_count_iostats_metrics(task,
1644	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1645}
1646
1647static void ff_layout_write_release(void *data)
1648{
1649	struct nfs_pgio_header *hdr = data;
1650
1651	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1652	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1653		ff_layout_send_layouterror(hdr->lseg);
1654		ff_layout_reset_write(hdr, true);
1655	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1656		ff_layout_reset_write(hdr, false);
1657	pnfs_generic_rw_release(data);
1658}
1659
1660static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1661		struct nfs_commit_data *cdata)
1662{
1663	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1664		return;
1665	nfs4_ff_layout_stat_io_start_write(cdata->inode,
1666			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1667			0, task->tk_start);
1668}
1669
1670static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1671		struct nfs_commit_data *cdata)
1672{
1673	struct nfs_page *req;
1674	__u64 count = 0;
1675
1676	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1677		return;
1678
1679	if (task->tk_status == 0) {
1680		list_for_each_entry(req, &cdata->pages, wb_list)
1681			count += req->wb_bytes;
1682	}
1683	nfs4_ff_layout_stat_io_end_write(task,
1684			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1685			count, count, NFS_FILE_SYNC);
1686	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1687}
1688
1689static void ff_layout_commit_prepare_common(struct rpc_task *task,
1690		struct nfs_commit_data *cdata)
1691{
1692	ff_layout_commit_record_layoutstats_start(task, cdata);
1693}
1694
1695static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1696{
1697	ff_layout_commit_prepare_common(task, data);
1698	rpc_call_start(task);
1699}
1700
1701static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1702{
1703	struct nfs_commit_data *wdata = data;
1704
1705	if (nfs4_setup_sequence(wdata->ds_clp,
1706				&wdata->args.seq_args,
1707				&wdata->res.seq_res,
1708				task))
1709		return;
1710	ff_layout_commit_prepare_common(task, data);
1711}
1712
1713static void ff_layout_commit_done(struct rpc_task *task, void *data)
1714{
1715	pnfs_generic_write_commit_done(task, data);
1716}
1717
1718static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1719{
1720	struct nfs_commit_data *cdata = data;
1721
1722	ff_layout_commit_record_layoutstats_done(task, cdata);
1723	rpc_count_iostats_metrics(task,
1724	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1725}
1726
1727static void ff_layout_commit_release(void *data)
1728{
1729	struct nfs_commit_data *cdata = data;
1730
1731	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1732	pnfs_generic_commit_release(data);
1733}
1734
1735static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1736	.rpc_call_prepare = ff_layout_read_prepare_v3,
1737	.rpc_call_done = ff_layout_read_call_done,
1738	.rpc_count_stats = ff_layout_read_count_stats,
1739	.rpc_release = ff_layout_read_release,
1740};
1741
1742static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1743	.rpc_call_prepare = ff_layout_read_prepare_v4,
1744	.rpc_call_done = ff_layout_read_call_done,
1745	.rpc_count_stats = ff_layout_read_count_stats,
1746	.rpc_release = ff_layout_read_release,
1747};
1748
1749static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1750	.rpc_call_prepare = ff_layout_write_prepare_v3,
1751	.rpc_call_done = ff_layout_write_call_done,
1752	.rpc_count_stats = ff_layout_write_count_stats,
1753	.rpc_release = ff_layout_write_release,
1754};
1755
1756static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1757	.rpc_call_prepare = ff_layout_write_prepare_v4,
1758	.rpc_call_done = ff_layout_write_call_done,
1759	.rpc_count_stats = ff_layout_write_count_stats,
1760	.rpc_release = ff_layout_write_release,
1761};
1762
1763static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1764	.rpc_call_prepare = ff_layout_commit_prepare_v3,
1765	.rpc_call_done = ff_layout_commit_done,
1766	.rpc_count_stats = ff_layout_commit_count_stats,
1767	.rpc_release = ff_layout_commit_release,
1768};
1769
1770static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1771	.rpc_call_prepare = ff_layout_commit_prepare_v4,
1772	.rpc_call_done = ff_layout_commit_done,
1773	.rpc_count_stats = ff_layout_commit_count_stats,
1774	.rpc_release = ff_layout_commit_release,
1775};
1776
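/*
 * Set up and fire an asynchronous READ to the DS backing the selected
 * mirror.  Returns PNFS_ATTEMPTED on success, PNFS_TRY_AGAIN if another
 * DS might still satisfy the read, or PNFS_NOT_ATTEMPTED to make the
 * caller fall back to the MDS.
 */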
1777static enum pnfs_try_status
1778ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1779{
1780	struct pnfs_layout_segment *lseg = hdr->lseg;
1781	struct nfs4_pnfs_ds *ds;
1782	struct rpc_clnt *ds_clnt;
1783	struct nfs4_ff_layout_mirror *mirror;
1784	const struct cred *ds_cred;
1785	loff_t offset = hdr->args.offset;
1786	u32 idx = hdr->pgio_mirror_idx;
1787	int vers;
1788	struct nfs_fh *fh;
1789
1790	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1791		__func__, hdr->inode->i_ino,
1792		hdr->args.pgbase, (size_t)hdr->args.count, offset);
1793
1794	mirror = FF_LAYOUT_COMP(lseg, idx);
1795	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1796	if (!ds)
1797		goto out_failed;
1798
1799	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1800						   hdr->inode);
1801	if (IS_ERR(ds_clnt))
1802		goto out_failed;
1803
1804	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1805	if (!ds_cred)
1806		goto out_failed;
1807
1808	vers = nfs4_ff_layout_ds_version(mirror);
1809
1810	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1811		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1812
1813	hdr->pgio_done_cb = ff_layout_read_done_cb;
1814	refcount_inc(&ds->ds_clp->cl_count);
1815	hdr->ds_clp = ds->ds_clp;
1816	fh = nfs4_ff_layout_select_ds_fh(mirror);
1817	if (fh)
1818		hdr->args.fh = fh;
1819
1820	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1821
1822	/*
1823	 * Note that if we ever decide to split across DSes,
1824	 * then we may need to handle dense-like offsets.
1825	 */
1826	hdr->args.offset = offset;
1827	hdr->mds_offset = offset;
1828
1829	/* Perform an asynchronous read to ds */
1830	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1831			  vers == 3 ? &ff_layout_read_call_ops_v3 :
1832				      &ff_layout_read_call_ops_v4,
1833			  0, RPC_TASK_SOFTCONN);
1834	put_cred(ds_cred);
1835	return PNFS_ATTEMPTED;
1836
1837out_failed:
1838	if (ff_layout_avoid_mds_available_ds(lseg))
1839		return PNFS_TRY_AGAIN;
1840	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
1841			hdr->args.offset, hdr->args.count,
1842			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
1843	return PNFS_NOT_ATTEMPTED;
1844}
1845
1846/* Perform async writes. */
1847static enum pnfs_try_status
1848ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1849{
1850	struct pnfs_layout_segment *lseg = hdr->lseg;
1851	struct nfs4_pnfs_ds *ds;
1852	struct rpc_clnt *ds_clnt;
1853	struct nfs4_ff_layout_mirror *mirror;
1854	const struct cred *ds_cred;
1855	loff_t offset = hdr->args.offset;
1856	int vers;
1857	struct nfs_fh *fh;
1858	int idx = hdr->pgio_mirror_idx;
1859
1860	mirror = FF_LAYOUT_COMP(lseg, idx);
1861	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1862	if (!ds)
1863		goto out_failed;
1864
1865	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1866						   hdr->inode);
1867	if (IS_ERR(ds_clnt))
1868		goto out_failed;
1869
1870	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1871	if (!ds_cred)
1872		goto out_failed;
1873
1874	vers = nfs4_ff_layout_ds_version(mirror);
1875
1876	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1877		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1878		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1879		vers);
1880
1881	hdr->pgio_done_cb = ff_layout_write_done_cb;
1882	refcount_inc(&ds->ds_clp->cl_count);
1883	hdr->ds_clp = ds->ds_clp;
1884	hdr->ds_commit_idx = idx;
1885	fh = nfs4_ff_layout_select_ds_fh(mirror);
1886	if (fh)
1887		hdr->args.fh = fh;
1888
1889	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1890
1891	/*
1892	 * Note that if we ever decide to split across DSes,
1893	 * then we may need to handle dense-like offsets.
1894	 */
1895	hdr->args.offset = offset;
1896
1897	/* Perform an asynchronous write */
1898	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1899			  vers == 3 ? &ff_layout_write_call_ops_v3 :
1900				      &ff_layout_write_call_ops_v4,
1901			  sync, RPC_TASK_SOFTCONN);
1902	put_cred(ds_cred);
1903	return PNFS_ATTEMPTED;
1904
1905out_failed:
1906	if (ff_layout_avoid_mds_available_ds(lseg))
1907		return PNFS_TRY_AGAIN;
1908	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
1909			hdr->args.offset, hdr->args.count,
1910			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
1911	return PNFS_NOT_ATTEMPTED;
1912}
1913
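/* Commit buckets map one-to-one onto mirror indices. */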
1914static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1915{
1916	return i;
1917}
1918
1919static struct nfs_fh *
1920select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1921{
1922	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1923
1924	/* FIXME: Assume that there is only one NFS version available
1925	 * for the DS.
1926	 */
1927	return &flseg->mirror_array[i]->fh_versions[0];
1928}
1929
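/*
 * Send a COMMIT to the DS that received this bucket's unstable writes.
 * On any setup failure the requests are handed back to the MDS commit
 * path via pnfs_generic_prepare_to_resend_writes().
 */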
1930static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1931{
1932	struct pnfs_layout_segment *lseg = data->lseg;
1933	struct nfs4_pnfs_ds *ds;
1934	struct rpc_clnt *ds_clnt;
1935	struct nfs4_ff_layout_mirror *mirror;
1936	const struct cred *ds_cred;
1937	u32 idx;
1938	int vers, ret;
1939	struct nfs_fh *fh;
1940
1941	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1942	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1943		goto out_err;
1944
1945	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1946	mirror = FF_LAYOUT_COMP(lseg, idx);
1947	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1948	if (!ds)
1949		goto out_err;
1950
1951	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1952						   data->inode);
1953	if (IS_ERR(ds_clnt))
1954		goto out_err;
1955
1956	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
1957	if (!ds_cred)
1958		goto out_err;
1959
1960	vers = nfs4_ff_layout_ds_version(mirror);
1961
1962	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1963		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
1964		vers);
1965	data->commit_done_cb = ff_layout_commit_done_cb;
1966	data->cred = ds_cred;
1967	refcount_inc(&ds->ds_clp->cl_count);
1968	data->ds_clp = ds->ds_clp;
1969	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1970	if (fh)
1971		data->args.fh = fh;
1972
1973	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1974				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
1975					       &ff_layout_commit_call_ops_v4,
1976				   how, RPC_TASK_SOFTCONN);
1977	put_cred(ds_cred);
1978	return ret;
1979out_err:
1980	pnfs_generic_prepare_to_resend_writes(data);
1981	pnfs_generic_commit_release(data);
1982	return -EAGAIN;
1983}
1984
1985static int
1986ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1987			   int how, struct nfs_commit_info *cinfo)
1988{
1989	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1990					    ff_layout_initiate_commit);
1991}
1992
1993static struct pnfs_ds_commit_info *
1994ff_layout_get_ds_info(struct inode *inode)
1995{
1996	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1997
1998	if (layout == NULL)
1999		return NULL;
2000
2001	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
2002}
2003
2004static void
2005ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
2006{
2007	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
2008						  id_node));
2009}
2010
2011static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
2012				  const struct nfs4_layoutreturn_args *args,
2013				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
2014{
2015	__be32 *start;
2016
2017	start = xdr_reserve_space(xdr, 4);
2018	if (unlikely(!start))
2019		return -E2BIG;
2020
2021	*start = cpu_to_be32(ff_args->num_errors);
 2022	/* This assumes we always return _ALL_ layouts */
2023	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2024}
2025
2026static void
2027encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
2028{
2029	WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
2030}
2031
2032static void
2033ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2034			    const nfs4_stateid *stateid,
2035			    const struct nfs42_layoutstat_devinfo *devinfo)
2036{
2037	__be32 *p;
2038
2039	p = xdr_reserve_space(xdr, 8 + 8);
2040	p = xdr_encode_hyper(p, devinfo->offset);
2041	p = xdr_encode_hyper(p, devinfo->length);
2042	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2043	p = xdr_reserve_space(xdr, 4*8);
2044	p = xdr_encode_hyper(p, devinfo->read_count);
2045	p = xdr_encode_hyper(p, devinfo->read_bytes);
2046	p = xdr_encode_hyper(p, devinfo->write_count);
2047	p = xdr_encode_hyper(p, devinfo->write_bytes);
2048	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2049}
2050
2051static void
2052ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2053			    const nfs4_stateid *stateid,
2054			    const struct nfs42_layoutstat_devinfo *devinfo)
2055{
2056	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2057	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2058			devinfo->ld_private.data);
2059}
2060
 2061/* Encode the per-device iostats gathered for this layoutreturn */
2062static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2063		const struct nfs4_layoutreturn_args *args,
2064		struct nfs4_flexfile_layoutreturn_args *ff_args)
2065{
2066	__be32 *p;
2067	int i;
2068
2069	p = xdr_reserve_space(xdr, 4);
2070	*p = cpu_to_be32(ff_args->num_dev);
2071	for (i = 0; i < ff_args->num_dev; i++)
2072		ff_layout_encode_ff_iostat(xdr,
2073				&args->layout->plh_stateid,
2074				&ff_args->devinfo[i]);
2075}
2076
2077static void
2078ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2079		unsigned int num_entries)
2080{
2081	unsigned int i;
2082
2083	for (i = 0; i < num_entries; i++) {
2084		if (!devinfo[i].ld_private.ops)
2085			continue;
2086		if (!devinfo[i].ld_private.ops->free)
2087			continue;
2088		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2089	}
2090}
2091
2092static struct nfs4_deviceid_node *
2093ff_layout_alloc_deviceid_node(struct nfs_server *server,
2094			      struct pnfs_device *pdev, gfp_t gfp_flags)
2095{
2096	struct nfs4_ff_layout_ds *dsaddr;
2097
2098	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2099	if (!dsaddr)
2100		return NULL;
2101	return &dsaddr->id_node;
2102}
2103
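/*
 * The flexfiles LAYOUTRETURN body (I/O errors followed by iostats) is
 * first encoded into a scratch page so that its total length is known,
 * and is then written into the real XDR stream as a length-prefixed
 * opaque.
 */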
2104static void
2105ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2106		const void *voidargs,
2107		const struct nfs4_xdr_opaque_data *ff_opaque)
2108{
2109	const struct nfs4_layoutreturn_args *args = voidargs;
2110	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2111	struct xdr_buf tmp_buf = {
2112		.head = {
2113			[0] = {
2114				.iov_base = page_address(ff_args->pages[0]),
2115			},
2116		},
2117		.buflen = PAGE_SIZE,
2118	};
2119	struct xdr_stream tmp_xdr;
2120	__be32 *start;
2121
2122	dprintk("%s: Begin\n", __func__);
2123
2124	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2125
2126	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2127	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2128
2129	start = xdr_reserve_space(xdr, 4);
2130	*start = cpu_to_be32(tmp_buf.len);
2131	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2132
2133	dprintk("%s: Return\n", __func__);
2134}
2135
2136static void
2137ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2138{
2139	struct nfs4_flexfile_layoutreturn_args *ff_args;
2140
2141	if (!args->data)
2142		return;
2143	ff_args = args->data;
2144	args->data = NULL;
2145
2146	ff_layout_free_ds_ioerr(&ff_args->errors);
2147	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2148
2149	put_page(ff_args->pages[0]);
2150	kfree(ff_args);
2151}
2152
2153static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2154	.encode = ff_layout_encode_layoutreturn,
2155	.free = ff_layout_free_layoutreturn,
2156};
2157
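/*
 * Collect up to FF_LAYOUTRETURN_MAXERR queued DS errors plus the
 * per-mirror iostats and attach them as the layout driver's private
 * LAYOUTRETURN payload.
 */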
2158static int
2159ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2160{
2161	struct nfs4_flexfile_layoutreturn_args *ff_args;
2162	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2163
2164	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
2165	if (!ff_args)
2166		goto out_nomem;
2167	ff_args->pages[0] = alloc_page(GFP_KERNEL);
2168	if (!ff_args->pages[0])
2169		goto out_nomem_free;
2170
2171	INIT_LIST_HEAD(&ff_args->errors);
2172	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2173			&args->range, &ff_args->errors,
2174			FF_LAYOUTRETURN_MAXERR);
2175
2176	spin_lock(&args->inode->i_lock);
 2177	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
 2178			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo),
			NFS4_FF_OP_LAYOUTRETURN);
2179	spin_unlock(&args->inode->i_lock);
2180
2181	args->ld_private->ops = &layoutreturn_ops;
2182	args->ld_private->data = ff_args;
2183	return 0;
2184out_nomem_free:
2185	kfree(ff_args);
2186out_nomem:
2187	return -ENOMEM;
2188}
2189
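/*
 * Report queued DS errors to the MDS via LAYOUTERROR (NFSv4.2 only),
 * batching at most NFS42_LAYOUTERROR_MAX errors per call.
 */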
2190#ifdef CONFIG_NFS_V4_2
2191void
2192ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2193{
2194	struct pnfs_layout_hdr *lo = lseg->pls_layout;
2195	struct nfs42_layout_error *errors;
2196	LIST_HEAD(head);
2197
2198	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2199		return;
2200	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2201	if (list_empty(&head))
2202		return;
2203
2204	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX,
2205			sizeof(*errors), GFP_NOFS);
2206	if (errors != NULL) {
2207		const struct nfs4_ff_layout_ds_err *pos;
2208		size_t n = 0;
2209
2210		list_for_each_entry(pos, &head, list) {
2211			errors[n].offset = pos->offset;
2212			errors[n].length = pos->length;
2213			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2214			errors[n].errors[0].dev_id = pos->deviceid;
2215			errors[n].errors[0].status = pos->status;
2216			errors[n].errors[0].opnum = pos->opnum;
2217			n++;
2218			if (!list_is_last(&pos->list, &head) &&
2219			    n < NFS42_LAYOUTERROR_MAX)
2220				continue;
2221			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2222				break;
2223			n = 0;
2224		}
2225		kfree(errors);
2226	}
2227	ff_layout_free_ds_ioerr(&head);
2228}
2229#else
2230void
2231ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2232{
2233}
2234#endif
2235
2236static int
2237ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2238{
2239	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2240
2241	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2242}
2243
2244static size_t
2245ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2246			  const int buflen)
2247{
2248	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2249	const struct in6_addr *addr = &sin6->sin6_addr;
2250
2251	/*
2252	 * RFC 4291, Section 2.2.2
2253	 *
2254	 * Shorthanded ANY address
2255	 */
2256	if (ipv6_addr_any(addr))
2257		return snprintf(buf, buflen, "::");
2258
2259	/*
2260	 * RFC 4291, Section 2.2.2
2261	 *
2262	 * Shorthanded loopback address
2263	 */
2264	if (ipv6_addr_loopback(addr))
2265		return snprintf(buf, buflen, "::1");
2266
2267	/*
2268	 * RFC 4291, Section 2.2.3
2269	 *
2270	 * Special presentation address format for mapped v4
2271	 * addresses.
2272	 */
2273	if (ipv6_addr_v4mapped(addr))
2274		return snprintf(buf, buflen, "::ffff:%pI4",
2275					&addr->s6_addr32[3]);
2276
2277	/*
2278	 * RFC 4291, Section 2.2.1
2279	 */
2280	return snprintf(buf, buflen, "%pI6c", addr);
2281}
2282
2283/* Derived from rpc_sockaddr2uaddr */
2284static void
2285ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2286{
2287	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2288	char portbuf[RPCBIND_MAXUADDRPLEN];
2289	char addrbuf[RPCBIND_MAXUADDRLEN];
2290	char *netid;
2291	unsigned short port;
2292	int len, netid_len;
2293	__be32 *p;
2294
2295	switch (sap->sa_family) {
2296	case AF_INET:
2297		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2298			return;
2299		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2300		netid = "tcp";
2301		netid_len = 3;
2302		break;
2303	case AF_INET6:
2304		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2305			return;
2306		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2307		netid = "tcp6";
2308		netid_len = 4;
2309		break;
2310	default:
2311		/* we only support tcp and tcp6 */
2312		WARN_ON_ONCE(1);
2313		return;
2314	}
2315
2316	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2317	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2318
2319	p = xdr_reserve_space(xdr, 4 + netid_len);
2320	xdr_encode_opaque(p, netid, netid_len);
2321
2322	p = xdr_reserve_space(xdr, 4 + len);
2323	xdr_encode_opaque(p, addrbuf, len);
2324}
2325
2326static void
2327ff_layout_encode_nfstime(struct xdr_stream *xdr,
2328			 ktime_t t)
2329{
2330	struct timespec64 ts;
2331	__be32 *p;
2332
2333	p = xdr_reserve_space(xdr, 12);
2334	ts = ktime_to_timespec64(t);
2335	p = xdr_encode_hyper(p, ts.tv_sec);
2336	*p++ = cpu_to_be32(ts.tv_nsec);
2337}
2338
2339static void
2340ff_layout_encode_io_latency(struct xdr_stream *xdr,
2341			    struct nfs4_ff_io_stat *stat)
2342{
2343	__be32 *p;
2344
2345	p = xdr_reserve_space(xdr, 5 * 8);
2346	p = xdr_encode_hyper(p, stat->ops_requested);
2347	p = xdr_encode_hyper(p, stat->bytes_requested);
2348	p = xdr_encode_hyper(p, stat->ops_completed);
2349	p = xdr_encode_hyper(p, stat->bytes_completed);
2350	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2351	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2352	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2353}
2354
2355static void
2356ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2357			      const struct nfs42_layoutstat_devinfo *devinfo,
2358			      struct nfs4_ff_layout_mirror *mirror)
2359{
2360	struct nfs4_pnfs_ds_addr *da;
2361	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2362	struct nfs_fh *fh = &mirror->fh_versions[0];
2363	__be32 *p;
2364
2365	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2366	dprintk("%s: DS %s: encoding address %s\n",
2367		__func__, ds->ds_remotestr, da->da_remotestr);
2368	/* netaddr4 */
2369	ff_layout_encode_netaddr(xdr, da);
2370	/* nfs_fh4 */
2371	p = xdr_reserve_space(xdr, 4 + fh->size);
2372	xdr_encode_opaque(p, fh->data, fh->size);
2373	/* ff_io_latency4 read */
2374	spin_lock(&mirror->lock);
2375	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2376	/* ff_io_latency4 write */
2377	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2378	spin_unlock(&mirror->lock);
2379	/* nfstime4 */
2380	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2381	/* bool */
2382	p = xdr_reserve_space(xdr, 4);
2383	*p = cpu_to_be32(false);
2384}
2385
2386static void
2387ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2388			     const struct nfs4_xdr_opaque_data *opaque)
2389{
2390	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2391			struct nfs42_layoutstat_devinfo, ld_private);
2392	__be32 *start;
2393
2394	/* layoutupdate length */
2395	start = xdr_reserve_space(xdr, 4);
2396	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2397
2398	*start = cpu_to_be32((xdr->p - start - 1) * 4);
2399}
2400
2401static void
2402ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2403{
2404	struct nfs4_ff_layout_mirror *mirror = opaque->data;
2405
2406	ff_layout_put_mirror(mirror);
2407}
2408
2409static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2410	.encode = ff_layout_encode_layoutstats,
2411	.free	= ff_layout_free_layoutstats,
2412};
2413
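/*
 * Fill in one layoutstat devinfo entry per mirror that has fresh
 * statistics (or for every mirror when preparing a LAYOUTRETURN), taking
 * a reference on each mirror that is dropped again when the entry is
 * freed.
 */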
2414static int
2415ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2416			       struct nfs42_layoutstat_devinfo *devinfo,
 2417			       int dev_limit, enum nfs4_ff_op_type type)
2418{
2419	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2420	struct nfs4_ff_layout_mirror *mirror;
2421	struct nfs4_deviceid_node *dev;
2422	int i = 0;
2423
2424	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2425		if (i >= dev_limit)
2426			break;
2427		if (IS_ERR_OR_NULL(mirror->mirror_ds))
2428			continue;
 2429		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
					&mirror->flags) &&
		    type != NFS4_FF_OP_LAYOUTRETURN)
 2430			continue;
 2431		/* mirror refcount put in ff_layout_free_layoutstats() */
2432		if (!refcount_inc_not_zero(&mirror->ref))
2433			continue;
2434		dev = &mirror->mirror_ds->id_node; 
2435		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2436		devinfo->offset = 0;
2437		devinfo->length = NFS4_MAX_UINT64;
2438		spin_lock(&mirror->lock);
2439		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2440		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2441		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2442		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2443		spin_unlock(&mirror->lock);
2444		devinfo->layout_type = LAYOUT_FLEX_FILES;
2445		devinfo->ld_private.ops = &layoutstat_ops;
2446		devinfo->ld_private.data = mirror;
2447
2448		devinfo++;
2449		i++;
2450	}
2451	return i;
2452}
2453
2454static int
2455ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2456{
2457	struct nfs4_flexfile_layout *ff_layout;
2458	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2459
2460	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2461	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
2462	if (!args->devinfo)
2463		return -ENOMEM;
2464
2465	spin_lock(&args->inode->i_lock);
2466	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
 2467	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
 2468			&args->devinfo[0], dev_count,
			NFS4_FF_OP_LAYOUTSTATS);
2469	spin_unlock(&args->inode->i_lock);
2470	if (!args->num_dev) {
2471		kfree(args->devinfo);
2472		args->devinfo = NULL;
2473		return -ENOENT;
2474	}
2475
2476	return 0;
2477}
2478
2479static int
2480ff_layout_set_layoutdriver(struct nfs_server *server,
2481		const struct nfs_fh *dummy)
2482{
2483#if IS_ENABLED(CONFIG_NFS_V4_2)
2484	server->caps |= NFS_CAP_LAYOUTSTATS;
2485#endif
2486	return 0;
2487}
2488
2489static struct pnfs_layoutdriver_type flexfilelayout_type = {
2490	.id			= LAYOUT_FLEX_FILES,
2491	.name			= "LAYOUT_FLEX_FILES",
2492	.owner			= THIS_MODULE,
2493	.flags			= PNFS_LAYOUTGET_ON_OPEN,
2494	.max_layoutget_response	= 4096, /* 1 page or so... */
2495	.set_layoutdriver	= ff_layout_set_layoutdriver,
2496	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
2497	.free_layout_hdr	= ff_layout_free_layout_hdr,
2498	.alloc_lseg		= ff_layout_alloc_lseg,
2499	.free_lseg		= ff_layout_free_lseg,
2500	.add_lseg		= ff_layout_add_lseg,
2501	.pg_read_ops		= &ff_layout_pg_read_ops,
2502	.pg_write_ops		= &ff_layout_pg_write_ops,
2503	.get_ds_info		= ff_layout_get_ds_info,
2504	.free_deviceid_node	= ff_layout_free_deviceid_node,
2505	.mark_request_commit	= pnfs_layout_mark_request_commit,
2506	.clear_request_commit	= pnfs_generic_clear_request_commit,
2507	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
2508	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
2509	.commit_pagelist	= ff_layout_commit_pagelist,
2510	.read_pagelist		= ff_layout_read_pagelist,
2511	.write_pagelist		= ff_layout_write_pagelist,
2512	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
2513	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
2514	.sync			= pnfs_nfs_generic_sync,
2515	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
2516};
2517
2518static int __init nfs4flexfilelayout_init(void)
2519{
2520	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2521	       __func__);
2522	return pnfs_register_layoutdriver(&flexfilelayout_type);
2523}
2524
2525static void __exit nfs4flexfilelayout_exit(void)
2526{
2527	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2528	       __func__);
2529	pnfs_unregister_layoutdriver(&flexfilelayout_type);
2530}
2531
2532MODULE_ALIAS("nfs-layouttype4-4");
2533
2534MODULE_LICENSE("GPL");
2535MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2536
2537module_init(nfs4flexfilelayout_init);
2538module_exit(nfs4flexfilelayout_exit);
2539
2540module_param(io_maxretrans, ushort, 0644);
 2541MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
 2542			"retries an I/O request before returning an error.");
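/*
 * Usage sketch (assuming the driver is built as the nfs_layout_flexfiles
 * module): the retry limit can be set at load time, e.g.
 * "modprobe nfs_layout_flexfiles io_maxretrans=5", or adjusted at runtime
 * through /sys/module/nfs_layout_flexfiles/parameters/io_maxretrans.
 */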