1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Module for pnfs flexfile layout driver.
4 *
5 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
6 *
7 * Tao Peng <bergwolf@primarydata.com>
8 */
9
10#include <linux/nfs_fs.h>
11#include <linux/nfs_mount.h>
12#include <linux/nfs_page.h>
13#include <linux/module.h>
14#include <linux/file.h>
15#include <linux/sched/mm.h>
16
17#include <linux/sunrpc/metrics.h>
18
19#include "flexfilelayout.h"
20#include "../nfs4session.h"
21#include "../nfs4idmap.h"
22#include "../internal.h"
23#include "../delegation.h"
24#include "../nfs4trace.h"
25#include "../iostat.h"
26#include "../nfs.h"
27#include "../nfs42.h"
28
29#define NFSDBG_FACILITY NFSDBG_PNFS_LD
30
31#define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
32#define FF_LAYOUTRETURN_MAXERR 20
33
34enum nfs4_ff_op_type {
35 NFS4_FF_OP_LAYOUTSTATS,
36 NFS4_FF_OP_LAYOUTRETURN,
37};
38
39static unsigned short io_maxretrans;
40
41static const struct pnfs_commit_ops ff_layout_commit_ops;
42static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
43 struct nfs_pgio_header *hdr);
44static int
45ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
46 struct nfs42_layoutstat_devinfo *devinfo,
47 int dev_limit, enum nfs4_ff_op_type type);
48static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
49 const struct nfs42_layoutstat_devinfo *devinfo,
50 struct nfs4_ff_layout_mirror *mirror);
51
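/*
 * Allocate and initialise a flexfile layout header, including its commit
 * info, error list and mirror list.
 */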
52static struct pnfs_layout_hdr *
53ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
54{
55 struct nfs4_flexfile_layout *ffl;
56
57 ffl = kzalloc(sizeof(*ffl), gfp_flags);
58 if (ffl) {
59 pnfs_init_ds_commit_info(&ffl->commit_info);
60 INIT_LIST_HEAD(&ffl->error_list);
61 INIT_LIST_HEAD(&ffl->mirrors);
62 ffl->last_report_time = ktime_get();
63 ffl->commit_info.ops = &ff_layout_commit_ops;
64 return &ffl->generic_hdr;
65 } else
66 return NULL;
67}
68
69static void
70ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
71{
72 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
73 struct nfs4_ff_layout_ds_err *err, *n;
74
75 list_for_each_entry_safe(err, n, &ffl->error_list, list) {
76 list_del(&err->list);
77 kfree(err);
78 }
79 kfree_rcu(ffl, generic_hdr.plh_rcu);
80}
81
82static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
83{
84 __be32 *p;
85
86 p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
87 if (unlikely(p == NULL))
88 return -ENOBUFS;
89 stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
90 memcpy(stateid->data, p, NFS4_STATEID_SIZE);
91 dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
92 p[0], p[1], p[2], p[3]);
93 return 0;
94}
95
96static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
97{
98 __be32 *p;
99
100 p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
101 if (unlikely(!p))
102 return -ENOBUFS;
103 memcpy(devid, p, NFS4_DEVICEID4_SIZE);
104 nfs4_print_deviceid(devid);
105 return 0;
106}
107
108static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
109{
110 __be32 *p;
111
112 p = xdr_inline_decode(xdr, 4);
113 if (unlikely(!p))
114 return -ENOBUFS;
115 fh->size = be32_to_cpup(p++);
116 if (fh->size > NFS_MAXFHSIZE) {
117 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
118 fh->size);
119 return -EOVERFLOW;
120 }
121 /* fh.data */
122 p = xdr_inline_decode(xdr, fh->size);
123 if (unlikely(!p))
124 return -ENOBUFS;
125 memcpy(&fh->data, p, fh->size);
126 dprintk("%s: fh len %d\n", __func__, fh->size);
127
128 return 0;
129}
130
131/*
132 * Currently only stringified uids and gids are accepted.
133 * I.e., kerberos is not supported to the DSes, so no principals.
134 *
135 * That means that one common function will suffice, but when
136 * principals are added, this should be split to accommodate
137 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
138 */
139static int
140decode_name(struct xdr_stream *xdr, u32 *id)
141{
142 __be32 *p;
143 int len;
144
145 /* opaque_length(4)*/
146 p = xdr_inline_decode(xdr, 4);
147 if (unlikely(!p))
148 return -ENOBUFS;
149 len = be32_to_cpup(p++);
150 if (len < 0)
151 return -EINVAL;
152
153 dprintk("%s: len %u\n", __func__, len);
154
155 /* opaque body */
156 p = xdr_inline_decode(xdr, len);
157 if (unlikely(!p))
158 return -ENOBUFS;
159
160 if (!nfs_map_string_to_numeric((char *)p, len, id))
161 return -EINVAL;
162
163 return 0;
164}
165
166static struct nfsd_file *
167ff_local_open_fh(struct nfs_client *clp, const struct cred *cred,
168 struct nfs_fh *fh, fmode_t mode)
169{
170 if (mode & FMODE_WRITE) {
171 /*
172 * Always request read and write access since this corresponds
173 * to a rw layout.
174 */
175 mode |= FMODE_READ;
176 }
177
178 return nfs_local_open_fh(clp, cred, fh, mode);
179}
180
181static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
182 const struct nfs4_ff_layout_mirror *m2)
183{
184 int i, j;
185
186 if (m1->fh_versions_cnt != m2->fh_versions_cnt)
187 return false;
188 for (i = 0; i < m1->fh_versions_cnt; i++) {
189 bool found_fh = false;
190 for (j = 0; j < m2->fh_versions_cnt; j++) {
191 if (nfs_compare_fh(&m1->fh_versions[i],
192 &m2->fh_versions[j]) == 0) {
193 found_fh = true;
194 break;
195 }
196 }
197 if (!found_fh)
198 return false;
199 }
200 return true;
201}
202
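/*
 * Add a mirror to the layout's mirror list, unless an equivalent mirror
 * (same deviceid and filehandles) is already present, in which case a
 * reference to the existing entry is returned instead.
 */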
203static struct nfs4_ff_layout_mirror *
204ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
205 struct nfs4_ff_layout_mirror *mirror)
206{
207 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
208 struct nfs4_ff_layout_mirror *pos;
209 struct inode *inode = lo->plh_inode;
210
211 spin_lock(&inode->i_lock);
212 list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
213 if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
214 continue;
215 if (!ff_mirror_match_fh(mirror, pos))
216 continue;
217 if (refcount_inc_not_zero(&pos->ref)) {
218 spin_unlock(&inode->i_lock);
219 return pos;
220 }
221 }
222 list_add(&mirror->mirrors, &ff_layout->mirrors);
223 mirror->layout = lo;
224 spin_unlock(&inode->i_lock);
225 return mirror;
226}
227
228static void
229ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
230{
231 struct inode *inode;
232 if (mirror->layout == NULL)
233 return;
234 inode = mirror->layout->plh_inode;
235 spin_lock(&inode->i_lock);
236 list_del(&mirror->mirrors);
237 spin_unlock(&inode->i_lock);
238 mirror->layout = NULL;
239}
240
241static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
242{
243 struct nfs4_ff_layout_mirror *mirror;
244
245 mirror = kzalloc(sizeof(*mirror), gfp_flags);
246 if (mirror != NULL) {
247 spin_lock_init(&mirror->lock);
248 refcount_set(&mirror->ref, 1);
249 INIT_LIST_HEAD(&mirror->mirrors);
250 }
251 return mirror;
252}
253
254static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
255{
256 const struct cred *cred;
257
258 ff_layout_remove_mirror(mirror);
259 kfree(mirror->fh_versions);
260 cred = rcu_access_pointer(mirror->ro_cred);
261 put_cred(cred);
262 cred = rcu_access_pointer(mirror->rw_cred);
263 put_cred(cred);
264 nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
265 kfree(mirror);
266}
267
268static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
269{
270 if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
271 ff_layout_free_mirror(mirror);
272}
273
274static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
275{
276 u32 i;
277
278 for (i = 0; i < fls->mirror_array_cnt; i++)
279 ff_layout_put_mirror(fls->mirror_array[i]);
280}
281
282static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
283{
284 if (fls) {
285 ff_layout_free_mirror_array(fls);
286 kfree(fls);
287 }
288}
289
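/* Two layout segments match only if they reference the same set of mirrors */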
290static bool
291ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
292 struct pnfs_layout_segment *l2)
293{
294 const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
295 const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
296 u32 i;
297
298 if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
299 return false;
300 for (i = 0; i < fl1->mirror_array_cnt; i++) {
301 if (fl1->mirror_array[i] != fl2->mirror_array[i])
302 return false;
303 }
304 return true;
305}
306
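/*
 * Decide whether the range of l1 should be inserted after that of l2:
 * RW ranges sort after READ ranges, otherwise the ordering is by offset.
 */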
307static bool
308ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
309 const struct pnfs_layout_range *l2)
310{
311 u64 end1, end2;
312
313 if (l1->iomode != l2->iomode)
314 return l1->iomode != IOMODE_READ;
315 end1 = pnfs_calc_offset_end(l1->offset, l1->length);
316 end2 = pnfs_calc_offset_end(l2->offset, l2->length);
317 if (end1 < l2->offset)
318 return false;
319 if (end2 < l1->offset)
320 return true;
321 return l2->offset <= l1->offset;
322}
323
324static bool
325ff_lseg_merge(struct pnfs_layout_segment *new,
326 struct pnfs_layout_segment *old)
327{
328 u64 new_end, old_end;
329
330 if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
331 return false;
332 if (new->pls_range.iomode != old->pls_range.iomode)
333 return false;
334 old_end = pnfs_calc_offset_end(old->pls_range.offset,
335 old->pls_range.length);
336 if (old_end < new->pls_range.offset)
337 return false;
338 new_end = pnfs_calc_offset_end(new->pls_range.offset,
339 new->pls_range.length);
340 if (new_end < old->pls_range.offset)
341 return false;
342 if (!ff_lseg_match_mirrors(new, old))
343 return false;
344
345 /* Mergeable: copy info from 'old' to 'new' */
346 if (new_end < old_end)
347 new_end = old_end;
348 if (new->pls_range.offset < old->pls_range.offset)
349 new->pls_range.offset = old->pls_range.offset;
350 new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
351 new_end);
352 if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
353 set_bit(NFS_LSEG_ROC, &new->pls_flags);
354 return true;
355}
356
357static void
358ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
359 struct pnfs_layout_segment *lseg,
360 struct list_head *free_me)
361{
362 pnfs_generic_layout_insert_lseg(lo, lseg,
363 ff_lseg_range_is_after,
364 ff_lseg_merge,
365 free_me);
366}
367
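/* Sort the mirror array by efficiency, most efficient mirror first */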
368static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
369{
370 int i, j;
371
372 for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
373 for (j = i + 1; j < fls->mirror_array_cnt; j++)
374 if (fls->mirror_array[i]->efficiency <
375 fls->mirror_array[j]->efficiency)
376 swap(fls->mirror_array[i],
377 fls->mirror_array[j]);
378 }
379}
380
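/*
 * Decode the ff_layout4 returned by LAYOUTGET: the stripe unit and mirror
 * count, then for each mirror its deviceid, efficiency, stateid, filehandle
 * versions and user/group, followed by the optional flags and the
 * layoutstats report interval.
 */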
381static struct pnfs_layout_segment *
382ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
383 struct nfs4_layoutget_res *lgr,
384 gfp_t gfp_flags)
385{
386 struct pnfs_layout_segment *ret;
387 struct nfs4_ff_layout_segment *fls = NULL;
388 struct xdr_stream stream;
389 struct xdr_buf buf;
390 struct page *scratch;
391 u64 stripe_unit;
392 u32 mirror_array_cnt;
393 __be32 *p;
394 int i, rc;
395
396 dprintk("--> %s\n", __func__);
397 scratch = alloc_page(gfp_flags);
398 if (!scratch)
399 return ERR_PTR(-ENOMEM);
400
401 xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
402 lgr->layoutp->len);
403 xdr_set_scratch_page(&stream, scratch);
404
405 /* stripe unit and mirror_array_cnt */
406 rc = -EIO;
407 p = xdr_inline_decode(&stream, 8 + 4);
408 if (!p)
409 goto out_err_free;
410
411 p = xdr_decode_hyper(p, &stripe_unit);
412 mirror_array_cnt = be32_to_cpup(p++);
413 dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
414 stripe_unit, mirror_array_cnt);
415
416 if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
417 mirror_array_cnt == 0)
418 goto out_err_free;
419
420 rc = -ENOMEM;
421 fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
422 gfp_flags);
423 if (!fls)
424 goto out_err_free;
425
426 fls->mirror_array_cnt = mirror_array_cnt;
427 fls->stripe_unit = stripe_unit;
428
429 for (i = 0; i < fls->mirror_array_cnt; i++) {
430 struct nfs4_ff_layout_mirror *mirror;
431 struct cred *kcred;
432 const struct cred __rcu *cred;
433 kuid_t uid;
434 kgid_t gid;
435 u32 ds_count, fh_count, id;
436 int j;
437
438 rc = -EIO;
439 p = xdr_inline_decode(&stream, 4);
440 if (!p)
441 goto out_err_free;
442 ds_count = be32_to_cpup(p);
443
444 /* FIXME: allow for striping? */
445 if (ds_count != 1)
446 goto out_err_free;
447
448 fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
449 if (fls->mirror_array[i] == NULL) {
450 rc = -ENOMEM;
451 goto out_err_free;
452 }
453
454 fls->mirror_array[i]->ds_count = ds_count;
455
456 /* deviceid */
457 rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
458 if (rc)
459 goto out_err_free;
460
461 /* efficiency */
462 rc = -EIO;
463 p = xdr_inline_decode(&stream, 4);
464 if (!p)
465 goto out_err_free;
466 fls->mirror_array[i]->efficiency = be32_to_cpup(p);
467
468 /* stateid */
469 rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
470 if (rc)
471 goto out_err_free;
472
473 /* fh */
474 rc = -EIO;
475 p = xdr_inline_decode(&stream, 4);
476 if (!p)
477 goto out_err_free;
478 fh_count = be32_to_cpup(p);
479
480 fls->mirror_array[i]->fh_versions =
481 kcalloc(fh_count, sizeof(struct nfs_fh),
482 gfp_flags);
483 if (fls->mirror_array[i]->fh_versions == NULL) {
484 rc = -ENOMEM;
485 goto out_err_free;
486 }
487
488 for (j = 0; j < fh_count; j++) {
489 rc = decode_nfs_fh(&stream,
490 &fls->mirror_array[i]->fh_versions[j]);
491 if (rc)
492 goto out_err_free;
493 }
494
495 fls->mirror_array[i]->fh_versions_cnt = fh_count;
496
497 /* user */
498 rc = decode_name(&stream, &id);
499 if (rc)
500 goto out_err_free;
501
502 uid = make_kuid(&init_user_ns, id);
503
504 /* group */
505 rc = decode_name(&stream, &id);
506 if (rc)
507 goto out_err_free;
508
509 gid = make_kgid(&init_user_ns, id);
510
511 if (gfp_flags & __GFP_FS)
512 kcred = prepare_kernel_cred(&init_task);
513 else {
514 unsigned int nofs_flags = memalloc_nofs_save();
515 kcred = prepare_kernel_cred(&init_task);
516 memalloc_nofs_restore(nofs_flags);
517 }
518 rc = -ENOMEM;
519 if (!kcred)
520 goto out_err_free;
521 kcred->fsuid = uid;
522 kcred->fsgid = gid;
523 cred = RCU_INITIALIZER(kcred);
524
525 if (lgr->range.iomode == IOMODE_READ)
526 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
527 else
528 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
529
530 mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
531 if (mirror != fls->mirror_array[i]) {
532 /* swap cred ptrs so free_mirror will clean up old */
533 if (lgr->range.iomode == IOMODE_READ) {
534 cred = xchg(&mirror->ro_cred, cred);
535 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
536 } else {
537 cred = xchg(&mirror->rw_cred, cred);
538 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
539 }
540 ff_layout_free_mirror(fls->mirror_array[i]);
541 fls->mirror_array[i] = mirror;
542 }
543
544 dprintk("%s: iomode %s uid %u gid %u\n", __func__,
545 lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
546 from_kuid(&init_user_ns, uid),
547 from_kgid(&init_user_ns, gid));
548 }
549
550 p = xdr_inline_decode(&stream, 4);
551 if (!p)
552 goto out_sort_mirrors;
553 fls->flags = be32_to_cpup(p);
554
555 p = xdr_inline_decode(&stream, 4);
556 if (!p)
557 goto out_sort_mirrors;
558 for (i=0; i < fls->mirror_array_cnt; i++)
559 fls->mirror_array[i]->report_interval = be32_to_cpup(p);
560
561out_sort_mirrors:
562 ff_layout_sort_mirrors(fls);
563 ret = &fls->generic_hdr;
564 dprintk("<-- %s (success)\n", __func__);
565out_free_page:
566 __free_page(scratch);
567 return ret;
568out_err_free:
569 _ff_layout_free_lseg(fls);
570 ret = ERR_PTR(rc);
571 dprintk("<-- %s (%d)\n", __func__, rc);
572 goto out_free_page;
573}
574
575static void
576ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
577{
578 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
579
580 dprintk("--> %s\n", __func__);
581
582 if (lseg->pls_range.iomode == IOMODE_RW) {
583 struct nfs4_flexfile_layout *ffl;
584 struct inode *inode;
585
586 ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
587 inode = ffl->generic_hdr.plh_inode;
588 spin_lock(&inode->i_lock);
589 pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
590 spin_unlock(&inode->i_lock);
591 }
592 _ff_layout_free_lseg(fls);
593}
594
595static void
596nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
597{
598 /* first IO request? */
599 if (atomic_inc_return(&timer->n_ops) == 1) {
600 timer->start_time = now;
601 }
602}
603
604static ktime_t
605nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
606{
607 ktime_t start;
608
609 if (atomic_dec_return(&timer->n_ops) < 0)
610 WARN_ON_ONCE(1);
611
612 start = timer->start_time;
613 timer->start_time = now;
614 return ktime_sub(now, start);
615}
616
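/*
 * Start the busy timer for this mirror and return true if the layoutstats
 * report interval has elapsed, i.e. the caller should send a LAYOUTSTATS.
 */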
617static bool
618nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
619 struct nfs4_ff_layoutstat *layoutstat,
620 ktime_t now)
621{
622 s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
623 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
624
625 nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
626 if (!mirror->start_time)
627 mirror->start_time = now;
628 if (mirror->report_interval != 0)
629 report_interval = (s64)mirror->report_interval * 1000LL;
630 else if (layoutstats_timer != 0)
631 report_interval = (s64)layoutstats_timer * 1000LL;
632 if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
633 report_interval) {
634 ffl->last_report_time = now;
635 return true;
636 }
637
638 return false;
639}
640
641static void
642nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
643 __u64 requested)
644{
645 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
646
647 iostat->ops_requested++;
648 iostat->bytes_requested += requested;
649}
650
651static void
652nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
653 __u64 requested,
654 __u64 completed,
655 ktime_t time_completed,
656 ktime_t time_started)
657{
658 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
659 ktime_t completion_time = ktime_sub(time_completed, time_started);
660 ktime_t timer;
661
662 iostat->ops_completed++;
663 iostat->bytes_completed += completed;
664 iostat->bytes_not_delivered += requested - completed;
665
666 timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
667 iostat->total_busy_time =
668 ktime_add(iostat->total_busy_time, timer);
669 iostat->aggregate_completion_time =
670 ktime_add(iostat->aggregate_completion_time,
671 completion_time);
672}
673
674static void
675nfs4_ff_layout_stat_io_start_read(struct inode *inode,
676 struct nfs4_ff_layout_mirror *mirror,
677 __u64 requested, ktime_t now)
678{
679 bool report;
680
681 spin_lock(&mirror->lock);
682 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
683 nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
684 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
685 spin_unlock(&mirror->lock);
686
687 if (report)
688 pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
689}
690
691static void
692nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
693 struct nfs4_ff_layout_mirror *mirror,
694 __u64 requested,
695 __u64 completed)
696{
697 spin_lock(&mirror->lock);
698 nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
699 requested, completed,
700 ktime_get(), task->tk_start);
701 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
702 spin_unlock(&mirror->lock);
703}
704
705static void
706nfs4_ff_layout_stat_io_start_write(struct inode *inode,
707 struct nfs4_ff_layout_mirror *mirror,
708 __u64 requested, ktime_t now)
709{
710 bool report;
711
712 spin_lock(&mirror->lock);
713 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
714 nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
715 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
716 spin_unlock(&mirror->lock);
717
718 if (report)
719 pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
720}
721
722static void
723nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
724 struct nfs4_ff_layout_mirror *mirror,
725 __u64 requested,
726 __u64 completed,
727 enum nfs3_stable_how committed)
728{
729 if (committed == NFS_UNSTABLE)
730 requested = completed = 0;
731
732 spin_lock(&mirror->lock);
733 nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
734 requested, completed, ktime_get(), task->tk_start);
735 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
736 spin_unlock(&mirror->lock);
737}
738
739static void
740ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
741{
742 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
743
744 if (devid)
745 nfs4_mark_deviceid_unavailable(devid);
746}
747
748static void
749ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
750{
751 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
752
753 if (devid)
754 nfs4_mark_deviceid_available(devid);
755}
756
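/*
 * Scan the mirror array from start_idx for a data server that can be used
 * for a read, optionally skipping devices marked as unavailable. Returns
 * the data server and records the chosen mirror index in *best_idx.
 */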
757static struct nfs4_pnfs_ds *
758ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
759 u32 start_idx, u32 *best_idx,
760 bool check_device)
761{
762 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
763 struct nfs4_ff_layout_mirror *mirror;
764 struct nfs4_pnfs_ds *ds;
765 u32 idx;
766
767 /* mirrors are initially sorted by efficiency */
768 for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
769 mirror = FF_LAYOUT_COMP(lseg, idx);
770 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
771 if (!ds)
772 continue;
773
774 if (check_device &&
775 nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
776 continue;
777
778 *best_idx = idx;
779 return ds;
780 }
781
782 return NULL;
783}
784
785static struct nfs4_pnfs_ds *
786ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
787 u32 start_idx, u32 *best_idx)
788{
789 return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
790}
791
792static struct nfs4_pnfs_ds *
793ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
794 u32 start_idx, u32 *best_idx)
795{
796 return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
797}
798
799static struct nfs4_pnfs_ds *
800ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
801 u32 start_idx, u32 *best_idx)
802{
803 struct nfs4_pnfs_ds *ds;
804
805 ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
806 if (ds)
807 return ds;
808 return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
809}
810
811static struct nfs4_pnfs_ds *
812ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
813 u32 *best_idx)
814{
815 struct pnfs_layout_segment *lseg = pgio->pg_lseg;
816 struct nfs4_pnfs_ds *ds;
817
818 ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
819 best_idx);
820 if (ds || !pgio->pg_mirror_idx)
821 return ds;
822 return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
823}
824
825static void
826ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
827 struct nfs_page *req,
828 bool strict_iomode)
829{
830 pnfs_put_lseg(pgio->pg_lseg);
831 pgio->pg_lseg =
832 pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
833 req_offset(req), req->wb_bytes, IOMODE_READ,
834 strict_iomode, nfs_io_gfp_mask());
835 if (IS_ERR(pgio->pg_lseg)) {
836 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
837 pgio->pg_lseg = NULL;
838 }
839}
840
841static void
842ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
843 struct nfs_page *req)
844{
845 struct nfs_pgio_mirror *pgm;
846 struct nfs4_ff_layout_mirror *mirror;
847 struct nfs4_pnfs_ds *ds;
848 u32 ds_idx;
849
850 if (NFS_SERVER(pgio->pg_inode)->flags &
851 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
852 pgio->pg_maxretrans = io_maxretrans;
853retry:
854 pnfs_generic_pg_check_layout(pgio, req);
855 /* Use full layout for now */
856 if (!pgio->pg_lseg) {
857 ff_layout_pg_get_read(pgio, req, false);
858 if (!pgio->pg_lseg)
859 goto out_nolseg;
860 }
861 if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
862 ff_layout_pg_get_read(pgio, req, true);
863 if (!pgio->pg_lseg)
864 goto out_nolseg;
865 }
866 /* Reset wb_nio, since getting layout segment was successful */
867 req->wb_nio = 0;
868
869 ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
870 if (!ds) {
871 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
872 goto out_mds;
873 pnfs_generic_pg_cleanup(pgio);
874 /* Sleep for 1 second before retrying */
875 ssleep(1);
876 goto retry;
877 }
878
879 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
880 pgm = &pgio->pg_mirrors[0];
881 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
882
883 pgio->pg_mirror_idx = ds_idx;
884 return;
885out_nolseg:
886 if (pgio->pg_error < 0) {
887 if (pgio->pg_error != -EAGAIN)
888 return;
889 /* Retry getting layout segment if lower layer returned -EAGAIN */
890 if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
891 if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
892 pgio->pg_error = -ETIMEDOUT;
893 else
894 pgio->pg_error = -EIO;
895 return;
896 }
897 pgio->pg_error = 0;
898 /* Sleep for 1 second before retrying */
899 ssleep(1);
900 goto retry;
901 }
902out_mds:
903 trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
904 0, NFS4_MAX_UINT64, IOMODE_READ,
905 NFS_I(pgio->pg_inode)->layout,
906 pgio->pg_lseg);
907 pgio->pg_maxretrans = 0;
908 nfs_pageio_reset_read_mds(pgio);
909}
910
911static void
912ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
913 struct nfs_page *req)
914{
915 struct nfs4_ff_layout_mirror *mirror;
916 struct nfs_pgio_mirror *pgm;
917 struct nfs4_pnfs_ds *ds;
918 u32 i;
919
920retry:
921 pnfs_generic_pg_check_layout(pgio, req);
922 if (!pgio->pg_lseg) {
923 pgio->pg_lseg =
924 pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
925 req_offset(req), req->wb_bytes,
926 IOMODE_RW, false, nfs_io_gfp_mask());
927 if (IS_ERR(pgio->pg_lseg)) {
928 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
929 pgio->pg_lseg = NULL;
930 return;
931 }
932 }
933 /* If no lseg, fall back to write through mds */
934 if (pgio->pg_lseg == NULL)
935 goto out_mds;
936
937 /* Use a direct mapping of ds_idx to pgio mirror_idx */
938 if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
939 goto out_eagain;
940
941 for (i = 0; i < pgio->pg_mirror_count; i++) {
942 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
943 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
944 if (!ds) {
945 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
946 goto out_mds;
947 pnfs_generic_pg_cleanup(pgio);
948 /* Sleep for 1 second before retrying */
949 ssleep(1);
950 goto retry;
951 }
952 pgm = &pgio->pg_mirrors[i];
953 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
954 }
955
956 if (NFS_SERVER(pgio->pg_inode)->flags &
957 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
958 pgio->pg_maxretrans = io_maxretrans;
959 return;
960out_eagain:
961 pnfs_generic_pg_cleanup(pgio);
962 pgio->pg_error = -EAGAIN;
963 return;
964out_mds:
965 trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
966 0, NFS4_MAX_UINT64, IOMODE_RW,
967 NFS_I(pgio->pg_inode)->layout,
968 pgio->pg_lseg);
969 pgio->pg_maxretrans = 0;
970 nfs_pageio_reset_write_mds(pgio);
971 pgio->pg_error = -EAGAIN;
972}
973
974static unsigned int
975ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
976 struct nfs_page *req)
977{
978 if (!pgio->pg_lseg) {
979 pgio->pg_lseg =
980 pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
981 req_offset(req), req->wb_bytes,
982 IOMODE_RW, false, nfs_io_gfp_mask());
983 if (IS_ERR(pgio->pg_lseg)) {
984 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
985 pgio->pg_lseg = NULL;
986 goto out;
987 }
988 }
989 if (pgio->pg_lseg)
990 return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
991
992 trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
993 0, NFS4_MAX_UINT64, IOMODE_RW,
994 NFS_I(pgio->pg_inode)->layout,
995 pgio->pg_lseg);
996 /* no lseg means that pnfs is not in use, so no mirroring here */
997 nfs_pageio_reset_write_mds(pgio);
998out:
999 return 1;
1000}
1001
1002static u32
1003ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
1004{
1005 u32 old = desc->pg_mirror_idx;
1006
1007 desc->pg_mirror_idx = idx;
1008 return old;
1009}
1010
1011static struct nfs_pgio_mirror *
1012ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
1013{
1014 return &desc->pg_mirrors[idx];
1015}
1016
1017static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
1018 .pg_init = ff_layout_pg_init_read,
1019 .pg_test = pnfs_generic_pg_test,
1020 .pg_doio = pnfs_generic_pg_readpages,
1021 .pg_cleanup = pnfs_generic_pg_cleanup,
1022};
1023
1024static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
1025 .pg_init = ff_layout_pg_init_write,
1026 .pg_test = pnfs_generic_pg_test,
1027 .pg_doio = pnfs_generic_pg_writepages,
1028 .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1029 .pg_cleanup = pnfs_generic_pg_cleanup,
1030 .pg_get_mirror = ff_layout_pg_get_mirror_write,
1031 .pg_set_mirror = ff_layout_pg_set_mirror_write,
1032};
1033
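/*
 * Requeue a failed write, either rescheduling it through pNFS (retry_pnfs)
 * or resending it through the MDS.
 */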
1034static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1035{
1036 struct rpc_task *task = &hdr->task;
1037
1038 pnfs_layoutcommit_inode(hdr->inode, false);
1039
1040 if (retry_pnfs) {
1041 dprintk("%s Reset task %5u for i/o through pNFS "
1042 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1043 hdr->task.tk_pid,
1044 hdr->inode->i_sb->s_id,
1045 (unsigned long long)NFS_FILEID(hdr->inode),
1046 hdr->args.count,
1047 (unsigned long long)hdr->args.offset);
1048
1049 hdr->completion_ops->reschedule_io(hdr);
1050 return;
1051 }
1052
1053 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1054 dprintk("%s Reset task %5u for i/o through MDS "
1055 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1056 hdr->task.tk_pid,
1057 hdr->inode->i_sb->s_id,
1058 (unsigned long long)NFS_FILEID(hdr->inode),
1059 hdr->args.count,
1060 (unsigned long long)hdr->args.offset);
1061
1062 trace_pnfs_mds_fallback_write_done(hdr->inode,
1063 hdr->args.offset, hdr->args.count,
1064 IOMODE_RW, NFS_I(hdr->inode)->layout,
1065 hdr->lseg);
1066 task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1067 }
1068}
1069
1070static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
1071{
1072 u32 idx = hdr->pgio_mirror_idx + 1;
1073 u32 new_idx = 0;
1074
1075 if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
1076 ff_layout_send_layouterror(hdr->lseg);
1077 else
1078 pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1079 pnfs_read_resend_pnfs(hdr, new_idx);
1080}
1081
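/* Mark the layout for return and resend the failed read through the MDS */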
1082static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1083{
1084 struct rpc_task *task = &hdr->task;
1085
1086 pnfs_layoutcommit_inode(hdr->inode, false);
1087 pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1088
1089 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1090 dprintk("%s Reset task %5u for i/o through MDS "
1091 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1092 hdr->task.tk_pid,
1093 hdr->inode->i_sb->s_id,
1094 (unsigned long long)NFS_FILEID(hdr->inode),
1095 hdr->args.count,
1096 (unsigned long long)hdr->args.offset);
1097
1098 trace_pnfs_mds_fallback_read_done(hdr->inode,
1099 hdr->args.offset, hdr->args.count,
1100 IOMODE_READ, NFS_I(hdr->inode)->layout,
1101 hdr->lseg);
1102 task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1103 }
1104}
1105
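/*
 * Translate an NFSv4 error from the data server into a recovery action:
 * recover the session, delay and retry (-EAGAIN), or ask the caller to
 * resend the I/O through pNFS or through the MDS.
 */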
1106static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1107 struct nfs4_state *state,
1108 struct nfs_client *clp,
1109 struct pnfs_layout_segment *lseg,
1110 u32 idx)
1111{
1112 struct pnfs_layout_hdr *lo = lseg->pls_layout;
1113 struct inode *inode = lo->plh_inode;
1114 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1115 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1116
1117 switch (task->tk_status) {
1118 case -NFS4ERR_BADSESSION:
1119 case -NFS4ERR_BADSLOT:
1120 case -NFS4ERR_BAD_HIGH_SLOT:
1121 case -NFS4ERR_DEADSESSION:
1122 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1123 case -NFS4ERR_SEQ_FALSE_RETRY:
1124 case -NFS4ERR_SEQ_MISORDERED:
1125 dprintk("%s ERROR %d, Reset session. Exchangeid "
1126 "flags 0x%x\n", __func__, task->tk_status,
1127 clp->cl_exchange_flags);
1128 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1129 break;
1130 case -NFS4ERR_DELAY:
1131 case -NFS4ERR_GRACE:
1132 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1133 break;
1134 case -NFS4ERR_RETRY_UNCACHED_REP:
1135 break;
1136 /* Invalidate Layout errors */
1137 case -NFS4ERR_PNFS_NO_LAYOUT:
1138 case -ESTALE: /* mapped NFS4ERR_STALE */
1139 case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
1140 case -EISDIR: /* mapped NFS4ERR_ISDIR */
1141 case -NFS4ERR_FHEXPIRED:
1142 case -NFS4ERR_WRONG_TYPE:
1143 dprintk("%s Invalid layout error %d\n", __func__,
1144 task->tk_status);
1145 /*
1146 * Destroy layout so new i/o will get a new layout.
1147 * Layout will not be destroyed until all current lseg
1148 * references are put. Mark layout as invalid to resend failed
1149 * i/o and all i/o waiting on the slot table to the MDS until
1150 * layout is destroyed and a new valid layout is obtained.
1151 */
1152 pnfs_destroy_layout(NFS_I(inode));
1153 rpc_wake_up(&tbl->slot_tbl_waitq);
1154 goto reset;
1155 /* RPC connection errors */
1156 case -ECONNREFUSED:
1157 case -EHOSTDOWN:
1158 case -EHOSTUNREACH:
1159 case -ENETUNREACH:
1160 case -EIO:
1161 case -ETIMEDOUT:
1162 case -EPIPE:
1163 case -EPROTO:
1164 case -ENODEV:
1165 dprintk("%s DS connection error %d\n", __func__,
1166 task->tk_status);
1167 nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1168 &devid->deviceid);
1169 rpc_wake_up(&tbl->slot_tbl_waitq);
1170 fallthrough;
1171 default:
1172 if (ff_layout_avoid_mds_available_ds(lseg))
1173 return -NFS4ERR_RESET_TO_PNFS;
1174reset:
1175 dprintk("%s Retry through MDS. Error %d\n", __func__,
1176 task->tk_status);
1177 return -NFS4ERR_RESET_TO_MDS;
1178 }
1179 task->tk_status = 0;
1180 return -EAGAIN;
1181}
1182
1183/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1184static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1185 struct pnfs_layout_segment *lseg,
1186 u32 idx)
1187{
1188 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1189
1190 switch (task->tk_status) {
1191 /* File access problems. Don't mark the device as unavailable */
1192 case -EACCES:
1193 case -ESTALE:
1194 case -EISDIR:
1195 case -EBADHANDLE:
1196 case -ELOOP:
1197 case -ENOSPC:
1198 break;
1199 case -EJUKEBOX:
1200 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1201 goto out_retry;
1202 default:
1203 dprintk("%s DS connection error %d\n", __func__,
1204 task->tk_status);
1205 nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1206 &devid->deviceid);
1207 }
1208 /* FIXME: Need to prevent infinite looping here. */
1209 return -NFS4ERR_RESET_TO_PNFS;
1210out_retry:
1211 task->tk_status = 0;
1212 rpc_restart_call_prepare(task);
1213 rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1214 return -EAGAIN;
1215}
1216
1217static int ff_layout_async_handle_error(struct rpc_task *task,
1218 struct nfs4_state *state,
1219 struct nfs_client *clp,
1220 struct pnfs_layout_segment *lseg,
1221 u32 idx)
1222{
1223 int vers = clp->cl_nfs_mod->rpc_vers->number;
1224
1225 if (task->tk_status >= 0) {
1226 ff_layout_mark_ds_reachable(lseg, idx);
1227 return 0;
1228 }
1229
1230 /* Handle the case of an invalid layout segment */
1231 if (!pnfs_is_valid_lseg(lseg))
1232 return -NFS4ERR_RESET_TO_PNFS;
1233
1234 switch (vers) {
1235 case 3:
1236 return ff_layout_async_handle_error_v3(task, lseg, idx);
1237 case 4:
1238 return ff_layout_async_handle_error_v4(task, state, clp,
1239 lseg, idx);
1240 default:
1241 /* should never happen */
1242 WARN_ON_ONCE(1);
1243 return 0;
1244 }
1245}
1246
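/*
 * Map a local transport error onto an NFS4ERR_* status, record it against
 * the mirror for later LAYOUTRETURN error reporting, and mark the device
 * as unreachable or the layout for return as appropriate.
 */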
1247static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1248 u32 idx, u64 offset, u64 length,
1249 u32 *op_status, int opnum, int error)
1250{
1251 struct nfs4_ff_layout_mirror *mirror;
1252 u32 status = *op_status;
1253 int err;
1254
1255 if (status == 0) {
1256 switch (error) {
1257 case -ETIMEDOUT:
1258 case -EPFNOSUPPORT:
1259 case -EPROTONOSUPPORT:
1260 case -EOPNOTSUPP:
1261 case -EINVAL:
1262 case -ECONNREFUSED:
1263 case -ECONNRESET:
1264 case -EHOSTDOWN:
1265 case -EHOSTUNREACH:
1266 case -ENETUNREACH:
1267 case -EADDRINUSE:
1268 case -ENOBUFS:
1269 case -EPIPE:
1270 case -EPERM:
1271 case -EPROTO:
1272 case -ENODEV:
1273 *op_status = status = NFS4ERR_NXIO;
1274 break;
1275 case -EACCES:
1276 *op_status = status = NFS4ERR_ACCESS;
1277 break;
1278 default:
1279 return;
1280 }
1281 }
1282
1283 mirror = FF_LAYOUT_COMP(lseg, idx);
1284 err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1285 mirror, offset, length, status, opnum,
1286 nfs_io_gfp_mask());
1287
1288 switch (status) {
1289 case NFS4ERR_DELAY:
1290 case NFS4ERR_GRACE:
1291 break;
1292 case NFS4ERR_NXIO:
1293 ff_layout_mark_ds_unreachable(lseg, idx);
1294 /*
1295 * Don't return the layout if this is a read and we still
1296 * have layouts to try
1297 */
1298 if (opnum == OP_READ)
1299 break;
1300 fallthrough;
1301 default:
1302 pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
1303 lseg);
1304 }
1305
1306 dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1307}
1308
1309/* NFS_PROTO call done callback routines */
1310static int ff_layout_read_done_cb(struct rpc_task *task,
1311 struct nfs_pgio_header *hdr)
1312{
1313 int err;
1314
1315 if (task->tk_status < 0) {
1316 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1317 hdr->args.offset, hdr->args.count,
1318 &hdr->res.op_status, OP_READ,
1319 task->tk_status);
1320 trace_ff_layout_read_error(hdr);
1321 }
1322
1323 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1324 hdr->ds_clp, hdr->lseg,
1325 hdr->pgio_mirror_idx);
1326
1327 trace_nfs4_pnfs_read(hdr, err);
1328 clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1329 clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1330 switch (err) {
1331 case -NFS4ERR_RESET_TO_PNFS:
1332 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1333 return task->tk_status;
1334 case -NFS4ERR_RESET_TO_MDS:
1335 set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1336 return task->tk_status;
1337 case -EAGAIN:
1338 goto out_eagain;
1339 }
1340
1341 return 0;
1342out_eagain:
1343 rpc_restart_call_prepare(task);
1344 return -EAGAIN;
1345}
1346
1347static bool
1348ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1349{
1350 return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1351}
1352
1353/*
1354 * We reference the rpc_cred of the first WRITE that triggers the need for
1355 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1356 * rfc5661 is not clear about which credential should be used.
1357 *
1358 * A flexfiles client should treat a FILE_SYNC reply from the DS as DATA_SYNC,
1359 * so to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1360 * we always send a layoutcommit after DS writes.
1361 */
1362static void
1363ff_layout_set_layoutcommit(struct inode *inode,
1364 struct pnfs_layout_segment *lseg,
1365 loff_t end_offset)
1366{
1367 if (!ff_layout_need_layoutcommit(lseg))
1368 return;
1369
1370 pnfs_set_layoutcommit(inode, lseg, end_offset);
1371 dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1372 (unsigned long long) NFS_I(inode)->layout->plh_lwb);
1373}
1374
1375static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1376 struct nfs_pgio_header *hdr)
1377{
1378 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1379 return;
1380 nfs4_ff_layout_stat_io_start_read(hdr->inode,
1381 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1382 hdr->args.count,
1383 task->tk_start);
1384}
1385
1386static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1387 struct nfs_pgio_header *hdr)
1388{
1389 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1390 return;
1391 nfs4_ff_layout_stat_io_end_read(task,
1392 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1393 hdr->args.count,
1394 hdr->res.count);
1395 set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1396}
1397
1398static int ff_layout_read_prepare_common(struct rpc_task *task,
1399 struct nfs_pgio_header *hdr)
1400{
1401 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1402 rpc_exit(task, -EIO);
1403 return -EIO;
1404 }
1405
1406 if (!pnfs_is_valid_lseg(hdr->lseg)) {
1407 rpc_exit(task, -EAGAIN);
1408 return -EAGAIN;
1409 }
1410
1411 ff_layout_read_record_layoutstats_start(task, hdr);
1412 return 0;
1413}
1414
1415/*
1416 * Call ops for the async read/write cases
1417 * In the case of dense layouts, the offset needs to be reset to its
1418 * original value.
1419 */
1420static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1421{
1422 struct nfs_pgio_header *hdr = data;
1423
1424 if (ff_layout_read_prepare_common(task, hdr))
1425 return;
1426
1427 rpc_call_start(task);
1428}
1429
1430static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1431{
1432 struct nfs_pgio_header *hdr = data;
1433
1434 if (nfs4_setup_sequence(hdr->ds_clp,
1435 &hdr->args.seq_args,
1436 &hdr->res.seq_res,
1437 task))
1438 return;
1439
1440 ff_layout_read_prepare_common(task, hdr);
1441}
1442
1443static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1444{
1445 struct nfs_pgio_header *hdr = data;
1446
1447 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1448 task->tk_status == 0) {
1449 nfs4_sequence_done(task, &hdr->res.seq_res);
1450 return;
1451 }
1452
1453 /* Note this may cause RPC to be resent */
1454 hdr->mds_ops->rpc_call_done(task, hdr);
1455}
1456
1457static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1458{
1459 struct nfs_pgio_header *hdr = data;
1460
1461 ff_layout_read_record_layoutstats_done(task, hdr);
1462 rpc_count_iostats_metrics(task,
1463 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1464}
1465
1466static void ff_layout_read_release(void *data)
1467{
1468 struct nfs_pgio_header *hdr = data;
1469
1470 ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1471 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1472 ff_layout_resend_pnfs_read(hdr);
1473 else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1474 ff_layout_reset_read(hdr);
1475 pnfs_generic_rw_release(data);
1476}
1477
1478
1479static int ff_layout_write_done_cb(struct rpc_task *task,
1480 struct nfs_pgio_header *hdr)
1481{
1482 loff_t end_offs = 0;
1483 int err;
1484
1485 if (task->tk_status < 0) {
1486 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1487 hdr->args.offset, hdr->args.count,
1488 &hdr->res.op_status, OP_WRITE,
1489 task->tk_status);
1490 trace_ff_layout_write_error(hdr);
1491 }
1492
1493 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1494 hdr->ds_clp, hdr->lseg,
1495 hdr->pgio_mirror_idx);
1496
1497 trace_nfs4_pnfs_write(hdr, err);
1498 clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1499 clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1500 switch (err) {
1501 case -NFS4ERR_RESET_TO_PNFS:
1502 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1503 return task->tk_status;
1504 case -NFS4ERR_RESET_TO_MDS:
1505 set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1506 return task->tk_status;
1507 case -EAGAIN:
1508 return -EAGAIN;
1509 }
1510
1511 if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1512 hdr->res.verf->committed == NFS_DATA_SYNC)
1513 end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1514
1515 /* Note: if the write is unstable, don't set end_offs until commit */
1516 ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1517
1518 /* zero out fattr since we don't care DS attr at all */
1519 hdr->fattr.valid = 0;
1520 if (task->tk_status >= 0)
1521 nfs_writeback_update_inode(hdr);
1522
1523 return 0;
1524}
1525
1526static int ff_layout_commit_done_cb(struct rpc_task *task,
1527 struct nfs_commit_data *data)
1528{
1529 int err;
1530
1531 if (task->tk_status < 0) {
1532 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1533 data->args.offset, data->args.count,
1534 &data->res.op_status, OP_COMMIT,
1535 task->tk_status);
1536 trace_ff_layout_commit_error(data);
1537 }
1538
1539 err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1540 data->lseg, data->ds_commit_index);
1541
1542 trace_nfs4_pnfs_commit_ds(data, err);
1543 switch (err) {
1544 case -NFS4ERR_RESET_TO_PNFS:
1545 pnfs_generic_prepare_to_resend_writes(data);
1546 return -EAGAIN;
1547 case -NFS4ERR_RESET_TO_MDS:
1548 pnfs_generic_prepare_to_resend_writes(data);
1549 return -EAGAIN;
1550 case -EAGAIN:
1551 rpc_restart_call_prepare(task);
1552 return -EAGAIN;
1553 }
1554
1555 ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1556
1557 return 0;
1558}
1559
1560static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1561 struct nfs_pgio_header *hdr)
1562{
1563 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1564 return;
1565 nfs4_ff_layout_stat_io_start_write(hdr->inode,
1566 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1567 hdr->args.count,
1568 task->tk_start);
1569}
1570
1571static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1572 struct nfs_pgio_header *hdr)
1573{
1574 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1575 return;
1576 nfs4_ff_layout_stat_io_end_write(task,
1577 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1578 hdr->args.count, hdr->res.count,
1579 hdr->res.verf->committed);
1580 set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1581}
1582
1583static int ff_layout_write_prepare_common(struct rpc_task *task,
1584 struct nfs_pgio_header *hdr)
1585{
1586 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1587 rpc_exit(task, -EIO);
1588 return -EIO;
1589 }
1590
1591 if (!pnfs_is_valid_lseg(hdr->lseg)) {
1592 rpc_exit(task, -EAGAIN);
1593 return -EAGAIN;
1594 }
1595
1596 ff_layout_write_record_layoutstats_start(task, hdr);
1597 return 0;
1598}
1599
1600static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1601{
1602 struct nfs_pgio_header *hdr = data;
1603
1604 if (ff_layout_write_prepare_common(task, hdr))
1605 return;
1606
1607 rpc_call_start(task);
1608}
1609
1610static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1611{
1612 struct nfs_pgio_header *hdr = data;
1613
1614 if (nfs4_setup_sequence(hdr->ds_clp,
1615 &hdr->args.seq_args,
1616 &hdr->res.seq_res,
1617 task))
1618 return;
1619
1620 ff_layout_write_prepare_common(task, hdr);
1621}
1622
1623static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1624{
1625 struct nfs_pgio_header *hdr = data;
1626
1627 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1628 task->tk_status == 0) {
1629 nfs4_sequence_done(task, &hdr->res.seq_res);
1630 return;
1631 }
1632
1633 /* Note this may cause RPC to be resent */
1634 hdr->mds_ops->rpc_call_done(task, hdr);
1635}
1636
1637static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1638{
1639 struct nfs_pgio_header *hdr = data;
1640
1641 ff_layout_write_record_layoutstats_done(task, hdr);
1642 rpc_count_iostats_metrics(task,
1643 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1644}
1645
1646static void ff_layout_write_release(void *data)
1647{
1648 struct nfs_pgio_header *hdr = data;
1649
1650 ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1651 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1652 ff_layout_send_layouterror(hdr->lseg);
1653 ff_layout_reset_write(hdr, true);
1654 } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1655 ff_layout_reset_write(hdr, false);
1656 pnfs_generic_rw_release(data);
1657}
1658
1659static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1660 struct nfs_commit_data *cdata)
1661{
1662 if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1663 return;
1664 nfs4_ff_layout_stat_io_start_write(cdata->inode,
1665 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1666 0, task->tk_start);
1667}
1668
1669static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1670 struct nfs_commit_data *cdata)
1671{
1672 struct nfs_page *req;
1673 __u64 count = 0;
1674
1675 if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1676 return;
1677
1678 if (task->tk_status == 0) {
1679 list_for_each_entry(req, &cdata->pages, wb_list)
1680 count += req->wb_bytes;
1681 }
1682 nfs4_ff_layout_stat_io_end_write(task,
1683 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1684 count, count, NFS_FILE_SYNC);
1685 set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1686}
1687
1688static int ff_layout_commit_prepare_common(struct rpc_task *task,
1689 struct nfs_commit_data *cdata)
1690{
1691 if (!pnfs_is_valid_lseg(cdata->lseg)) {
1692 rpc_exit(task, -EAGAIN);
1693 return -EAGAIN;
1694 }
1695
1696 ff_layout_commit_record_layoutstats_start(task, cdata);
1697 return 0;
1698}
1699
1700static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1701{
1702 if (ff_layout_commit_prepare_common(task, data))
1703 return;
1704
1705 rpc_call_start(task);
1706}
1707
1708static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1709{
1710 struct nfs_commit_data *wdata = data;
1711
1712 if (nfs4_setup_sequence(wdata->ds_clp,
1713 &wdata->args.seq_args,
1714 &wdata->res.seq_res,
1715 task))
1716 return;
1717 ff_layout_commit_prepare_common(task, data);
1718}
1719
1720static void ff_layout_commit_done(struct rpc_task *task, void *data)
1721{
1722 pnfs_generic_write_commit_done(task, data);
1723}
1724
1725static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1726{
1727 struct nfs_commit_data *cdata = data;
1728
1729 ff_layout_commit_record_layoutstats_done(task, cdata);
1730 rpc_count_iostats_metrics(task,
1731 &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1732}
1733
1734static void ff_layout_commit_release(void *data)
1735{
1736 struct nfs_commit_data *cdata = data;
1737
1738 ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1739 pnfs_generic_commit_release(data);
1740}
1741
1742static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1743 .rpc_call_prepare = ff_layout_read_prepare_v3,
1744 .rpc_call_done = ff_layout_read_call_done,
1745 .rpc_count_stats = ff_layout_read_count_stats,
1746 .rpc_release = ff_layout_read_release,
1747};
1748
1749static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1750 .rpc_call_prepare = ff_layout_read_prepare_v4,
1751 .rpc_call_done = ff_layout_read_call_done,
1752 .rpc_count_stats = ff_layout_read_count_stats,
1753 .rpc_release = ff_layout_read_release,
1754};
1755
1756static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1757 .rpc_call_prepare = ff_layout_write_prepare_v3,
1758 .rpc_call_done = ff_layout_write_call_done,
1759 .rpc_count_stats = ff_layout_write_count_stats,
1760 .rpc_release = ff_layout_write_release,
1761};
1762
1763static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1764 .rpc_call_prepare = ff_layout_write_prepare_v4,
1765 .rpc_call_done = ff_layout_write_call_done,
1766 .rpc_count_stats = ff_layout_write_count_stats,
1767 .rpc_release = ff_layout_write_release,
1768};
1769
1770static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1771 .rpc_call_prepare = ff_layout_commit_prepare_v3,
1772 .rpc_call_done = ff_layout_commit_done,
1773 .rpc_count_stats = ff_layout_commit_count_stats,
1774 .rpc_release = ff_layout_commit_release,
1775};
1776
1777static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1778 .rpc_call_prepare = ff_layout_commit_prepare_v4,
1779 .rpc_call_done = ff_layout_commit_done,
1780 .rpc_count_stats = ff_layout_commit_count_stats,
1781 .rpc_release = ff_layout_commit_release,
1782};
1783
1784static enum pnfs_try_status
1785ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1786{
1787 struct pnfs_layout_segment *lseg = hdr->lseg;
1788 struct nfs4_pnfs_ds *ds;
1789 struct rpc_clnt *ds_clnt;
1790 struct nfsd_file *localio;
1791 struct nfs4_ff_layout_mirror *mirror;
1792 const struct cred *ds_cred;
1793 loff_t offset = hdr->args.offset;
1794 u32 idx = hdr->pgio_mirror_idx;
1795 int vers;
1796 struct nfs_fh *fh;
1797
1798 dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1799 __func__, hdr->inode->i_ino,
1800 hdr->args.pgbase, (size_t)hdr->args.count, offset);
1801
1802 mirror = FF_LAYOUT_COMP(lseg, idx);
1803 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1804 if (!ds)
1805 goto out_failed;
1806
1807 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1808 hdr->inode);
1809 if (IS_ERR(ds_clnt))
1810 goto out_failed;
1811
1812 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1813 if (!ds_cred)
1814 goto out_failed;
1815
1816 vers = nfs4_ff_layout_ds_version(mirror);
1817
1818 dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1819 ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1820
1821 hdr->pgio_done_cb = ff_layout_read_done_cb;
1822 refcount_inc(&ds->ds_clp->cl_count);
1823 hdr->ds_clp = ds->ds_clp;
1824 fh = nfs4_ff_layout_select_ds_fh(mirror);
1825 if (fh)
1826 hdr->args.fh = fh;
1827
1828 nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1829
1830 /*
1831 * Note that if we ever decide to split across DSes,
1832 * then we may need to handle dense-like offsets.
1833 */
1834 hdr->args.offset = offset;
1835 hdr->mds_offset = offset;
1836
1837 /* Start IO accounting for local read */
1838 localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, FMODE_READ);
1839 if (localio) {
1840 hdr->task.tk_start = ktime_get();
1841 ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
1842 }
1843
1844 /* Perform an asynchronous read to ds */
1845 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1846 vers == 3 ? &ff_layout_read_call_ops_v3 :
1847 &ff_layout_read_call_ops_v4,
1848 0, RPC_TASK_SOFTCONN, localio);
1849 put_cred(ds_cred);
1850 return PNFS_ATTEMPTED;
1851
1852out_failed:
1853 if (ff_layout_avoid_mds_available_ds(lseg))
1854 return PNFS_TRY_AGAIN;
1855 trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
1856 hdr->args.offset, hdr->args.count,
1857 IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
1858 return PNFS_NOT_ATTEMPTED;
1859}
1860
1861/* Perform async writes. */
1862static enum pnfs_try_status
1863ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1864{
1865 struct pnfs_layout_segment *lseg = hdr->lseg;
1866 struct nfs4_pnfs_ds *ds;
1867 struct rpc_clnt *ds_clnt;
1868 struct nfsd_file *localio;
1869 struct nfs4_ff_layout_mirror *mirror;
1870 const struct cred *ds_cred;
1871 loff_t offset = hdr->args.offset;
1872 int vers;
1873 struct nfs_fh *fh;
1874 u32 idx = hdr->pgio_mirror_idx;
1875
1876 mirror = FF_LAYOUT_COMP(lseg, idx);
1877 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1878 if (!ds)
1879 goto out_failed;
1880
1881 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1882 hdr->inode);
1883 if (IS_ERR(ds_clnt))
1884 goto out_failed;
1885
1886 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1887 if (!ds_cred)
1888 goto out_failed;
1889
1890 vers = nfs4_ff_layout_ds_version(mirror);
1891
1892 dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1893 __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1894 offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1895 vers);
1896
1897 hdr->pgio_done_cb = ff_layout_write_done_cb;
1898 refcount_inc(&ds->ds_clp->cl_count);
1899 hdr->ds_clp = ds->ds_clp;
1900 hdr->ds_commit_idx = idx;
1901 fh = nfs4_ff_layout_select_ds_fh(mirror);
1902 if (fh)
1903 hdr->args.fh = fh;
1904
1905 nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1906
1907 /*
1908 * Note that if we ever decide to split across DSes,
1909 * then we may need to handle dense-like offsets.
1910 */
1911 hdr->args.offset = offset;
1912
1913 /* Start IO accounting for local write */
1914 localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
1915 FMODE_READ|FMODE_WRITE);
1916 if (localio) {
1917 hdr->task.tk_start = ktime_get();
1918 ff_layout_write_record_layoutstats_start(&hdr->task, hdr);
1919 }
1920
1921 /* Perform an asynchronous write */
1922 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1923 vers == 3 ? &ff_layout_write_call_ops_v3 :
1924 &ff_layout_write_call_ops_v4,
1925 sync, RPC_TASK_SOFTCONN, localio);
1926 put_cred(ds_cred);
1927 return PNFS_ATTEMPTED;
1928
1929out_failed:
1930 if (ff_layout_avoid_mds_available_ds(lseg))
1931 return PNFS_TRY_AGAIN;
1932 trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
1933 hdr->args.offset, hdr->args.count,
1934 IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
1935 return PNFS_NOT_ATTEMPTED;
1936}
1937
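/* Each mirror has a single DS, so the commit index maps 1:1 to the mirror index */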
1938static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1939{
1940 return i;
1941}
1942
1943static struct nfs_fh *
1944select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1945{
1946 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1947
1948 /* FIXME: Assume that there is only one NFS version available
1949 * for the DS.
1950 */
1951 return &flseg->mirror_array[i]->fh_versions[0];
1952}
1953
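/*
 * Send a COMMIT to the DS backing the given commit bucket. If the DS
 * cannot be set up, arrange for the writes to be resent through the MDS.
 */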
1954static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1955{
1956 struct pnfs_layout_segment *lseg = data->lseg;
1957 struct nfs4_pnfs_ds *ds;
1958 struct rpc_clnt *ds_clnt;
1959 struct nfsd_file *localio;
1960 struct nfs4_ff_layout_mirror *mirror;
1961 const struct cred *ds_cred;
1962 u32 idx;
1963 int vers, ret;
1964 struct nfs_fh *fh;
1965
1966 if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1967 test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1968 goto out_err;
1969
1970 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1971 mirror = FF_LAYOUT_COMP(lseg, idx);
1972 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1973 if (!ds)
1974 goto out_err;
1975
1976 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1977 data->inode);
1978 if (IS_ERR(ds_clnt))
1979 goto out_err;
1980
1981 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
1982 if (!ds_cred)
1983 goto out_err;
1984
1985 vers = nfs4_ff_layout_ds_version(mirror);
1986
1987 dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1988 data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
1989 vers);
1990 data->commit_done_cb = ff_layout_commit_done_cb;
1991 data->cred = ds_cred;
1992 refcount_inc(&ds->ds_clp->cl_count);
1993 data->ds_clp = ds->ds_clp;
1994 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1995 if (fh)
1996 data->args.fh = fh;
1997
1998 /* Start IO accounting for local commit */
1999 localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
2000 FMODE_READ|FMODE_WRITE);
2001 if (localio) {
2002 data->task.tk_start = ktime_get();
2003 ff_layout_commit_record_layoutstats_start(&data->task, data);
2004 }
2005
2006 ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
2007 vers == 3 ? &ff_layout_commit_call_ops_v3 :
2008 &ff_layout_commit_call_ops_v4,
2009 how, RPC_TASK_SOFTCONN, localio);
2010 put_cred(ds_cred);
2011 return ret;
2012out_err:
2013 pnfs_generic_prepare_to_resend_writes(data);
2014 pnfs_generic_commit_release(data);
2015 return -EAGAIN;
2016}
2017
2018static int
2019ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
2020 int how, struct nfs_commit_info *cinfo)
2021{
2022 return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
2023 ff_layout_initiate_commit);
2024}
2025
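/*
 * Matching helpers used by ff_layout_cancel_io() to identify in-flight
 * read, write and commit RPCs that belong to a given layout segment.
 */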
2026static bool ff_layout_match_rw(const struct rpc_task *task,
2027 const struct nfs_pgio_header *hdr,
2028 const struct pnfs_layout_segment *lseg)
2029{
2030 return hdr->lseg == lseg;
2031}
2032
2033static bool ff_layout_match_commit(const struct rpc_task *task,
2034 const struct nfs_commit_data *cdata,
2035 const struct pnfs_layout_segment *lseg)
2036{
2037 return cdata->lseg == lseg;
2038}
2039
2040static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
2041{
2042 const struct rpc_call_ops *ops = task->tk_ops;
2043
2044 if (ops == &ff_layout_read_call_ops_v3 ||
2045 ops == &ff_layout_read_call_ops_v4 ||
2046 ops == &ff_layout_write_call_ops_v3 ||
2047 ops == &ff_layout_write_call_ops_v4)
2048 return ff_layout_match_rw(task, task->tk_calldata, data);
2049 if (ops == &ff_layout_commit_call_ops_v3 ||
2050 ops == &ff_layout_commit_call_ops_v4)
2051 return ff_layout_match_commit(task, task->tk_calldata, data);
2052 return false;
2053}
2054
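/*
 * Cancel any RPCs in flight to the data servers for this layout segment
 * and disconnect the affected transports so the tasks terminate promptly.
 */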
2055static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
2056{
2057 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2058 struct nfs4_ff_layout_mirror *mirror;
2059 struct nfs4_ff_layout_ds *mirror_ds;
2060 struct nfs4_pnfs_ds *ds;
2061 struct nfs_client *ds_clp;
2062 struct rpc_clnt *clnt;
2063 u32 idx;
2064
2065 for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
2066 mirror = flseg->mirror_array[idx];
2067 mirror_ds = mirror->mirror_ds;
2068 if (IS_ERR_OR_NULL(mirror_ds))
2069 continue;
2070 ds = mirror->mirror_ds->ds;
2071 if (!ds)
2072 continue;
2073 ds_clp = ds->ds_clp;
2074 if (!ds_clp)
2075 continue;
2076 clnt = ds_clp->cl_rpcclient;
2077 if (!clnt)
2078 continue;
2079 if (!rpc_cancel_tasks(clnt, -EAGAIN, ff_layout_match_io, lseg))
2080 continue;
2081 rpc_clnt_disconnect(clnt);
2082 }
2083}
2084
2085static struct pnfs_ds_commit_info *
2086ff_layout_get_ds_info(struct inode *inode)
2087{
2088 struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
2089
2090 if (layout == NULL)
2091 return NULL;
2092
2093 return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
2094}
2095
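/*
 * Attach a commit bucket array (one bucket per mirror) to the inode's
 * commit info for this layout segment.
 */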
2096static void
2097ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2098 struct pnfs_layout_segment *lseg)
2099{
2100 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2101 struct inode *inode = lseg->pls_layout->plh_inode;
2102 struct pnfs_commit_array *array, *new;
2103
2104 new = pnfs_alloc_commit_array(flseg->mirror_array_cnt,
2105 nfs_io_gfp_mask());
2106 if (new) {
2107 spin_lock(&inode->i_lock);
2108 array = pnfs_add_commit_array(fl_cinfo, new, lseg);
2109 spin_unlock(&inode->i_lock);
2110 if (array != new)
2111 pnfs_free_commit_array(new);
2112 }
2113}
2114
2115static void
2116ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2117 struct inode *inode)
2118{
2119 spin_lock(&inode->i_lock);
2120 pnfs_generic_ds_cinfo_destroy(fl_cinfo);
2121 spin_unlock(&inode->i_lock);
2122}
2123
2124static void
2125ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
2126{
2127 nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
2128 id_node));
2129}
2130
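/* Encode the array of DS I/O errors being reported in LAYOUTRETURN */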
2131static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
2132 const struct nfs4_layoutreturn_args *args,
2133 const struct nfs4_flexfile_layoutreturn_args *ff_args)
2134{
2135 __be32 *start;
2136
2137 start = xdr_reserve_space(xdr, 4);
2138 if (unlikely(!start))
2139 return -E2BIG;
2140
2141 *start = cpu_to_be32(ff_args->num_errors);
2142	/* This assumes we always return _ALL_ layouts */
2143 return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2144}
2145
2146static void
2147ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2148 const nfs4_stateid *stateid,
2149 const struct nfs42_layoutstat_devinfo *devinfo)
2150{
2151 __be32 *p;
2152
2153 p = xdr_reserve_space(xdr, 8 + 8);
2154 p = xdr_encode_hyper(p, devinfo->offset);
2155 p = xdr_encode_hyper(p, devinfo->length);
2156 encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2157 p = xdr_reserve_space(xdr, 4*8);
2158 p = xdr_encode_hyper(p, devinfo->read_count);
2159 p = xdr_encode_hyper(p, devinfo->read_bytes);
2160 p = xdr_encode_hyper(p, devinfo->write_count);
2161 p = xdr_encode_hyper(p, devinfo->write_bytes);
2162 encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2163}
2164
2165static void
2166ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2167 const nfs4_stateid *stateid,
2168 const struct nfs42_layoutstat_devinfo *devinfo)
2169{
2170 ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2171 ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2172 devinfo->ld_private.data);
2173}
2174
2175/* Encode the per-device iostats collected for this layoutreturn */
2176static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2177 const struct nfs4_layoutreturn_args *args,
2178 struct nfs4_flexfile_layoutreturn_args *ff_args)
2179{
2180 __be32 *p;
2181 int i;
2182
2183 p = xdr_reserve_space(xdr, 4);
2184 *p = cpu_to_be32(ff_args->num_dev);
2185 for (i = 0; i < ff_args->num_dev; i++)
2186 ff_layout_encode_ff_iostat(xdr,
2187 &args->layout->plh_stateid,
2188 &ff_args->devinfo[i]);
2189}
2190
2191static void
2192ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2193 unsigned int num_entries)
2194{
2195 unsigned int i;
2196
2197 for (i = 0; i < num_entries; i++) {
2198 if (!devinfo[i].ld_private.ops)
2199 continue;
2200 if (!devinfo[i].ld_private.ops->free)
2201 continue;
2202 devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2203 }
2204}
2205
2206static struct nfs4_deviceid_node *
2207ff_layout_alloc_deviceid_node(struct nfs_server *server,
2208 struct pnfs_device *pdev, gfp_t gfp_flags)
2209{
2210 struct nfs4_ff_layout_ds *dsaddr;
2211
2212 dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2213 if (!dsaddr)
2214 return NULL;
2215 return &dsaddr->id_node;
2216}
2217
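/*
 * Encode the flexfiles LAYOUTRETURN body (I/O errors followed by the
 * iostats array) into a scratch page, then emit it as one opaque blob.
 */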
2218static void
2219ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2220 const void *voidargs,
2221 const struct nfs4_xdr_opaque_data *ff_opaque)
2222{
2223 const struct nfs4_layoutreturn_args *args = voidargs;
2224 struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2225 struct xdr_buf tmp_buf = {
2226 .head = {
2227 [0] = {
2228 .iov_base = page_address(ff_args->pages[0]),
2229 },
2230 },
2231 .buflen = PAGE_SIZE,
2232 };
2233 struct xdr_stream tmp_xdr;
2234 __be32 *start;
2235
2236 dprintk("%s: Begin\n", __func__);
2237
2238 xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2239
2240 ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2241 ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2242
2243 start = xdr_reserve_space(xdr, 4);
2244 *start = cpu_to_be32(tmp_buf.len);
2245 xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2246
2247 dprintk("%s: Return\n", __func__);
2248}
2249
2250static void
2251ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2252{
2253 struct nfs4_flexfile_layoutreturn_args *ff_args;
2254
2255 if (!args->data)
2256 return;
2257 ff_args = args->data;
2258 args->data = NULL;
2259
2260 ff_layout_free_ds_ioerr(&ff_args->errors);
2261 ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2262
2263 put_page(ff_args->pages[0]);
2264 kfree(ff_args);
2265}
2266
2267static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2268 .encode = ff_layout_encode_layoutreturn,
2269 .free = ff_layout_free_layoutreturn,
2270};
2271
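/*
 * Collect the queued DS errors and per-mirror statistics that will be
 * returned to the MDS as part of the LAYOUTRETURN payload.
 */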
2272static int
2273ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2274{
2275 struct nfs4_flexfile_layoutreturn_args *ff_args;
2276 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2277
2278 ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask());
2279 if (!ff_args)
2280 goto out_nomem;
2281 ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
2282 if (!ff_args->pages[0])
2283 goto out_nomem_free;
2284
2285 INIT_LIST_HEAD(&ff_args->errors);
2286 ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2287 &args->range, &ff_args->errors,
2288 FF_LAYOUTRETURN_MAXERR);
2289
2290 spin_lock(&args->inode->i_lock);
2291 ff_args->num_dev = ff_layout_mirror_prepare_stats(
2292 &ff_layout->generic_hdr, &ff_args->devinfo[0],
2293 ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
2294 spin_unlock(&args->inode->i_lock);
2295
2296 args->ld_private->ops = &layoutreturn_ops;
2297 args->ld_private->data = ff_args;
2298 return 0;
2299out_nomem_free:
2300 kfree(ff_args);
2301out_nomem:
2302 return -ENOMEM;
2303}
2304
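/*
 * Report recorded DS I/O errors for this lseg to the MDS using the
 * NFSv4.2 LAYOUTERROR operation, in batches of at most
 * NFS42_LAYOUTERROR_MAX entries.
 */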
2305#ifdef CONFIG_NFS_V4_2
2306void
2307ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2308{
2309 struct pnfs_layout_hdr *lo = lseg->pls_layout;
2310 struct nfs42_layout_error *errors;
2311 LIST_HEAD(head);
2312
2313 if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2314 return;
2315 ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2316 if (list_empty(&head))
2317 return;
2318
2319 errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors),
2320 nfs_io_gfp_mask());
2321 if (errors != NULL) {
2322 const struct nfs4_ff_layout_ds_err *pos;
2323 size_t n = 0;
2324
2325 list_for_each_entry(pos, &head, list) {
2326 errors[n].offset = pos->offset;
2327 errors[n].length = pos->length;
2328 nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2329 errors[n].errors[0].dev_id = pos->deviceid;
2330 errors[n].errors[0].status = pos->status;
2331 errors[n].errors[0].opnum = pos->opnum;
2332 n++;
2333 if (!list_is_last(&pos->list, &head) &&
2334 n < NFS42_LAYOUTERROR_MAX)
2335 continue;
2336 if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2337 break;
2338 n = 0;
2339 }
2340 kfree(errors);
2341 }
2342 ff_layout_free_ds_ioerr(&head);
2343}
2344#else
2345void
2346ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2347{
2348}
2349#endif
2350
2351static int
2352ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2353{
2354 const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2355
2356 return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2357}
2358
2359static size_t
2360ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2361 const int buflen)
2362{
2363 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2364 const struct in6_addr *addr = &sin6->sin6_addr;
2365
2366 /*
2367 * RFC 4291, Section 2.2.2
2368 *
2369 * Shorthanded ANY address
2370 */
2371 if (ipv6_addr_any(addr))
2372 return snprintf(buf, buflen, "::");
2373
2374 /*
2375 * RFC 4291, Section 2.2.2
2376 *
2377 * Shorthanded loopback address
2378 */
2379 if (ipv6_addr_loopback(addr))
2380 return snprintf(buf, buflen, "::1");
2381
2382 /*
2383 * RFC 4291, Section 2.2.3
2384 *
2385 * Special presentation address format for mapped v4
2386 * addresses.
2387 */
2388 if (ipv6_addr_v4mapped(addr))
2389 return snprintf(buf, buflen, "::ffff:%pI4",
2390 &addr->s6_addr32[3]);
2391
2392 /*
2393 * RFC 4291, Section 2.2.1
2394 */
2395 return snprintf(buf, buflen, "%pI6c", addr);
2396}
2397
2398/* Derived from rpc_sockaddr2uaddr */
2399static void
2400ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2401{
2402 struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2403 char portbuf[RPCBIND_MAXUADDRPLEN];
2404 char addrbuf[RPCBIND_MAXUADDRLEN];
2405 unsigned short port;
2406 int len, netid_len;
2407 __be32 *p;
2408
2409 switch (sap->sa_family) {
2410 case AF_INET:
2411 if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2412 return;
2413 port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2414 break;
2415 case AF_INET6:
2416 if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2417 return;
2418 port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2419 break;
2420 default:
2421 WARN_ON_ONCE(1);
2422 return;
2423 }
2424
2425 snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2426 len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2427
2428 netid_len = strlen(da->da_netid);
2429 p = xdr_reserve_space(xdr, 4 + netid_len);
2430 xdr_encode_opaque(p, da->da_netid, netid_len);
2431
2432 p = xdr_reserve_space(xdr, 4 + len);
2433 xdr_encode_opaque(p, addrbuf, len);
2434}
2435
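/* Encode a ktime_t as an nfstime4 (64-bit seconds, 32-bit nanoseconds) */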
2436static void
2437ff_layout_encode_nfstime(struct xdr_stream *xdr,
2438 ktime_t t)
2439{
2440 struct timespec64 ts;
2441 __be32 *p;
2442
2443 p = xdr_reserve_space(xdr, 12);
2444 ts = ktime_to_timespec64(t);
2445 p = xdr_encode_hyper(p, ts.tv_sec);
2446 *p++ = cpu_to_be32(ts.tv_nsec);
2447}
2448
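/* Encode an ff_io_latency4: op/byte counters plus busy and completion times */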
2449static void
2450ff_layout_encode_io_latency(struct xdr_stream *xdr,
2451 struct nfs4_ff_io_stat *stat)
2452{
2453 __be32 *p;
2454
2455 p = xdr_reserve_space(xdr, 5 * 8);
2456 p = xdr_encode_hyper(p, stat->ops_requested);
2457 p = xdr_encode_hyper(p, stat->bytes_requested);
2458 p = xdr_encode_hyper(p, stat->ops_completed);
2459 p = xdr_encode_hyper(p, stat->bytes_completed);
2460 p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2461 ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2462 ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2463}
2464
2465static void
2466ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2467 const struct nfs42_layoutstat_devinfo *devinfo,
2468 struct nfs4_ff_layout_mirror *mirror)
2469{
2470 struct nfs4_pnfs_ds_addr *da;
2471 struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2472 struct nfs_fh *fh = &mirror->fh_versions[0];
2473 __be32 *p;
2474
2475 da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2476 dprintk("%s: DS %s: encoding address %s\n",
2477 __func__, ds->ds_remotestr, da->da_remotestr);
2478 /* netaddr4 */
2479 ff_layout_encode_netaddr(xdr, da);
2480 /* nfs_fh4 */
2481 p = xdr_reserve_space(xdr, 4 + fh->size);
2482 xdr_encode_opaque(p, fh->data, fh->size);
2483 /* ff_io_latency4 read */
2484 spin_lock(&mirror->lock);
2485 ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2486 /* ff_io_latency4 write */
2487 ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2488 spin_unlock(&mirror->lock);
2489 /* nfstime4 */
2490 ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2491 /* bool */
2492 p = xdr_reserve_space(xdr, 4);
2493 *p = cpu_to_be32(false);
2494}
2495
2496static void
2497ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2498 const struct nfs4_xdr_opaque_data *opaque)
2499{
2500 struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2501 struct nfs42_layoutstat_devinfo, ld_private);
2502 __be32 *start;
2503
2504 /* layoutupdate length */
2505 start = xdr_reserve_space(xdr, 4);
2506 ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2507
2508 *start = cpu_to_be32((xdr->p - start - 1) * 4);
2509}
2510
2511static void
2512ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2513{
2514 struct nfs4_ff_layout_mirror *mirror = opaque->data;
2515
2516 ff_layout_put_mirror(mirror);
2517}
2518
2519static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2520 .encode = ff_layout_encode_layoutstats,
2521 .free = ff_layout_free_layoutstats,
2522};
2523
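/*
 * Fill in up to dev_limit layoutstat devinfo entries, one per mirror.
 * For LAYOUTSTATS only mirrors with fresh statistics are reported;
 * callers hold inode->i_lock.
 */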
2524static int
2525ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2526 struct nfs42_layoutstat_devinfo *devinfo,
2527 int dev_limit, enum nfs4_ff_op_type type)
2528{
2529 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2530 struct nfs4_ff_layout_mirror *mirror;
2531 struct nfs4_deviceid_node *dev;
2532 int i = 0;
2533
2534 list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2535 if (i >= dev_limit)
2536 break;
2537 if (IS_ERR_OR_NULL(mirror->mirror_ds))
2538 continue;
2539 if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
2540 &mirror->flags) &&
2541 type != NFS4_FF_OP_LAYOUTRETURN)
2542 continue;
2543		/* mirror refcount put in ff_layout_free_layoutstats */
2544 if (!refcount_inc_not_zero(&mirror->ref))
2545 continue;
2546 dev = &mirror->mirror_ds->id_node;
2547 memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2548 devinfo->offset = 0;
2549 devinfo->length = NFS4_MAX_UINT64;
2550 spin_lock(&mirror->lock);
2551 devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2552 devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2553 devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2554 devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2555 spin_unlock(&mirror->lock);
2556 devinfo->layout_type = LAYOUT_FLEX_FILES;
2557 devinfo->ld_private.ops = &layoutstat_ops;
2558 devinfo->ld_private.data = mirror;
2559
2560 devinfo++;
2561 i++;
2562 }
2563 return i;
2564}
2565
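/*
 * Allocate and populate the devinfo array for a LAYOUTSTATS call.
 * Returns -ENOENT if there is currently nothing to report.
 */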
2566static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2567{
2568 struct pnfs_layout_hdr *lo;
2569 struct nfs4_flexfile_layout *ff_layout;
2570 const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2571
2572 /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2573 args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo),
2574 nfs_io_gfp_mask());
2575 if (!args->devinfo)
2576 return -ENOMEM;
2577
2578 spin_lock(&args->inode->i_lock);
2579 lo = NFS_I(args->inode)->layout;
2580 if (lo && pnfs_layout_is_valid(lo)) {
2581 ff_layout = FF_LAYOUT_FROM_HDR(lo);
2582 args->num_dev = ff_layout_mirror_prepare_stats(
2583 &ff_layout->generic_hdr, &args->devinfo[0], dev_count,
2584 NFS4_FF_OP_LAYOUTSTATS);
2585 } else
2586 args->num_dev = 0;
2587 spin_unlock(&args->inode->i_lock);
2588 if (!args->num_dev) {
2589 kfree(args->devinfo);
2590 args->devinfo = NULL;
2591 return -ENOENT;
2592 }
2593
2594 return 0;
2595}
2596
2597static int
2598ff_layout_set_layoutdriver(struct nfs_server *server,
2599 const struct nfs_fh *dummy)
2600{
2601#if IS_ENABLED(CONFIG_NFS_V4_2)
2602 server->caps |= NFS_CAP_LAYOUTSTATS | NFS_CAP_REBOOT_LAYOUTRETURN;
2603#endif
2604 return 0;
2605}
2606
2607static const struct pnfs_commit_ops ff_layout_commit_ops = {
2608 .setup_ds_info = ff_layout_setup_ds_info,
2609 .release_ds_info = ff_layout_release_ds_info,
2610 .mark_request_commit = pnfs_layout_mark_request_commit,
2611 .clear_request_commit = pnfs_generic_clear_request_commit,
2612 .scan_commit_lists = pnfs_generic_scan_commit_lists,
2613 .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
2614 .commit_pagelist = ff_layout_commit_pagelist,
2615};
2616
2617static struct pnfs_layoutdriver_type flexfilelayout_type = {
2618 .id = LAYOUT_FLEX_FILES,
2619 .name = "LAYOUT_FLEX_FILES",
2620 .owner = THIS_MODULE,
2621 .flags = PNFS_LAYOUTGET_ON_OPEN,
2622 .max_layoutget_response = 4096, /* 1 page or so... */
2623 .set_layoutdriver = ff_layout_set_layoutdriver,
2624 .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
2625 .free_layout_hdr = ff_layout_free_layout_hdr,
2626 .alloc_lseg = ff_layout_alloc_lseg,
2627 .free_lseg = ff_layout_free_lseg,
2628 .add_lseg = ff_layout_add_lseg,
2629 .pg_read_ops = &ff_layout_pg_read_ops,
2630 .pg_write_ops = &ff_layout_pg_write_ops,
2631 .get_ds_info = ff_layout_get_ds_info,
2632 .free_deviceid_node = ff_layout_free_deviceid_node,
2633 .read_pagelist = ff_layout_read_pagelist,
2634 .write_pagelist = ff_layout_write_pagelist,
2635 .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
2636 .prepare_layoutreturn = ff_layout_prepare_layoutreturn,
2637 .sync = pnfs_nfs_generic_sync,
2638 .prepare_layoutstats = ff_layout_prepare_layoutstats,
2639 .cancel_io = ff_layout_cancel_io,
2640};
2641
2642static int __init nfs4flexfilelayout_init(void)
2643{
2644 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2645 __func__);
2646 return pnfs_register_layoutdriver(&flexfilelayout_type);
2647}
2648
2649static void __exit nfs4flexfilelayout_exit(void)
2650{
2651 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2652 __func__);
2653 pnfs_unregister_layoutdriver(&flexfilelayout_type);
2654}
2655
2656MODULE_ALIAS("nfs-layouttype4-4");
2657
2658MODULE_LICENSE("GPL");
2659MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2660
2661module_init(nfs4flexfilelayout_init);
2662module_exit(nfs4flexfilelayout_exit);
2663
2664module_param(io_maxretrans, ushort, 0644);
2665MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
2666		"retries an I/O request before returning an error.");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Module for pnfs flexfile layout driver.
4 *
5 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
6 *
7 * Tao Peng <bergwolf@primarydata.com>
8 */
9
10#include <linux/nfs_fs.h>
11#include <linux/nfs_mount.h>
12#include <linux/nfs_page.h>
13#include <linux/module.h>
14#include <linux/sched/mm.h>
15
16#include <linux/sunrpc/metrics.h>
17
18#include "flexfilelayout.h"
19#include "../nfs4session.h"
20#include "../nfs4idmap.h"
21#include "../internal.h"
22#include "../delegation.h"
23#include "../nfs4trace.h"
24#include "../iostat.h"
25#include "../nfs.h"
26#include "../nfs42.h"
27
28#define NFSDBG_FACILITY NFSDBG_PNFS_LD
29
30#define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
31#define FF_LAYOUTRETURN_MAXERR 20
32
33enum nfs4_ff_op_type {
34 NFS4_FF_OP_LAYOUTSTATS,
35 NFS4_FF_OP_LAYOUTRETURN,
36};
37
38static unsigned short io_maxretrans;
39
40static const struct pnfs_commit_ops ff_layout_commit_ops;
41static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
42 struct nfs_pgio_header *hdr);
43static int
44ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
45 struct nfs42_layoutstat_devinfo *devinfo,
46 int dev_limit, enum nfs4_ff_op_type type);
47static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
48 const struct nfs42_layoutstat_devinfo *devinfo,
49 struct nfs4_ff_layout_mirror *mirror);
50
51static struct pnfs_layout_hdr *
52ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
53{
54 struct nfs4_flexfile_layout *ffl;
55
56 ffl = kzalloc(sizeof(*ffl), gfp_flags);
57 if (ffl) {
58 pnfs_init_ds_commit_info(&ffl->commit_info);
59 INIT_LIST_HEAD(&ffl->error_list);
60 INIT_LIST_HEAD(&ffl->mirrors);
61 ffl->last_report_time = ktime_get();
62 ffl->commit_info.ops = &ff_layout_commit_ops;
63 return &ffl->generic_hdr;
64 } else
65 return NULL;
66}
67
68static void
69ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
70{
71 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
72 struct nfs4_ff_layout_ds_err *err, *n;
73
74 list_for_each_entry_safe(err, n, &ffl->error_list, list) {
75 list_del(&err->list);
76 kfree(err);
77 }
78 kfree_rcu(ffl, generic_hdr.plh_rcu);
79}
80
81static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
82{
83 __be32 *p;
84
85 p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
86 if (unlikely(p == NULL))
87 return -ENOBUFS;
88 stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
89 memcpy(stateid->data, p, NFS4_STATEID_SIZE);
90 dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
91 p[0], p[1], p[2], p[3]);
92 return 0;
93}
94
95static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
96{
97 __be32 *p;
98
99 p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
100 if (unlikely(!p))
101 return -ENOBUFS;
102 memcpy(devid, p, NFS4_DEVICEID4_SIZE);
103 nfs4_print_deviceid(devid);
104 return 0;
105}
106
107static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
108{
109 __be32 *p;
110
111 p = xdr_inline_decode(xdr, 4);
112 if (unlikely(!p))
113 return -ENOBUFS;
114 fh->size = be32_to_cpup(p++);
115 if (fh->size > NFS_MAXFHSIZE) {
116 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
117 fh->size);
118 return -EOVERFLOW;
119 }
120 /* fh.data */
121 p = xdr_inline_decode(xdr, fh->size);
122 if (unlikely(!p))
123 return -ENOBUFS;
124 memcpy(&fh->data, p, fh->size);
125 dprintk("%s: fh len %d\n", __func__, fh->size);
126
127 return 0;
128}
129
130/*
131 * Currently only stringified uids and gids are accepted.
132 * I.e., kerberos is not supported to the DSes, so no pricipals.
133 *
134 * That means that one common function will suffice, but when
135 * principals are added, this should be split to accomodate
136 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
137 */
138static int
139decode_name(struct xdr_stream *xdr, u32 *id)
140{
141 __be32 *p;
142 int len;
143
144 /* opaque_length(4)*/
145 p = xdr_inline_decode(xdr, 4);
146 if (unlikely(!p))
147 return -ENOBUFS;
148 len = be32_to_cpup(p++);
149 if (len < 0)
150 return -EINVAL;
151
152 dprintk("%s: len %u\n", __func__, len);
153
154 /* opaque body */
155 p = xdr_inline_decode(xdr, len);
156 if (unlikely(!p))
157 return -ENOBUFS;
158
159 if (!nfs_map_string_to_numeric((char *)p, len, id))
160 return -EINVAL;
161
162 return 0;
163}
164
165static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
166 const struct nfs4_ff_layout_mirror *m2)
167{
168 int i, j;
169
170 if (m1->fh_versions_cnt != m2->fh_versions_cnt)
171 return false;
172 for (i = 0; i < m1->fh_versions_cnt; i++) {
173 bool found_fh = false;
174 for (j = 0; j < m2->fh_versions_cnt; j++) {
175 if (nfs_compare_fh(&m1->fh_versions[i],
176 &m2->fh_versions[j]) == 0) {
177 found_fh = true;
178 break;
179 }
180 }
181 if (!found_fh)
182 return false;
183 }
184 return true;
185}
186
187static struct nfs4_ff_layout_mirror *
188ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
189 struct nfs4_ff_layout_mirror *mirror)
190{
191 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
192 struct nfs4_ff_layout_mirror *pos;
193 struct inode *inode = lo->plh_inode;
194
195 spin_lock(&inode->i_lock);
196 list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
197 if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
198 continue;
199 if (!ff_mirror_match_fh(mirror, pos))
200 continue;
201 if (refcount_inc_not_zero(&pos->ref)) {
202 spin_unlock(&inode->i_lock);
203 return pos;
204 }
205 }
206 list_add(&mirror->mirrors, &ff_layout->mirrors);
207 mirror->layout = lo;
208 spin_unlock(&inode->i_lock);
209 return mirror;
210}
211
212static void
213ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
214{
215 struct inode *inode;
216 if (mirror->layout == NULL)
217 return;
218 inode = mirror->layout->plh_inode;
219 spin_lock(&inode->i_lock);
220 list_del(&mirror->mirrors);
221 spin_unlock(&inode->i_lock);
222 mirror->layout = NULL;
223}
224
225static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
226{
227 struct nfs4_ff_layout_mirror *mirror;
228
229 mirror = kzalloc(sizeof(*mirror), gfp_flags);
230 if (mirror != NULL) {
231 spin_lock_init(&mirror->lock);
232 refcount_set(&mirror->ref, 1);
233 INIT_LIST_HEAD(&mirror->mirrors);
234 }
235 return mirror;
236}
237
238static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
239{
240 const struct cred *cred;
241
242 ff_layout_remove_mirror(mirror);
243 kfree(mirror->fh_versions);
244 cred = rcu_access_pointer(mirror->ro_cred);
245 put_cred(cred);
246 cred = rcu_access_pointer(mirror->rw_cred);
247 put_cred(cred);
248 nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
249 kfree(mirror);
250}
251
252static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
253{
254 if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
255 ff_layout_free_mirror(mirror);
256}
257
258static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
259{
260 u32 i;
261
262 for (i = 0; i < fls->mirror_array_cnt; i++)
263 ff_layout_put_mirror(fls->mirror_array[i]);
264}
265
266static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
267{
268 if (fls) {
269 ff_layout_free_mirror_array(fls);
270 kfree(fls);
271 }
272}
273
274static bool
275ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
276 struct pnfs_layout_segment *l2)
277{
278 const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
279 const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l1);
280 u32 i;
281
282 if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
283 return false;
284 for (i = 0; i < fl1->mirror_array_cnt; i++) {
285 if (fl1->mirror_array[i] != fl2->mirror_array[i])
286 return false;
287 }
288 return true;
289}
290
291static bool
292ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
293 const struct pnfs_layout_range *l2)
294{
295 u64 end1, end2;
296
297 if (l1->iomode != l2->iomode)
298 return l1->iomode != IOMODE_READ;
299 end1 = pnfs_calc_offset_end(l1->offset, l1->length);
300 end2 = pnfs_calc_offset_end(l2->offset, l2->length);
301 if (end1 < l2->offset)
302 return false;
303 if (end2 < l1->offset)
304 return true;
305 return l2->offset <= l1->offset;
306}
307
308static bool
309ff_lseg_merge(struct pnfs_layout_segment *new,
310 struct pnfs_layout_segment *old)
311{
312 u64 new_end, old_end;
313
314 if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
315 return false;
316 if (new->pls_range.iomode != old->pls_range.iomode)
317 return false;
318 old_end = pnfs_calc_offset_end(old->pls_range.offset,
319 old->pls_range.length);
320 if (old_end < new->pls_range.offset)
321 return false;
322 new_end = pnfs_calc_offset_end(new->pls_range.offset,
323 new->pls_range.length);
324 if (new_end < old->pls_range.offset)
325 return false;
326 if (!ff_lseg_match_mirrors(new, old))
327 return false;
328
329 /* Mergeable: copy info from 'old' to 'new' */
330 if (new_end < old_end)
331 new_end = old_end;
332 if (new->pls_range.offset < old->pls_range.offset)
333 new->pls_range.offset = old->pls_range.offset;
334 new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
335 new_end);
336 if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
337 set_bit(NFS_LSEG_ROC, &new->pls_flags);
338 return true;
339}
340
341static void
342ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
343 struct pnfs_layout_segment *lseg,
344 struct list_head *free_me)
345{
346 pnfs_generic_layout_insert_lseg(lo, lseg,
347 ff_lseg_range_is_after,
348 ff_lseg_merge,
349 free_me);
350}
351
352static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
353{
354 int i, j;
355
356 for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
357 for (j = i + 1; j < fls->mirror_array_cnt; j++)
358 if (fls->mirror_array[i]->efficiency <
359 fls->mirror_array[j]->efficiency)
360 swap(fls->mirror_array[i],
361 fls->mirror_array[j]);
362 }
363}
364
365static struct pnfs_layout_segment *
366ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
367 struct nfs4_layoutget_res *lgr,
368 gfp_t gfp_flags)
369{
370 struct pnfs_layout_segment *ret;
371 struct nfs4_ff_layout_segment *fls = NULL;
372 struct xdr_stream stream;
373 struct xdr_buf buf;
374 struct page *scratch;
375 u64 stripe_unit;
376 u32 mirror_array_cnt;
377 __be32 *p;
378 int i, rc;
379
380 dprintk("--> %s\n", __func__);
381 scratch = alloc_page(gfp_flags);
382 if (!scratch)
383 return ERR_PTR(-ENOMEM);
384
385 xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
386 lgr->layoutp->len);
387 xdr_set_scratch_page(&stream, scratch);
388
389 /* stripe unit and mirror_array_cnt */
390 rc = -EIO;
391 p = xdr_inline_decode(&stream, 8 + 4);
392 if (!p)
393 goto out_err_free;
394
395 p = xdr_decode_hyper(p, &stripe_unit);
396 mirror_array_cnt = be32_to_cpup(p++);
397 dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
398 stripe_unit, mirror_array_cnt);
399
400 if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
401 mirror_array_cnt == 0)
402 goto out_err_free;
403
404 rc = -ENOMEM;
405 fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
406 gfp_flags);
407 if (!fls)
408 goto out_err_free;
409
410 fls->mirror_array_cnt = mirror_array_cnt;
411 fls->stripe_unit = stripe_unit;
412
413 for (i = 0; i < fls->mirror_array_cnt; i++) {
414 struct nfs4_ff_layout_mirror *mirror;
415 struct cred *kcred;
416 const struct cred __rcu *cred;
417 kuid_t uid;
418 kgid_t gid;
419 u32 ds_count, fh_count, id;
420 int j;
421
422 rc = -EIO;
423 p = xdr_inline_decode(&stream, 4);
424 if (!p)
425 goto out_err_free;
426 ds_count = be32_to_cpup(p);
427
428 /* FIXME: allow for striping? */
429 if (ds_count != 1)
430 goto out_err_free;
431
432 fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
433 if (fls->mirror_array[i] == NULL) {
434 rc = -ENOMEM;
435 goto out_err_free;
436 }
437
438 fls->mirror_array[i]->ds_count = ds_count;
439
440 /* deviceid */
441 rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
442 if (rc)
443 goto out_err_free;
444
445 /* efficiency */
446 rc = -EIO;
447 p = xdr_inline_decode(&stream, 4);
448 if (!p)
449 goto out_err_free;
450 fls->mirror_array[i]->efficiency = be32_to_cpup(p);
451
452 /* stateid */
453 rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
454 if (rc)
455 goto out_err_free;
456
457 /* fh */
458 rc = -EIO;
459 p = xdr_inline_decode(&stream, 4);
460 if (!p)
461 goto out_err_free;
462 fh_count = be32_to_cpup(p);
463
464 fls->mirror_array[i]->fh_versions =
465 kcalloc(fh_count, sizeof(struct nfs_fh),
466 gfp_flags);
467 if (fls->mirror_array[i]->fh_versions == NULL) {
468 rc = -ENOMEM;
469 goto out_err_free;
470 }
471
472 for (j = 0; j < fh_count; j++) {
473 rc = decode_nfs_fh(&stream,
474 &fls->mirror_array[i]->fh_versions[j]);
475 if (rc)
476 goto out_err_free;
477 }
478
479 fls->mirror_array[i]->fh_versions_cnt = fh_count;
480
481 /* user */
482 rc = decode_name(&stream, &id);
483 if (rc)
484 goto out_err_free;
485
486 uid = make_kuid(&init_user_ns, id);
487
488 /* group */
489 rc = decode_name(&stream, &id);
490 if (rc)
491 goto out_err_free;
492
493 gid = make_kgid(&init_user_ns, id);
494
495 if (gfp_flags & __GFP_FS)
496 kcred = prepare_kernel_cred(&init_task);
497 else {
498 unsigned int nofs_flags = memalloc_nofs_save();
499 kcred = prepare_kernel_cred(&init_task);
500 memalloc_nofs_restore(nofs_flags);
501 }
502 rc = -ENOMEM;
503 if (!kcred)
504 goto out_err_free;
505 kcred->fsuid = uid;
506 kcred->fsgid = gid;
507 cred = RCU_INITIALIZER(kcred);
508
509 if (lgr->range.iomode == IOMODE_READ)
510 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
511 else
512 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
513
514 mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
515 if (mirror != fls->mirror_array[i]) {
516 /* swap cred ptrs so free_mirror will clean up old */
517 if (lgr->range.iomode == IOMODE_READ) {
518 cred = xchg(&mirror->ro_cred, cred);
519 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
520 } else {
521 cred = xchg(&mirror->rw_cred, cred);
522 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
523 }
524 ff_layout_free_mirror(fls->mirror_array[i]);
525 fls->mirror_array[i] = mirror;
526 }
527
528 dprintk("%s: iomode %s uid %u gid %u\n", __func__,
529 lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
530 from_kuid(&init_user_ns, uid),
531 from_kgid(&init_user_ns, gid));
532 }
533
534 p = xdr_inline_decode(&stream, 4);
535 if (!p)
536 goto out_sort_mirrors;
537 fls->flags = be32_to_cpup(p);
538
539 p = xdr_inline_decode(&stream, 4);
540 if (!p)
541 goto out_sort_mirrors;
542 for (i=0; i < fls->mirror_array_cnt; i++)
543 fls->mirror_array[i]->report_interval = be32_to_cpup(p);
544
545out_sort_mirrors:
546 ff_layout_sort_mirrors(fls);
547 ret = &fls->generic_hdr;
548 dprintk("<-- %s (success)\n", __func__);
549out_free_page:
550 __free_page(scratch);
551 return ret;
552out_err_free:
553 _ff_layout_free_lseg(fls);
554 ret = ERR_PTR(rc);
555 dprintk("<-- %s (%d)\n", __func__, rc);
556 goto out_free_page;
557}
558
559static void
560ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
561{
562 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
563
564 dprintk("--> %s\n", __func__);
565
566 if (lseg->pls_range.iomode == IOMODE_RW) {
567 struct nfs4_flexfile_layout *ffl;
568 struct inode *inode;
569
570 ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
571 inode = ffl->generic_hdr.plh_inode;
572 spin_lock(&inode->i_lock);
573 pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
574 spin_unlock(&inode->i_lock);
575 }
576 _ff_layout_free_lseg(fls);
577}
578
579static void
580nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
581{
582 /* first IO request? */
583 if (atomic_inc_return(&timer->n_ops) == 1) {
584 timer->start_time = now;
585 }
586}
587
588static ktime_t
589nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
590{
591 ktime_t start;
592
593 if (atomic_dec_return(&timer->n_ops) < 0)
594 WARN_ON_ONCE(1);
595
596 start = timer->start_time;
597 timer->start_time = now;
598 return ktime_sub(now, start);
599}
600
601static bool
602nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
603 struct nfs4_ff_layoutstat *layoutstat,
604 ktime_t now)
605{
606 s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
607 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
608
609 nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
610 if (!mirror->start_time)
611 mirror->start_time = now;
612 if (mirror->report_interval != 0)
613 report_interval = (s64)mirror->report_interval * 1000LL;
614 else if (layoutstats_timer != 0)
615 report_interval = (s64)layoutstats_timer * 1000LL;
616 if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
617 report_interval) {
618 ffl->last_report_time = now;
619 return true;
620 }
621
622 return false;
623}
624
625static void
626nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
627 __u64 requested)
628{
629 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
630
631 iostat->ops_requested++;
632 iostat->bytes_requested += requested;
633}
634
635static void
636nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
637 __u64 requested,
638 __u64 completed,
639 ktime_t time_completed,
640 ktime_t time_started)
641{
642 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
643 ktime_t completion_time = ktime_sub(time_completed, time_started);
644 ktime_t timer;
645
646 iostat->ops_completed++;
647 iostat->bytes_completed += completed;
648 iostat->bytes_not_delivered += requested - completed;
649
650 timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
651 iostat->total_busy_time =
652 ktime_add(iostat->total_busy_time, timer);
653 iostat->aggregate_completion_time =
654 ktime_add(iostat->aggregate_completion_time,
655 completion_time);
656}
657
658static void
659nfs4_ff_layout_stat_io_start_read(struct inode *inode,
660 struct nfs4_ff_layout_mirror *mirror,
661 __u64 requested, ktime_t now)
662{
663 bool report;
664
665 spin_lock(&mirror->lock);
666 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
667 nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
668 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
669 spin_unlock(&mirror->lock);
670
671 if (report)
672 pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
673}
674
675static void
676nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
677 struct nfs4_ff_layout_mirror *mirror,
678 __u64 requested,
679 __u64 completed)
680{
681 spin_lock(&mirror->lock);
682 nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
683 requested, completed,
684 ktime_get(), task->tk_start);
685 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
686 spin_unlock(&mirror->lock);
687}
688
689static void
690nfs4_ff_layout_stat_io_start_write(struct inode *inode,
691 struct nfs4_ff_layout_mirror *mirror,
692 __u64 requested, ktime_t now)
693{
694 bool report;
695
696 spin_lock(&mirror->lock);
697 report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat, now);
698 nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
699 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
700 spin_unlock(&mirror->lock);
701
702 if (report)
703 pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
704}
705
706static void
707nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
708 struct nfs4_ff_layout_mirror *mirror,
709 __u64 requested,
710 __u64 completed,
711 enum nfs3_stable_how committed)
712{
713 if (committed == NFS_UNSTABLE)
714 requested = completed = 0;
715
716 spin_lock(&mirror->lock);
717 nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
718 requested, completed, ktime_get(), task->tk_start);
719 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
720 spin_unlock(&mirror->lock);
721}
722
723static void
724ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
725{
726 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
727
728 if (devid)
729 nfs4_mark_deviceid_unavailable(devid);
730}
731
732static void
733ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
734{
735 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
736
737 if (devid)
738 nfs4_mark_deviceid_available(devid);
739}
740
741static struct nfs4_pnfs_ds *
742ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
743 u32 start_idx, u32 *best_idx,
744 bool check_device)
745{
746 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
747 struct nfs4_ff_layout_mirror *mirror;
748 struct nfs4_pnfs_ds *ds;
749 u32 idx;
750
751 /* mirrors are initially sorted by efficiency */
752 for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
753 mirror = FF_LAYOUT_COMP(lseg, idx);
754 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
755 if (!ds)
756 continue;
757
758 if (check_device &&
759 nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
760 continue;
761
762 *best_idx = idx;
763 return ds;
764 }
765
766 return NULL;
767}
768
769static struct nfs4_pnfs_ds *
770ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
771 u32 start_idx, u32 *best_idx)
772{
773 return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
774}
775
776static struct nfs4_pnfs_ds *
777ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
778 u32 start_idx, u32 *best_idx)
779{
780 return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
781}
782
783static struct nfs4_pnfs_ds *
784ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
785 u32 start_idx, u32 *best_idx)
786{
787 struct nfs4_pnfs_ds *ds;
788
789 ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
790 if (ds)
791 return ds;
792 return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
793}
794
795static struct nfs4_pnfs_ds *
796ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
797 u32 *best_idx)
798{
799 struct pnfs_layout_segment *lseg = pgio->pg_lseg;
800 struct nfs4_pnfs_ds *ds;
801
802 ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
803 best_idx);
804 if (ds || !pgio->pg_mirror_idx)
805 return ds;
806 return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
807}
808
809static void
810ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
811 struct nfs_page *req,
812 bool strict_iomode)
813{
814 pnfs_put_lseg(pgio->pg_lseg);
815 pgio->pg_lseg =
816 pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
817 req_offset(req), req->wb_bytes, IOMODE_READ,
818 strict_iomode, nfs_io_gfp_mask());
819 if (IS_ERR(pgio->pg_lseg)) {
820 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
821 pgio->pg_lseg = NULL;
822 }
823}
824
825static void
826ff_layout_pg_check_layout(struct nfs_pageio_descriptor *pgio,
827 struct nfs_page *req)
828{
829 pnfs_generic_pg_check_layout(pgio);
830 pnfs_generic_pg_check_range(pgio, req);
831}
832
833static void
834ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
835 struct nfs_page *req)
836{
837 struct nfs_pgio_mirror *pgm;
838 struct nfs4_ff_layout_mirror *mirror;
839 struct nfs4_pnfs_ds *ds;
840 u32 ds_idx;
841
842retry:
843 ff_layout_pg_check_layout(pgio, req);
844 /* Use full layout for now */
845 if (!pgio->pg_lseg) {
846 ff_layout_pg_get_read(pgio, req, false);
847 if (!pgio->pg_lseg)
848 goto out_nolseg;
849 }
850 if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
851 ff_layout_pg_get_read(pgio, req, true);
852 if (!pgio->pg_lseg)
853 goto out_nolseg;
854 }
855
856 ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
857 if (!ds) {
858 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
859 goto out_mds;
860 pnfs_generic_pg_cleanup(pgio);
861 /* Sleep for 1 second before retrying */
862 ssleep(1);
863 goto retry;
864 }
865
866 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
867 pgm = &pgio->pg_mirrors[0];
868 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
869
870 pgio->pg_mirror_idx = ds_idx;
871
872 if (NFS_SERVER(pgio->pg_inode)->flags &
873 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
874 pgio->pg_maxretrans = io_maxretrans;
875 return;
876out_nolseg:
877 if (pgio->pg_error < 0)
878 return;
879out_mds:
880 trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
881 0, NFS4_MAX_UINT64, IOMODE_READ,
882 NFS_I(pgio->pg_inode)->layout,
883 pgio->pg_lseg);
884 pgio->pg_maxretrans = 0;
885 nfs_pageio_reset_read_mds(pgio);
886}
887
888static void
889ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
890 struct nfs_page *req)
891{
892 struct nfs4_ff_layout_mirror *mirror;
893 struct nfs_pgio_mirror *pgm;
894 struct nfs4_pnfs_ds *ds;
895 u32 i;
896
897retry:
898 ff_layout_pg_check_layout(pgio, req);
899 if (!pgio->pg_lseg) {
900 pgio->pg_lseg =
901 pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
902 req_offset(req), req->wb_bytes,
903 IOMODE_RW, false, nfs_io_gfp_mask());
904 if (IS_ERR(pgio->pg_lseg)) {
905 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
906 pgio->pg_lseg = NULL;
907 return;
908 }
909 }
910 /* If no lseg, fall back to write through mds */
911 if (pgio->pg_lseg == NULL)
912 goto out_mds;
913
914 /* Use a direct mapping of ds_idx to pgio mirror_idx */
915 if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
916 goto out_eagain;
917
918 for (i = 0; i < pgio->pg_mirror_count; i++) {
919 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
920 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
921 if (!ds) {
922 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
923 goto out_mds;
924 pnfs_generic_pg_cleanup(pgio);
925 /* Sleep for 1 second before retrying */
926 ssleep(1);
927 goto retry;
928 }
929 pgm = &pgio->pg_mirrors[i];
930 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
931 }
932
933 if (NFS_SERVER(pgio->pg_inode)->flags &
934 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
935 pgio->pg_maxretrans = io_maxretrans;
936 return;
937out_eagain:
938 pnfs_generic_pg_cleanup(pgio);
939 pgio->pg_error = -EAGAIN;
940 return;
941out_mds:
942 trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
943 0, NFS4_MAX_UINT64, IOMODE_RW,
944 NFS_I(pgio->pg_inode)->layout,
945 pgio->pg_lseg);
946 pgio->pg_maxretrans = 0;
947 nfs_pageio_reset_write_mds(pgio);
948 pgio->pg_error = -EAGAIN;
949}
950
951static unsigned int
952ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
953 struct nfs_page *req)
954{
955 if (!pgio->pg_lseg) {
956 pgio->pg_lseg =
957 pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
958 req_offset(req), req->wb_bytes,
959 IOMODE_RW, false, nfs_io_gfp_mask());
960 if (IS_ERR(pgio->pg_lseg)) {
961 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
962 pgio->pg_lseg = NULL;
963 goto out;
964 }
965 }
966 if (pgio->pg_lseg)
967 return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
968
969 trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
970 0, NFS4_MAX_UINT64, IOMODE_RW,
971 NFS_I(pgio->pg_inode)->layout,
972 pgio->pg_lseg);
973 /* no lseg means that pnfs is not in use, so no mirroring here */
974 nfs_pageio_reset_write_mds(pgio);
975out:
976 return 1;
977}
978
979static u32
980ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
981{
982 u32 old = desc->pg_mirror_idx;
983
984 desc->pg_mirror_idx = idx;
985 return old;
986}
987
988static struct nfs_pgio_mirror *
989ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
990{
991 return &desc->pg_mirrors[idx];
992}
993
994static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
995 .pg_init = ff_layout_pg_init_read,
996 .pg_test = pnfs_generic_pg_test,
997 .pg_doio = pnfs_generic_pg_readpages,
998 .pg_cleanup = pnfs_generic_pg_cleanup,
999};
1000
1001static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
1002 .pg_init = ff_layout_pg_init_write,
1003 .pg_test = pnfs_generic_pg_test,
1004 .pg_doio = pnfs_generic_pg_writepages,
1005 .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1006 .pg_cleanup = pnfs_generic_pg_cleanup,
1007 .pg_get_mirror = ff_layout_pg_get_mirror_write,
1008 .pg_set_mirror = ff_layout_pg_set_mirror_write,
1009};
1010
1011static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1012{
1013 struct rpc_task *task = &hdr->task;
1014
1015 pnfs_layoutcommit_inode(hdr->inode, false);
1016
1017 if (retry_pnfs) {
1018 dprintk("%s Reset task %5u for i/o through pNFS "
1019 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1020 hdr->task.tk_pid,
1021 hdr->inode->i_sb->s_id,
1022 (unsigned long long)NFS_FILEID(hdr->inode),
1023 hdr->args.count,
1024 (unsigned long long)hdr->args.offset);
1025
1026 hdr->completion_ops->reschedule_io(hdr);
1027 return;
1028 }
1029
1030 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1031 dprintk("%s Reset task %5u for i/o through MDS "
1032 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1033 hdr->task.tk_pid,
1034 hdr->inode->i_sb->s_id,
1035 (unsigned long long)NFS_FILEID(hdr->inode),
1036 hdr->args.count,
1037 (unsigned long long)hdr->args.offset);
1038
1039 trace_pnfs_mds_fallback_write_done(hdr->inode,
1040 hdr->args.offset, hdr->args.count,
1041 IOMODE_RW, NFS_I(hdr->inode)->layout,
1042 hdr->lseg);
1043 task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1044 }
1045}
1046
1047static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
1048{
1049 u32 idx = hdr->pgio_mirror_idx + 1;
1050 u32 new_idx = 0;
1051
1052 if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
1053 ff_layout_send_layouterror(hdr->lseg);
1054 else
1055 pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1056 pnfs_read_resend_pnfs(hdr, new_idx);
1057}
1058
1059static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1060{
1061 struct rpc_task *task = &hdr->task;
1062
1063 pnfs_layoutcommit_inode(hdr->inode, false);
1064 pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1065
1066 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1067 dprintk("%s Reset task %5u for i/o through MDS "
1068 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1069 hdr->task.tk_pid,
1070 hdr->inode->i_sb->s_id,
1071 (unsigned long long)NFS_FILEID(hdr->inode),
1072 hdr->args.count,
1073 (unsigned long long)hdr->args.offset);
1074
1075 trace_pnfs_mds_fallback_read_done(hdr->inode,
1076 hdr->args.offset, hdr->args.count,
1077 IOMODE_READ, NFS_I(hdr->inode)->layout,
1078 hdr->lseg);
1079 task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1080 }
1081}
1082
1083static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1084 struct nfs4_state *state,
1085 struct nfs_client *clp,
1086 struct pnfs_layout_segment *lseg,
1087 u32 idx)
1088{
1089 struct pnfs_layout_hdr *lo = lseg->pls_layout;
1090 struct inode *inode = lo->plh_inode;
1091 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1092 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1093
1094 switch (task->tk_status) {
1095 case -NFS4ERR_BADSESSION:
1096 case -NFS4ERR_BADSLOT:
1097 case -NFS4ERR_BAD_HIGH_SLOT:
1098 case -NFS4ERR_DEADSESSION:
1099 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1100 case -NFS4ERR_SEQ_FALSE_RETRY:
1101 case -NFS4ERR_SEQ_MISORDERED:
1102 dprintk("%s ERROR %d, Reset session. Exchangeid "
1103 "flags 0x%x\n", __func__, task->tk_status,
1104 clp->cl_exchange_flags);
1105 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1106 break;
1107 case -NFS4ERR_DELAY:
1108 case -NFS4ERR_GRACE:
1109 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1110 break;
1111 case -NFS4ERR_RETRY_UNCACHED_REP:
1112 break;
1113 /* Invalidate Layout errors */
1114 case -NFS4ERR_PNFS_NO_LAYOUT:
1115 case -ESTALE: /* mapped NFS4ERR_STALE */
1116 case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
1117 case -EISDIR: /* mapped NFS4ERR_ISDIR */
1118 case -NFS4ERR_FHEXPIRED:
1119 case -NFS4ERR_WRONG_TYPE:
1120 dprintk("%s Invalid layout error %d\n", __func__,
1121 task->tk_status);
1122 /*
1123 * Destroy layout so new i/o will get a new layout.
1124 * Layout will not be destroyed until all current lseg
1125 * references are put. Mark layout as invalid to resend failed
1126 * i/o and all i/o waiting on the slot table to the MDS until
1127 * layout is destroyed and a new valid layout is obtained.
1128 */
1129 pnfs_destroy_layout(NFS_I(inode));
1130 rpc_wake_up(&tbl->slot_tbl_waitq);
1131 goto reset;
1132 /* RPC connection errors */
1133 case -ECONNREFUSED:
1134 case -EHOSTDOWN:
1135 case -EHOSTUNREACH:
1136 case -ENETUNREACH:
1137 case -EIO:
1138 case -ETIMEDOUT:
1139 case -EPIPE:
1140 case -EPROTO:
1141 case -ENODEV:
1142 dprintk("%s DS connection error %d\n", __func__,
1143 task->tk_status);
1144 nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1145 &devid->deviceid);
1146 rpc_wake_up(&tbl->slot_tbl_waitq);
1147 fallthrough;
1148 default:
1149 if (ff_layout_avoid_mds_available_ds(lseg))
1150 return -NFS4ERR_RESET_TO_PNFS;
1151reset:
1152 dprintk("%s Retry through MDS. Error %d\n", __func__,
1153 task->tk_status);
1154 return -NFS4ERR_RESET_TO_MDS;
1155 }
1156 task->tk_status = 0;
1157 return -EAGAIN;
1158}
1159
1160/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1161static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1162 struct pnfs_layout_segment *lseg,
1163 u32 idx)
1164{
1165 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1166
1167 switch (task->tk_status) {
1168 /* File access problems. Don't mark the device as unavailable */
1169 case -EACCES:
1170 case -ESTALE:
1171 case -EISDIR:
1172 case -EBADHANDLE:
1173 case -ELOOP:
1174 case -ENOSPC:
1175 break;
1176 case -EJUKEBOX:
1177 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1178 goto out_retry;
1179 default:
1180 dprintk("%s DS connection error %d\n", __func__,
1181 task->tk_status);
1182 nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1183 &devid->deviceid);
1184 }
1185 /* FIXME: Need to prevent infinite looping here. */
1186 return -NFS4ERR_RESET_TO_PNFS;
1187out_retry:
1188 task->tk_status = 0;
1189 rpc_restart_call_prepare(task);
1190 rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1191 return -EAGAIN;
1192}
1193
1194static int ff_layout_async_handle_error(struct rpc_task *task,
1195 struct nfs4_state *state,
1196 struct nfs_client *clp,
1197 struct pnfs_layout_segment *lseg,
1198 u32 idx)
1199{
1200 int vers = clp->cl_nfs_mod->rpc_vers->number;
1201
1202 if (task->tk_status >= 0) {
1203 ff_layout_mark_ds_reachable(lseg, idx);
1204 return 0;
1205 }
1206
1207 /* Handle the case of an invalid layout segment */
1208 if (!pnfs_is_valid_lseg(lseg))
1209 return -NFS4ERR_RESET_TO_PNFS;
1210
1211 switch (vers) {
1212 case 3:
1213 return ff_layout_async_handle_error_v3(task, lseg, idx);
1214 case 4:
1215 return ff_layout_async_handle_error_v4(task, state, clp,
1216 lseg, idx);
1217 default:
1218 /* should never happen */
1219 WARN_ON_ONCE(1);
1220 return 0;
1221 }
1222}
1223
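/*
 * Record a DS I/O error against the mirror so that it can be reported to
 * the MDS. Transport errors are mapped to NFS4ERR_NXIO and the DS is
 * marked unreachable; unless the error is DELAY/GRACE, or a read that can
 * still be retried against another mirror, the layout is also marked for
 * return.
 */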
1224static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1225 u32 idx, u64 offset, u64 length,
1226 u32 *op_status, int opnum, int error)
1227{
1228 struct nfs4_ff_layout_mirror *mirror;
1229 u32 status = *op_status;
1230 int err;
1231
1232 if (status == 0) {
1233 switch (error) {
1234 case -ETIMEDOUT:
1235 case -EPFNOSUPPORT:
1236 case -EPROTONOSUPPORT:
1237 case -EOPNOTSUPP:
1238 case -ECONNREFUSED:
1239 case -ECONNRESET:
1240 case -EHOSTDOWN:
1241 case -EHOSTUNREACH:
1242 case -ENETUNREACH:
1243 case -EADDRINUSE:
1244 case -ENOBUFS:
1245 case -EPIPE:
1246 case -EPERM:
1247 case -EPROTO:
1248 case -ENODEV:
1249 *op_status = status = NFS4ERR_NXIO;
1250 break;
1251 case -EACCES:
1252 *op_status = status = NFS4ERR_ACCESS;
1253 break;
1254 default:
1255 return;
1256 }
1257 }
1258
1259 mirror = FF_LAYOUT_COMP(lseg, idx);
1260 err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1261 mirror, offset, length, status, opnum,
1262 nfs_io_gfp_mask());
1263
1264 switch (status) {
1265 case NFS4ERR_DELAY:
1266 case NFS4ERR_GRACE:
1267 break;
1268 case NFS4ERR_NXIO:
1269 ff_layout_mark_ds_unreachable(lseg, idx);
1270 /*
1271 * Don't return the layout if this is a read and we still
1272 * have layouts to try
1273 */
1274 if (opnum == OP_READ)
1275 break;
1276 fallthrough;
1277 default:
1278 pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
1279 lseg);
1280 }
1281
1282 dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1283}
1284
1285/* NFS_PROTO call done callback routines */
1286static int ff_layout_read_done_cb(struct rpc_task *task,
1287 struct nfs_pgio_header *hdr)
1288{
1289 int err;
1290
1291 if (task->tk_status < 0) {
1292 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1293 hdr->args.offset, hdr->args.count,
1294 &hdr->res.op_status, OP_READ,
1295 task->tk_status);
1296 trace_ff_layout_read_error(hdr);
1297 }
1298
1299 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1300 hdr->ds_clp, hdr->lseg,
1301 hdr->pgio_mirror_idx);
1302
1303 trace_nfs4_pnfs_read(hdr, err);
1304 clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1305 clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1306 switch (err) {
1307 case -NFS4ERR_RESET_TO_PNFS:
1308 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1309 return task->tk_status;
1310 case -NFS4ERR_RESET_TO_MDS:
1311 set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1312 return task->tk_status;
1313 case -EAGAIN:
1314 goto out_eagain;
1315 }
1316
1317 return 0;
1318out_eagain:
1319 rpc_restart_call_prepare(task);
1320 return -EAGAIN;
1321}
1322
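/* LAYOUTCOMMIT can be skipped when the layout sets FF_FLAGS_NO_LAYOUTCOMMIT. */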
1323static bool
1324ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1325{
1326 return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1327}
1328
1329/*
1330 * We reference the rpc_cred of the first WRITE that triggers the need
1331 * for a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1332 * RFC 5661 is not clear about which credential should be used.
1333 *
1334 * The flexfiles client should treat a FILE_SYNC reply from the DS as
1335 * DATA_SYNC, so per http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1336 * we always send a layoutcommit after DS writes.
1337 */
1338static void
1339ff_layout_set_layoutcommit(struct inode *inode,
1340 struct pnfs_layout_segment *lseg,
1341 loff_t end_offset)
1342{
1343 if (!ff_layout_need_layoutcommit(lseg))
1344 return;
1345
1346 pnfs_set_layoutcommit(inode, lseg, end_offset);
1347 dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1348 (unsigned long long) NFS_I(inode)->layout->plh_lwb);
1349}
1350
1351static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1352 struct nfs_pgio_header *hdr)
1353{
1354 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1355 return;
1356 nfs4_ff_layout_stat_io_start_read(hdr->inode,
1357 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1358 hdr->args.count,
1359 task->tk_start);
1360}
1361
1362static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1363 struct nfs_pgio_header *hdr)
1364{
1365 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1366 return;
1367 nfs4_ff_layout_stat_io_end_read(task,
1368 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1369 hdr->args.count,
1370 hdr->res.count);
1371 set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1372}
1373
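/*
 * Common read setup: fail the RPC if the open context has been marked
 * bad, ask for a retry if the layout segment is no longer valid, and
 * otherwise start layoutstats accounting for this request.
 */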
1374static int ff_layout_read_prepare_common(struct rpc_task *task,
1375 struct nfs_pgio_header *hdr)
1376{
1377 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1378 rpc_exit(task, -EIO);
1379 return -EIO;
1380 }
1381
1382 if (!pnfs_is_valid_lseg(hdr->lseg)) {
1383 rpc_exit(task, -EAGAIN);
1384 return -EAGAIN;
1385 }
1386
1387 ff_layout_read_record_layoutstats_start(task, hdr);
1388 return 0;
1389}
1390
1391/*
1392 * Call ops for the async read/write cases.
1393 * In the case of dense layouts, the offset needs to be reset to its
1394 * original value.
1395 */
1396static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1397{
1398 struct nfs_pgio_header *hdr = data;
1399
1400 if (ff_layout_read_prepare_common(task, hdr))
1401 return;
1402
1403 rpc_call_start(task);
1404}
1405
1406static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1407{
1408 struct nfs_pgio_header *hdr = data;
1409
1410 if (nfs4_setup_sequence(hdr->ds_clp,
1411 &hdr->args.seq_args,
1412 &hdr->res.seq_res,
1413 task))
1414 return;
1415
1416 ff_layout_read_prepare_common(task, hdr);
1417}
1418
1419static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1420{
1421 struct nfs_pgio_header *hdr = data;
1422
1423 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1424 task->tk_status == 0) {
1425 nfs4_sequence_done(task, &hdr->res.seq_res);
1426 return;
1427 }
1428
1429 /* Note this may cause RPC to be resent */
1430 hdr->mds_ops->rpc_call_done(task, hdr);
1431}
1432
1433static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1434{
1435 struct nfs_pgio_header *hdr = data;
1436
1437 ff_layout_read_record_layoutstats_done(task, hdr);
1438 rpc_count_iostats_metrics(task,
1439 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1440}
1441
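/*
 * Release the read header: complete layoutstats accounting and, if the
 * completion callback requested it, resend the I/O through pNFS or fall
 * back to the MDS.
 */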
1442static void ff_layout_read_release(void *data)
1443{
1444 struct nfs_pgio_header *hdr = data;
1445
1446 ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1447 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1448 ff_layout_resend_pnfs_read(hdr);
1449 else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1450 ff_layout_reset_read(hdr);
1451 pnfs_generic_rw_release(data);
1452}
1453
1454
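/*
 * Handle a completed WRITE to the DS: track any error, decide whether the
 * I/O must be resent through pNFS or the MDS, and schedule a layoutcommit
 * once data has been written stably.
 */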
1455static int ff_layout_write_done_cb(struct rpc_task *task,
1456 struct nfs_pgio_header *hdr)
1457{
1458 loff_t end_offs = 0;
1459 int err;
1460
1461 if (task->tk_status < 0) {
1462 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1463 hdr->args.offset, hdr->args.count,
1464 &hdr->res.op_status, OP_WRITE,
1465 task->tk_status);
1466 trace_ff_layout_write_error(hdr);
1467 }
1468
1469 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1470 hdr->ds_clp, hdr->lseg,
1471 hdr->pgio_mirror_idx);
1472
1473 trace_nfs4_pnfs_write(hdr, err);
1474 clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1475 clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1476 switch (err) {
1477 case -NFS4ERR_RESET_TO_PNFS:
1478 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1479 return task->tk_status;
1480 case -NFS4ERR_RESET_TO_MDS:
1481 set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1482 return task->tk_status;
1483 case -EAGAIN:
1484 return -EAGAIN;
1485 }
1486
1487 if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1488 hdr->res.verf->committed == NFS_DATA_SYNC)
1489 end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1490
1491 /* Note: if the write is unstable, don't set end_offs until commit */
1492 ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1493
1494 /* Zero out the fattr since we don't care about DS attributes at all */
1495 hdr->fattr.valid = 0;
1496 if (task->tk_status >= 0)
1497 nfs_writeback_update_inode(hdr);
1498
1499 return 0;
1500}
1501
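/*
 * Handle a completed COMMIT to the DS: track any error, arrange for the
 * writes to be resent where necessary, and record the last write byte for
 * layoutcommit.
 */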
1502static int ff_layout_commit_done_cb(struct rpc_task *task,
1503 struct nfs_commit_data *data)
1504{
1505 int err;
1506
1507 if (task->tk_status < 0) {
1508 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1509 data->args.offset, data->args.count,
1510 &data->res.op_status, OP_COMMIT,
1511 task->tk_status);
1512 trace_ff_layout_commit_error(data);
1513 }
1514
1515 err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1516 data->lseg, data->ds_commit_index);
1517
1518 trace_nfs4_pnfs_commit_ds(data, err);
1519 switch (err) {
1520 case -NFS4ERR_RESET_TO_PNFS:
1521 pnfs_generic_prepare_to_resend_writes(data);
1522 return -EAGAIN;
1523 case -NFS4ERR_RESET_TO_MDS:
1524 pnfs_generic_prepare_to_resend_writes(data);
1525 return -EAGAIN;
1526 case -EAGAIN:
1527 rpc_restart_call_prepare(task);
1528 return -EAGAIN;
1529 }
1530
1531 ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1532
1533 return 0;
1534}
1535
1536static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1537 struct nfs_pgio_header *hdr)
1538{
1539 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1540 return;
1541 nfs4_ff_layout_stat_io_start_write(hdr->inode,
1542 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1543 hdr->args.count,
1544 task->tk_start);
1545}
1546
1547static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1548 struct nfs_pgio_header *hdr)
1549{
1550 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1551 return;
1552 nfs4_ff_layout_stat_io_end_write(task,
1553 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1554 hdr->args.count, hdr->res.count,
1555 hdr->res.verf->committed);
1556 set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1557}
1558
1559static int ff_layout_write_prepare_common(struct rpc_task *task,
1560 struct nfs_pgio_header *hdr)
1561{
1562 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1563 rpc_exit(task, -EIO);
1564 return -EIO;
1565 }
1566
1567 if (!pnfs_is_valid_lseg(hdr->lseg)) {
1568 rpc_exit(task, -EAGAIN);
1569 return -EAGAIN;
1570 }
1571
1572 ff_layout_write_record_layoutstats_start(task, hdr);
1573 return 0;
1574}
1575
1576static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1577{
1578 struct nfs_pgio_header *hdr = data;
1579
1580 if (ff_layout_write_prepare_common(task, hdr))
1581 return;
1582
1583 rpc_call_start(task);
1584}
1585
1586static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1587{
1588 struct nfs_pgio_header *hdr = data;
1589
1590 if (nfs4_setup_sequence(hdr->ds_clp,
1591 &hdr->args.seq_args,
1592 &hdr->res.seq_res,
1593 task))
1594 return;
1595
1596 ff_layout_write_prepare_common(task, hdr);
1597}
1598
1599static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1600{
1601 struct nfs_pgio_header *hdr = data;
1602
1603 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1604 task->tk_status == 0) {
1605 nfs4_sequence_done(task, &hdr->res.seq_res);
1606 return;
1607 }
1608
1609 /* Note this may cause RPC to be resent */
1610 hdr->mds_ops->rpc_call_done(task, hdr);
1611}
1612
1613static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1614{
1615 struct nfs_pgio_header *hdr = data;
1616
1617 ff_layout_write_record_layoutstats_done(task, hdr);
1618 rpc_count_iostats_metrics(task,
1619 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1620}
1621
1622static void ff_layout_write_release(void *data)
1623{
1624 struct nfs_pgio_header *hdr = data;
1625
1626 ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1627 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1628 ff_layout_send_layouterror(hdr->lseg);
1629 ff_layout_reset_write(hdr, true);
1630 } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1631 ff_layout_reset_write(hdr, false);
1632 pnfs_generic_rw_release(data);
1633}
1634
1635static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1636 struct nfs_commit_data *cdata)
1637{
1638 if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1639 return;
1640 nfs4_ff_layout_stat_io_start_write(cdata->inode,
1641 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1642 0, task->tk_start);
1643}
1644
1645static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1646 struct nfs_commit_data *cdata)
1647{
1648 struct nfs_page *req;
1649 __u64 count = 0;
1650
1651 if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1652 return;
1653
1654 if (task->tk_status == 0) {
1655 list_for_each_entry(req, &cdata->pages, wb_list)
1656 count += req->wb_bytes;
1657 }
1658 nfs4_ff_layout_stat_io_end_write(task,
1659 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1660 count, count, NFS_FILE_SYNC);
1661 set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1662}
1663
1664static int ff_layout_commit_prepare_common(struct rpc_task *task,
1665 struct nfs_commit_data *cdata)
1666{
1667 if (!pnfs_is_valid_lseg(cdata->lseg)) {
1668 rpc_exit(task, -EAGAIN);
1669 return -EAGAIN;
1670 }
1671
1672 ff_layout_commit_record_layoutstats_start(task, cdata);
1673 return 0;
1674}
1675
1676static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1677{
1678 if (ff_layout_commit_prepare_common(task, data))
1679 return;
1680
1681 rpc_call_start(task);
1682}
1683
1684static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1685{
1686 struct nfs_commit_data *wdata = data;
1687
1688 if (nfs4_setup_sequence(wdata->ds_clp,
1689 &wdata->args.seq_args,
1690 &wdata->res.seq_res,
1691 task))
1692 return;
1693 ff_layout_commit_prepare_common(task, data);
1694}
1695
1696static void ff_layout_commit_done(struct rpc_task *task, void *data)
1697{
1698 pnfs_generic_write_commit_done(task, data);
1699}
1700
1701static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1702{
1703 struct nfs_commit_data *cdata = data;
1704
1705 ff_layout_commit_record_layoutstats_done(task, cdata);
1706 rpc_count_iostats_metrics(task,
1707 &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1708}
1709
1710static void ff_layout_commit_release(void *data)
1711{
1712 struct nfs_commit_data *cdata = data;
1713
1714 ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1715 pnfs_generic_commit_release(data);
1716}
1717
1718static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1719 .rpc_call_prepare = ff_layout_read_prepare_v3,
1720 .rpc_call_done = ff_layout_read_call_done,
1721 .rpc_count_stats = ff_layout_read_count_stats,
1722 .rpc_release = ff_layout_read_release,
1723};
1724
1725static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1726 .rpc_call_prepare = ff_layout_read_prepare_v4,
1727 .rpc_call_done = ff_layout_read_call_done,
1728 .rpc_count_stats = ff_layout_read_count_stats,
1729 .rpc_release = ff_layout_read_release,
1730};
1731
1732static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1733 .rpc_call_prepare = ff_layout_write_prepare_v3,
1734 .rpc_call_done = ff_layout_write_call_done,
1735 .rpc_count_stats = ff_layout_write_count_stats,
1736 .rpc_release = ff_layout_write_release,
1737};
1738
1739static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1740 .rpc_call_prepare = ff_layout_write_prepare_v4,
1741 .rpc_call_done = ff_layout_write_call_done,
1742 .rpc_count_stats = ff_layout_write_count_stats,
1743 .rpc_release = ff_layout_write_release,
1744};
1745
1746static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1747 .rpc_call_prepare = ff_layout_commit_prepare_v3,
1748 .rpc_call_done = ff_layout_commit_done,
1749 .rpc_count_stats = ff_layout_commit_count_stats,
1750 .rpc_release = ff_layout_commit_release,
1751};
1752
1753static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1754 .rpc_call_prepare = ff_layout_commit_prepare_v4,
1755 .rpc_call_done = ff_layout_commit_done,
1756 .rpc_count_stats = ff_layout_commit_count_stats,
1757 .rpc_release = ff_layout_commit_release,
1758};
1759
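/*
 * Hand a read off to the DS for the selected mirror. If the DS cannot be
 * used, either retry through pNFS (if another mirror may still work) or
 * fall back to the MDS.
 */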
1760static enum pnfs_try_status
1761ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1762{
1763 struct pnfs_layout_segment *lseg = hdr->lseg;
1764 struct nfs4_pnfs_ds *ds;
1765 struct rpc_clnt *ds_clnt;
1766 struct nfs4_ff_layout_mirror *mirror;
1767 const struct cred *ds_cred;
1768 loff_t offset = hdr->args.offset;
1769 u32 idx = hdr->pgio_mirror_idx;
1770 int vers;
1771 struct nfs_fh *fh;
1772
1773 dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1774 __func__, hdr->inode->i_ino,
1775 hdr->args.pgbase, (size_t)hdr->args.count, offset);
1776
1777 mirror = FF_LAYOUT_COMP(lseg, idx);
1778 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1779 if (!ds)
1780 goto out_failed;
1781
1782 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1783 hdr->inode);
1784 if (IS_ERR(ds_clnt))
1785 goto out_failed;
1786
1787 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1788 if (!ds_cred)
1789 goto out_failed;
1790
1791 vers = nfs4_ff_layout_ds_version(mirror);
1792
1793 dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1794 ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1795
1796 hdr->pgio_done_cb = ff_layout_read_done_cb;
1797 refcount_inc(&ds->ds_clp->cl_count);
1798 hdr->ds_clp = ds->ds_clp;
1799 fh = nfs4_ff_layout_select_ds_fh(mirror);
1800 if (fh)
1801 hdr->args.fh = fh;
1802
1803 nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1804
1805 /*
1806 * Note that if we ever decide to split across DSes,
1807 * then we may need to handle dense-like offsets.
1808 */
1809 hdr->args.offset = offset;
1810 hdr->mds_offset = offset;
1811
1812 /* Perform an asynchronous read to ds */
1813 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1814 vers == 3 ? &ff_layout_read_call_ops_v3 :
1815 &ff_layout_read_call_ops_v4,
1816 0, RPC_TASK_SOFTCONN);
1817 put_cred(ds_cred);
1818 return PNFS_ATTEMPTED;
1819
1820out_failed:
1821 if (ff_layout_avoid_mds_available_ds(lseg))
1822 return PNFS_TRY_AGAIN;
1823 trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
1824 hdr->args.offset, hdr->args.count,
1825 IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
1826 return PNFS_NOT_ATTEMPTED;
1827}
1828
1829/* Perform async writes. */
1830static enum pnfs_try_status
1831ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1832{
1833 struct pnfs_layout_segment *lseg = hdr->lseg;
1834 struct nfs4_pnfs_ds *ds;
1835 struct rpc_clnt *ds_clnt;
1836 struct nfs4_ff_layout_mirror *mirror;
1837 const struct cred *ds_cred;
1838 loff_t offset = hdr->args.offset;
1839 int vers;
1840 struct nfs_fh *fh;
1841 u32 idx = hdr->pgio_mirror_idx;
1842
1843 mirror = FF_LAYOUT_COMP(lseg, idx);
1844 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1845 if (!ds)
1846 goto out_failed;
1847
1848 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1849 hdr->inode);
1850 if (IS_ERR(ds_clnt))
1851 goto out_failed;
1852
1853 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1854 if (!ds_cred)
1855 goto out_failed;
1856
1857 vers = nfs4_ff_layout_ds_version(mirror);
1858
1859 dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1860 __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1861 offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1862 vers);
1863
1864 hdr->pgio_done_cb = ff_layout_write_done_cb;
1865 refcount_inc(&ds->ds_clp->cl_count);
1866 hdr->ds_clp = ds->ds_clp;
1867 hdr->ds_commit_idx = idx;
1868 fh = nfs4_ff_layout_select_ds_fh(mirror);
1869 if (fh)
1870 hdr->args.fh = fh;
1871
1872 nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1873
1874 /*
1875 * Note that if we ever decide to split across DSes,
1876 * then we may need to handle dense-like offsets.
1877 */
1878 hdr->args.offset = offset;
1879
1880 /* Perform an asynchronous write */
1881 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1882 vers == 3 ? &ff_layout_write_call_ops_v3 :
1883 &ff_layout_write_call_ops_v4,
1884 sync, RPC_TASK_SOFTCONN);
1885 put_cred(ds_cred);
1886 return PNFS_ATTEMPTED;
1887
1888out_failed:
1889 if (ff_layout_avoid_mds_available_ds(lseg))
1890 return PNFS_TRY_AGAIN;
1891 trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
1892 hdr->args.offset, hdr->args.count,
1893 IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
1894 return PNFS_NOT_ATTEMPTED;
1895}
1896
1897static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1898{
1899 return i;
1900}
1901
1902static struct nfs_fh *
1903select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1904{
1905 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1906
1907 /* FIXME: Assume that there is only one NFS version available
1908 * for the DS.
1909 */
1910 return &flseg->mirror_array[i]->fh_versions[0];
1911}
1912
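/*
 * Send a COMMIT to the data server holding the data. If the DS cannot be
 * used, hand the requests back so that they are resent through the MDS.
 */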
1913static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1914{
1915 struct pnfs_layout_segment *lseg = data->lseg;
1916 struct nfs4_pnfs_ds *ds;
1917 struct rpc_clnt *ds_clnt;
1918 struct nfs4_ff_layout_mirror *mirror;
1919 const struct cred *ds_cred;
1920 u32 idx;
1921 int vers, ret;
1922 struct nfs_fh *fh;
1923
1924 if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1925 test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1926 goto out_err;
1927
1928 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1929 mirror = FF_LAYOUT_COMP(lseg, idx);
1930 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1931 if (!ds)
1932 goto out_err;
1933
1934 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1935 data->inode);
1936 if (IS_ERR(ds_clnt))
1937 goto out_err;
1938
1939 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
1940 if (!ds_cred)
1941 goto out_err;
1942
1943 vers = nfs4_ff_layout_ds_version(mirror);
1944
1945 dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1946 data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
1947 vers);
1948 data->commit_done_cb = ff_layout_commit_done_cb;
1949 data->cred = ds_cred;
1950 refcount_inc(&ds->ds_clp->cl_count);
1951 data->ds_clp = ds->ds_clp;
1952 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1953 if (fh)
1954 data->args.fh = fh;
1955
1956 ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1957 vers == 3 ? &ff_layout_commit_call_ops_v3 :
1958 &ff_layout_commit_call_ops_v4,
1959 how, RPC_TASK_SOFTCONN);
1960 put_cred(ds_cred);
1961 return ret;
1962out_err:
1963 pnfs_generic_prepare_to_resend_writes(data);
1964 pnfs_generic_commit_release(data);
1965 return -EAGAIN;
1966}
1967
1968static int
1969ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1970 int how, struct nfs_commit_info *cinfo)
1971{
1972 return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1973 ff_layout_initiate_commit);
1974}
1975
1976static bool ff_layout_match_rw(const struct rpc_task *task,
1977 const struct nfs_pgio_header *hdr,
1978 const struct pnfs_layout_segment *lseg)
1979{
1980 return hdr->lseg == lseg;
1981}
1982
1983static bool ff_layout_match_commit(const struct rpc_task *task,
1984 const struct nfs_commit_data *cdata,
1985 const struct pnfs_layout_segment *lseg)
1986{
1987 return cdata->lseg == lseg;
1988}
1989
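/*
 * Match RPCs issued by this layout driver (read, write or commit) that
 * operate on the given layout segment.
 */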
1990static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
1991{
1992 const struct rpc_call_ops *ops = task->tk_ops;
1993
1994 if (ops == &ff_layout_read_call_ops_v3 ||
1995 ops == &ff_layout_read_call_ops_v4 ||
1996 ops == &ff_layout_write_call_ops_v3 ||
1997 ops == &ff_layout_write_call_ops_v4)
1998 return ff_layout_match_rw(task, task->tk_calldata, data);
1999 if (ops == &ff_layout_commit_call_ops_v3 ||
2000 ops == &ff_layout_commit_call_ops_v4)
2001 return ff_layout_match_commit(task, task->tk_calldata, data);
2002 return false;
2003}
2004
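/*
 * Cancel any I/O in flight to the data servers referenced by this layout
 * segment and disconnect the corresponding RPC clients.
 */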
2005static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
2006{
2007 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2008 struct nfs4_ff_layout_mirror *mirror;
2009 struct nfs4_ff_layout_ds *mirror_ds;
2010 struct nfs4_pnfs_ds *ds;
2011 struct nfs_client *ds_clp;
2012 struct rpc_clnt *clnt;
2013 u32 idx;
2014
2015 for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
2016 mirror = flseg->mirror_array[idx];
2017 mirror_ds = mirror->mirror_ds;
2018 if (!mirror_ds)
2019 continue;
2020 ds = mirror->mirror_ds->ds;
2021 if (!ds)
2022 continue;
2023 ds_clp = ds->ds_clp;
2024 if (!ds_clp)
2025 continue;
2026 clnt = ds_clp->cl_rpcclient;
2027 if (!clnt)
2028 continue;
2029 if (!rpc_cancel_tasks(clnt, -EAGAIN, ff_layout_match_io, lseg))
2030 continue;
2031 rpc_clnt_disconnect(clnt);
2032 }
2033}
2034
2035static struct pnfs_ds_commit_info *
2036ff_layout_get_ds_info(struct inode *inode)
2037{
2038 struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
2039
2040 if (layout == NULL)
2041 return NULL;
2042
2043 return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
2044}
2045
2046static void
2047ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2048 struct pnfs_layout_segment *lseg)
2049{
2050 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2051 struct inode *inode = lseg->pls_layout->plh_inode;
2052 struct pnfs_commit_array *array, *new;
2053
2054 new = pnfs_alloc_commit_array(flseg->mirror_array_cnt,
2055 nfs_io_gfp_mask());
2056 if (new) {
2057 spin_lock(&inode->i_lock);
2058 array = pnfs_add_commit_array(fl_cinfo, new, lseg);
2059 spin_unlock(&inode->i_lock);
2060 if (array != new)
2061 pnfs_free_commit_array(new);
2062 }
2063}
2064
2065static void
2066ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2067 struct inode *inode)
2068{
2069 spin_lock(&inode->i_lock);
2070 pnfs_generic_ds_cinfo_destroy(fl_cinfo);
2071 spin_unlock(&inode->i_lock);
2072}
2073
2074static void
2075ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
2076{
2077 nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
2078 id_node));
2079}
2080
2081static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
2082 const struct nfs4_layoutreturn_args *args,
2083 const struct nfs4_flexfile_layoutreturn_args *ff_args)
2084{
2085 __be32 *start;
2086
2087 start = xdr_reserve_space(xdr, 4);
2088 if (unlikely(!start))
2089 return -E2BIG;
2090
2091 *start = cpu_to_be32(ff_args->num_errors);
2092 /* This assumes we always return _ALL_ layouts */
2093 return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2094}
2095
2096static void
2097encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
2098{
2099 WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
2100}
2101
2102static void
2103ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2104 const nfs4_stateid *stateid,
2105 const struct nfs42_layoutstat_devinfo *devinfo)
2106{
2107 __be32 *p;
2108
2109 p = xdr_reserve_space(xdr, 8 + 8);
2110 p = xdr_encode_hyper(p, devinfo->offset);
2111 p = xdr_encode_hyper(p, devinfo->length);
2112 encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2113 p = xdr_reserve_space(xdr, 4*8);
2114 p = xdr_encode_hyper(p, devinfo->read_count);
2115 p = xdr_encode_hyper(p, devinfo->read_bytes);
2116 p = xdr_encode_hyper(p, devinfo->write_count);
2117 p = xdr_encode_hyper(p, devinfo->write_bytes);
2118 encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2119}
2120
2121static void
2122ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2123 const nfs4_stateid *stateid,
2124 const struct nfs42_layoutstat_devinfo *devinfo)
2125{
2126 ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2127 ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2128 devinfo->ld_private.data);
2129}
2130
2131/* Encode the iostats gathered for each device in this layoutreturn */
2132static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2133 const struct nfs4_layoutreturn_args *args,
2134 struct nfs4_flexfile_layoutreturn_args *ff_args)
2135{
2136 __be32 *p;
2137 int i;
2138
2139 p = xdr_reserve_space(xdr, 4);
2140 *p = cpu_to_be32(ff_args->num_dev);
2141 for (i = 0; i < ff_args->num_dev; i++)
2142 ff_layout_encode_ff_iostat(xdr,
2143 &args->layout->plh_stateid,
2144 &ff_args->devinfo[i]);
2145}
2146
2147static void
2148ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2149 unsigned int num_entries)
2150{
2151 unsigned int i;
2152
2153 for (i = 0; i < num_entries; i++) {
2154 if (!devinfo[i].ld_private.ops)
2155 continue;
2156 if (!devinfo[i].ld_private.ops->free)
2157 continue;
2158 devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2159 }
2160}
2161
2162static struct nfs4_deviceid_node *
2163ff_layout_alloc_deviceid_node(struct nfs_server *server,
2164 struct pnfs_device *pdev, gfp_t gfp_flags)
2165{
2166 struct nfs4_ff_layout_ds *dsaddr;
2167
2168 dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2169 if (!dsaddr)
2170 return NULL;
2171 return &dsaddr->id_node;
2172}
2173
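/*
 * Encode the flexfiles layoutreturn payload (I/O error list followed by
 * the iostats array) into a scratch page, then emit it as one
 * length-prefixed opaque body.
 */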
2174static void
2175ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2176 const void *voidargs,
2177 const struct nfs4_xdr_opaque_data *ff_opaque)
2178{
2179 const struct nfs4_layoutreturn_args *args = voidargs;
2180 struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2181 struct xdr_buf tmp_buf = {
2182 .head = {
2183 [0] = {
2184 .iov_base = page_address(ff_args->pages[0]),
2185 },
2186 },
2187 .buflen = PAGE_SIZE,
2188 };
2189 struct xdr_stream tmp_xdr;
2190 __be32 *start;
2191
2192 dprintk("%s: Begin\n", __func__);
2193
2194 xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2195
2196 ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2197 ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2198
2199 start = xdr_reserve_space(xdr, 4);
2200 *start = cpu_to_be32(tmp_buf.len);
2201 xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2202
2203 dprintk("%s: Return\n", __func__);
2204}
2205
2206static void
2207ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2208{
2209 struct nfs4_flexfile_layoutreturn_args *ff_args;
2210
2211 if (!args->data)
2212 return;
2213 ff_args = args->data;
2214 args->data = NULL;
2215
2216 ff_layout_free_ds_ioerr(&ff_args->errors);
2217 ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2218
2219 put_page(ff_args->pages[0]);
2220 kfree(ff_args);
2221}
2222
2223static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2224 .encode = ff_layout_encode_layoutreturn,
2225 .free = ff_layout_free_layoutreturn,
2226};
2227
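/*
 * Gather the DS errors and layoutstats to be returned with LAYOUTRETURN
 * and attach them to the request via the ld_private opaque data.
 */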
2228static int
2229ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2230{
2231 struct nfs4_flexfile_layoutreturn_args *ff_args;
2232 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2233
2234 ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask());
2235 if (!ff_args)
2236 goto out_nomem;
2237 ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
2238 if (!ff_args->pages[0])
2239 goto out_nomem_free;
2240
2241 INIT_LIST_HEAD(&ff_args->errors);
2242 ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2243 &args->range, &ff_args->errors,
2244 FF_LAYOUTRETURN_MAXERR);
2245
2246 spin_lock(&args->inode->i_lock);
2247 ff_args->num_dev = ff_layout_mirror_prepare_stats(
2248 &ff_layout->generic_hdr, &ff_args->devinfo[0],
2249 ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
2250 spin_unlock(&args->inode->i_lock);
2251
2252 args->ld_private->ops = &layoutreturn_ops;
2253 args->ld_private->data = ff_args;
2254 return 0;
2255out_nomem_free:
2256 kfree(ff_args);
2257out_nomem:
2258 return -ENOMEM;
2259}
2260
2261#ifdef CONFIG_NFS_V4_2
2262void
2263ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2264{
2265 struct pnfs_layout_hdr *lo = lseg->pls_layout;
2266 struct nfs42_layout_error *errors;
2267 LIST_HEAD(head);
2268
2269 if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2270 return;
2271 ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2272 if (list_empty(&head))
2273 return;
2274
2275 errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors),
2276 nfs_io_gfp_mask());
2277 if (errors != NULL) {
2278 const struct nfs4_ff_layout_ds_err *pos;
2279 size_t n = 0;
2280
2281 list_for_each_entry(pos, &head, list) {
2282 errors[n].offset = pos->offset;
2283 errors[n].length = pos->length;
2284 nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2285 errors[n].errors[0].dev_id = pos->deviceid;
2286 errors[n].errors[0].status = pos->status;
2287 errors[n].errors[0].opnum = pos->opnum;
2288 n++;
2289 if (!list_is_last(&pos->list, &head) &&
2290 n < NFS42_LAYOUTERROR_MAX)
2291 continue;
2292 if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2293 break;
2294 n = 0;
2295 }
2296 kfree(errors);
2297 }
2298 ff_layout_free_ds_ioerr(&head);
2299}
2300#else
2301void
2302ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2303{
2304}
2305#endif
2306
2307static int
2308ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2309{
2310 const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2311
2312 return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2313}
2314
2315static size_t
2316ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2317 const int buflen)
2318{
2319 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2320 const struct in6_addr *addr = &sin6->sin6_addr;
2321
2322 /*
2323 * RFC 4291, Section 2.2.2
2324 *
2325 * Shorthanded ANY address
2326 */
2327 if (ipv6_addr_any(addr))
2328 return snprintf(buf, buflen, "::");
2329
2330 /*
2331 * RFC 4291, Section 2.2.2
2332 *
2333 * Shorthanded loopback address
2334 */
2335 if (ipv6_addr_loopback(addr))
2336 return snprintf(buf, buflen, "::1");
2337
2338 /*
2339 * RFC 4291, Section 2.2.3
2340 *
2341 * Special presentation address format for mapped v4
2342 * addresses.
2343 */
2344 if (ipv6_addr_v4mapped(addr))
2345 return snprintf(buf, buflen, "::ffff:%pI4",
2346 &addr->s6_addr32[3]);
2347
2348 /*
2349 * RFC 4291, Section 2.2.1
2350 */
2351 return snprintf(buf, buflen, "%pI6c", addr);
2352}
2353
2354/* Derived from rpc_sockaddr2uaddr */
2355static void
2356ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2357{
2358 struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2359 char portbuf[RPCBIND_MAXUADDRPLEN];
2360 char addrbuf[RPCBIND_MAXUADDRLEN];
2361 unsigned short port;
2362 int len, netid_len;
2363 __be32 *p;
2364
2365 switch (sap->sa_family) {
2366 case AF_INET:
2367 if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2368 return;
2369 port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2370 break;
2371 case AF_INET6:
2372 if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2373 return;
2374 port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2375 break;
2376 default:
2377 WARN_ON_ONCE(1);
2378 return;
2379 }
2380
2381 snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2382 len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2383
2384 netid_len = strlen(da->da_netid);
2385 p = xdr_reserve_space(xdr, 4 + netid_len);
2386 xdr_encode_opaque(p, da->da_netid, netid_len);
2387
2388 p = xdr_reserve_space(xdr, 4 + len);
2389 xdr_encode_opaque(p, addrbuf, len);
2390}
2391
2392static void
2393ff_layout_encode_nfstime(struct xdr_stream *xdr,
2394 ktime_t t)
2395{
2396 struct timespec64 ts;
2397 __be32 *p;
2398
2399 p = xdr_reserve_space(xdr, 12);
2400 ts = ktime_to_timespec64(t);
2401 p = xdr_encode_hyper(p, ts.tv_sec);
2402 *p++ = cpu_to_be32(ts.tv_nsec);
2403}
2404
2405static void
2406ff_layout_encode_io_latency(struct xdr_stream *xdr,
2407 struct nfs4_ff_io_stat *stat)
2408{
2409 __be32 *p;
2410
2411 p = xdr_reserve_space(xdr, 5 * 8);
2412 p = xdr_encode_hyper(p, stat->ops_requested);
2413 p = xdr_encode_hyper(p, stat->bytes_requested);
2414 p = xdr_encode_hyper(p, stat->ops_completed);
2415 p = xdr_encode_hyper(p, stat->bytes_completed);
2416 p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2417 ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2418 ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2419}
2420
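/*
 * Encode the per-mirror ff_layoutupdate4: the DS netaddr and filehandle,
 * read and write latency statistics, the time elapsed since
 * mirror->start_time, and a trailing boolean (always false here).
 */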
2421static void
2422ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2423 const struct nfs42_layoutstat_devinfo *devinfo,
2424 struct nfs4_ff_layout_mirror *mirror)
2425{
2426 struct nfs4_pnfs_ds_addr *da;
2427 struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2428 struct nfs_fh *fh = &mirror->fh_versions[0];
2429 __be32 *p;
2430
2431 da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2432 dprintk("%s: DS %s: encoding address %s\n",
2433 __func__, ds->ds_remotestr, da->da_remotestr);
2434 /* netaddr4 */
2435 ff_layout_encode_netaddr(xdr, da);
2436 /* nfs_fh4 */
2437 p = xdr_reserve_space(xdr, 4 + fh->size);
2438 xdr_encode_opaque(p, fh->data, fh->size);
2439 /* ff_io_latency4 read */
2440 spin_lock(&mirror->lock);
2441 ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2442 /* ff_io_latency4 write */
2443 ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2444 spin_unlock(&mirror->lock);
2445 /* nfstime4 */
2446 ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2447 /* bool */
2448 p = xdr_reserve_space(xdr, 4);
2449 *p = cpu_to_be32(false);
2450}
2451
2452static void
2453ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2454 const struct nfs4_xdr_opaque_data *opaque)
2455{
2456 struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2457 struct nfs42_layoutstat_devinfo, ld_private);
2458 __be32 *start;
2459
2460 /* layoutupdate length */
2461 start = xdr_reserve_space(xdr, 4);
2462 ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2463
2464 *start = cpu_to_be32((xdr->p - start - 1) * 4);
2465}
2466
2467static void
2468ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2469{
2470 struct nfs4_ff_layout_mirror *mirror = opaque->data;
2471
2472 ff_layout_put_mirror(mirror);
2473}
2474
2475static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2476 .encode = ff_layout_encode_layoutstats,
2477 .free = ff_layout_free_layoutstats,
2478};
2479
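/*
 * Fill in one layoutstat devinfo entry per mirror that has statistics to
 * report (or for every mirror when preparing a layoutreturn), up to
 * dev_limit entries. Each entry holds a reference on its mirror, dropped
 * in ff_layout_free_layoutstats().
 */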
2480static int
2481ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2482 struct nfs42_layoutstat_devinfo *devinfo,
2483 int dev_limit, enum nfs4_ff_op_type type)
2484{
2485 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2486 struct nfs4_ff_layout_mirror *mirror;
2487 struct nfs4_deviceid_node *dev;
2488 int i = 0;
2489
2490 list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2491 if (i >= dev_limit)
2492 break;
2493 if (IS_ERR_OR_NULL(mirror->mirror_ds))
2494 continue;
2495 if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
2496 &mirror->flags) &&
2497 type != NFS4_FF_OP_LAYOUTRETURN)
2498 continue;
2499 /* mirror refcount put in ff_layout_free_layoutstats() */
2500 if (!refcount_inc_not_zero(&mirror->ref))
2501 continue;
2502 dev = &mirror->mirror_ds->id_node;
2503 memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2504 devinfo->offset = 0;
2505 devinfo->length = NFS4_MAX_UINT64;
2506 spin_lock(&mirror->lock);
2507 devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2508 devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2509 devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2510 devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2511 spin_unlock(&mirror->lock);
2512 devinfo->layout_type = LAYOUT_FLEX_FILES;
2513 devinfo->ld_private.ops = &layoutstat_ops;
2514 devinfo->ld_private.data = mirror;
2515
2516 devinfo++;
2517 i++;
2518 }
2519 return i;
2520}
2521
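/*
 * Allocate and populate the devinfo array for a LAYOUTSTATS call. Returns
 * -ENOENT if no mirror currently has statistics to report.
 */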
2522static int
2523ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2524{
2525 struct nfs4_flexfile_layout *ff_layout;
2526 const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2527
2528 /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2529 args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo),
2530 nfs_io_gfp_mask());
2531 if (!args->devinfo)
2532 return -ENOMEM;
2533
2534 spin_lock(&args->inode->i_lock);
2535 ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
2536 args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2537 &args->devinfo[0],
2538 dev_count,
2539 NFS4_FF_OP_LAYOUTSTATS);
2540 spin_unlock(&args->inode->i_lock);
2541 if (!args->num_dev) {
2542 kfree(args->devinfo);
2543 args->devinfo = NULL;
2544 return -ENOENT;
2545 }
2546
2547 return 0;
2548}
2549
2550static int
2551ff_layout_set_layoutdriver(struct nfs_server *server,
2552 const struct nfs_fh *dummy)
2553{
2554#if IS_ENABLED(CONFIG_NFS_V4_2)
2555 server->caps |= NFS_CAP_LAYOUTSTATS;
2556#endif
2557 return 0;
2558}
2559
2560static const struct pnfs_commit_ops ff_layout_commit_ops = {
2561 .setup_ds_info = ff_layout_setup_ds_info,
2562 .release_ds_info = ff_layout_release_ds_info,
2563 .mark_request_commit = pnfs_layout_mark_request_commit,
2564 .clear_request_commit = pnfs_generic_clear_request_commit,
2565 .scan_commit_lists = pnfs_generic_scan_commit_lists,
2566 .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
2567 .commit_pagelist = ff_layout_commit_pagelist,
2568};
2569
2570static struct pnfs_layoutdriver_type flexfilelayout_type = {
2571 .id = LAYOUT_FLEX_FILES,
2572 .name = "LAYOUT_FLEX_FILES",
2573 .owner = THIS_MODULE,
2574 .flags = PNFS_LAYOUTGET_ON_OPEN,
2575 .max_layoutget_response = 4096, /* 1 page or so... */
2576 .set_layoutdriver = ff_layout_set_layoutdriver,
2577 .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
2578 .free_layout_hdr = ff_layout_free_layout_hdr,
2579 .alloc_lseg = ff_layout_alloc_lseg,
2580 .free_lseg = ff_layout_free_lseg,
2581 .add_lseg = ff_layout_add_lseg,
2582 .pg_read_ops = &ff_layout_pg_read_ops,
2583 .pg_write_ops = &ff_layout_pg_write_ops,
2584 .get_ds_info = ff_layout_get_ds_info,
2585 .free_deviceid_node = ff_layout_free_deviceid_node,
2586 .read_pagelist = ff_layout_read_pagelist,
2587 .write_pagelist = ff_layout_write_pagelist,
2588 .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
2589 .prepare_layoutreturn = ff_layout_prepare_layoutreturn,
2590 .sync = pnfs_nfs_generic_sync,
2591 .prepare_layoutstats = ff_layout_prepare_layoutstats,
2592 .cancel_io = ff_layout_cancel_io,
2593};
2594
2595static int __init nfs4flexfilelayout_init(void)
2596{
2597 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2598 __func__);
2599 return pnfs_register_layoutdriver(&flexfilelayout_type);
2600}
2601
2602static void __exit nfs4flexfilelayout_exit(void)
2603{
2604 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2605 __func__);
2606 pnfs_unregister_layoutdriver(&flexfilelayout_type);
2607}
2608
2609MODULE_ALIAS("nfs-layouttype4-4");
2610
2611MODULE_LICENSE("GPL");
2612MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2613
2614module_init(nfs4flexfilelayout_init);
2615module_exit(nfs4flexfilelayout_exit);
2616
2617module_param(io_maxretrans, ushort, 0644);
2618MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
2619 "retries an I/O request before returning an error.");