1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Device operations for the pnfs nfs4 flexfile layout driver.
4 *
5 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
6 *
7 * Tao Peng <bergwolf@primarydata.com>
8 */
9
10#include <linux/nfs_fs.h>
11#include <linux/vmalloc.h>
12#include <linux/module.h>
13#include <linux/sunrpc/addr.h>
14
15#include "../internal.h"
16#include "../nfs4session.h"
17#include "flexfilelayout.h"
18
19#define NFSDBG_FACILITY NFSDBG_PNFS_LD
20
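/*
 * Connection tunables for data servers. dataserver_timeo is in tenths of a
 * second and dataserver_retrans counts retransmissions; both are exposed as
 * writable module parameters (see the module_param()/MODULE_PARM_DESC()
 * declarations at the end of this file) and are passed to
 * nfs4_pnfs_ds_connect() when a DS connection is first established.
 */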
21static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO;
22static unsigned int dataserver_retrans;
23
24static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);
25
26void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
27{
28 if (!IS_ERR_OR_NULL(mirror_ds))
29 nfs4_put_deviceid_node(&mirror_ds->id_node);
30}
31
32void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
33{
34 nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
35 nfs4_pnfs_ds_put(mirror_ds->ds);
36 kfree(mirror_ds->ds_versions);
37 kfree_rcu(mirror_ds, id_node.rcu);
38}
39
40/* Decode opaque device data and construct new_ds using it */
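/*
 * The opaque device body decoded below is, roughly, the ff_device_addr4
 * structure from the flexfile layout spec (RFC 8435):
 *
 *	struct ff_device_addr4 {
 *		multipath_list4  ffda_netaddrs;     counted array of netaddr4
 *		ff_ds_version4   ffda_versions<>;   version, minorversion,
 *						    rsize, wsize, tightly_coupled
 *	};
 *
 * Each netaddr4 is parsed by nfs4_decode_mp_ds_addr(); each version entry is
 * five 32-bit words, which is why the loop below pulls 20 bytes at a time.
 */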
41struct nfs4_ff_layout_ds *
42nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
43 gfp_t gfp_flags)
44{
45 struct xdr_stream stream;
46 struct xdr_buf buf;
47 struct page *scratch;
48 struct list_head dsaddrs;
49 struct nfs4_pnfs_ds_addr *da;
50 struct nfs4_ff_layout_ds *new_ds = NULL;
51 struct nfs4_ff_ds_version *ds_versions = NULL;
52 u32 mp_count;
53 u32 version_count;
54 __be32 *p;
55 int i, ret = -ENOMEM;
56
57 /* set up xdr stream */
58 scratch = alloc_page(gfp_flags);
59 if (!scratch)
60 goto out_err;
61
62 new_ds = kzalloc(sizeof(struct nfs4_ff_layout_ds), gfp_flags);
63 if (!new_ds)
64 goto out_scratch;
65
66 nfs4_init_deviceid_node(&new_ds->id_node,
67 server,
68 &pdev->dev_id);
69 INIT_LIST_HEAD(&dsaddrs);
70
71 xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
72 xdr_set_scratch_page(&stream, scratch);
73
74 /* multipath count */
75 p = xdr_inline_decode(&stream, 4);
76 if (unlikely(!p))
77 goto out_err_drain_dsaddrs;
78 mp_count = be32_to_cpup(p);
79 dprintk("%s: multipath ds count %d\n", __func__, mp_count);
80
81 for (i = 0; i < mp_count; i++) {
82 /* multipath ds */
83 da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
84 &stream, gfp_flags);
85 if (da)
86 list_add_tail(&da->da_node, &dsaddrs);
87 }
88 if (list_empty(&dsaddrs)) {
89 dprintk("%s: no suitable DS addresses found\n",
90 __func__);
91 ret = -ENOMEDIUM;
92 goto out_err_drain_dsaddrs;
93 }
94
95 /* version count */
96 p = xdr_inline_decode(&stream, 4);
97 if (unlikely(!p))
98 goto out_err_drain_dsaddrs;
99 version_count = be32_to_cpup(p);
100 dprintk("%s: version count %d\n", __func__, version_count);
101
102 ds_versions = kcalloc(version_count,
103 sizeof(struct nfs4_ff_ds_version),
104 gfp_flags);
105 if (!ds_versions)
106 goto out_scratch;
107
108 for (i = 0; i < version_count; i++) {
109 /* 20 = version(4) + minor_version(4) + rsize(4) + wsize(4) +
110 * tightly_coupled(4) */
111 p = xdr_inline_decode(&stream, 20);
112 if (unlikely(!p))
113 goto out_err_drain_dsaddrs;
114 ds_versions[i].version = be32_to_cpup(p++);
115 ds_versions[i].minor_version = be32_to_cpup(p++);
116 ds_versions[i].rsize = nfs_io_size(be32_to_cpup(p++),
117 server->nfs_client->cl_proto);
118 ds_versions[i].wsize = nfs_io_size(be32_to_cpup(p++),
119 server->nfs_client->cl_proto);
120 ds_versions[i].tightly_coupled = be32_to_cpup(p);
121
122 if (ds_versions[i].rsize > NFS_MAX_FILE_IO_SIZE)
123 ds_versions[i].rsize = NFS_MAX_FILE_IO_SIZE;
124 if (ds_versions[i].wsize > NFS_MAX_FILE_IO_SIZE)
125 ds_versions[i].wsize = NFS_MAX_FILE_IO_SIZE;
126
127 /*
128 * check for valid major/minor combination.
129 * currently we support data servers that talk:
130 * v3, v4.0, v4.1, v4.2
131 */
132 if (!((ds_versions[i].version == 3 && ds_versions[i].minor_version == 0) ||
133 (ds_versions[i].version == 4 && ds_versions[i].minor_version < 3))) {
134 dprintk("%s: [%d] unsupported ds version %d-%d\n", __func__,
135 i, ds_versions[i].version,
136 ds_versions[i].minor_version);
137 ret = -EPROTONOSUPPORT;
138 goto out_err_drain_dsaddrs;
139 }
140
141 dprintk("%s: [%d] vers %u minor_ver %u rsize %u wsize %u coupled %d\n",
142 __func__, i, ds_versions[i].version,
143 ds_versions[i].minor_version,
144 ds_versions[i].rsize,
145 ds_versions[i].wsize,
146 ds_versions[i].tightly_coupled);
147 }
148
149 new_ds->ds_versions = ds_versions;
150 new_ds->ds_versions_cnt = version_count;
151
152 new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
153 if (!new_ds->ds)
154 goto out_err_drain_dsaddrs;
155
156 /* If DS was already in cache, free ds addrs */
157 while (!list_empty(&dsaddrs)) {
158 da = list_first_entry(&dsaddrs,
159 struct nfs4_pnfs_ds_addr,
160 da_node);
161 list_del_init(&da->da_node);
162 kfree(da->da_remotestr);
163 kfree(da);
164 }
165
166 __free_page(scratch);
167 return new_ds;
168
169out_err_drain_dsaddrs:
170 while (!list_empty(&dsaddrs)) {
171 da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr,
172 da_node);
173 list_del_init(&da->da_node);
174 kfree(da->da_remotestr);
175 kfree(da);
176 }
177
178 kfree(ds_versions);
179out_scratch:
180 __free_page(scratch);
181out_err:
182 kfree(new_ds);
183
184 dprintk("%s ERROR: returning %d\n", __func__, ret);
185 return NULL;
186}
187
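/*
 * Data-server I/O errors are recorded per layout in flo->error_list so that
 * they can later be reported back to the MDS (via LAYOUTERROR and/or the
 * ff_ioerr information carried in LAYOUTRETURN). The helpers below keep that
 * list sorted and merge entries that describe adjacent or overlapping byte
 * ranges of the same (opnum, status, stateid, deviceid) tuple.
 */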
188static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
189 u64 offset, u64 length)
190{
191 u64 end;
192
193 end = max_t(u64, pnfs_end_offset(err->offset, err->length),
194 pnfs_end_offset(offset, length));
195 err->offset = min_t(u64, err->offset, offset);
196 err->length = end - err->offset;
197}
198
199static int
200ff_ds_error_match(const struct nfs4_ff_layout_ds_err *e1,
201 const struct nfs4_ff_layout_ds_err *e2)
202{
203 int ret;
204
205 if (e1->opnum != e2->opnum)
206 return e1->opnum < e2->opnum ? -1 : 1;
207 if (e1->status != e2->status)
208 return e1->status < e2->status ? -1 : 1;
209 ret = memcmp(e1->stateid.data, e2->stateid.data,
210 sizeof(e1->stateid.data));
211 if (ret != 0)
212 return ret;
213 ret = memcmp(&e1->deviceid, &e2->deviceid, sizeof(e1->deviceid));
214 if (ret != 0)
215 return ret;
216 if (pnfs_end_offset(e1->offset, e1->length) < e2->offset)
217 return -1;
218 if (e1->offset > pnfs_end_offset(e2->offset, e2->length))
219 return 1;
220 /* If ranges overlap or are contiguous, they are the same */
221 return 0;
222}
223
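/*
 * Example of the merge semantics above: two NFS4ERR_NXIO errors for the same
 * WRITE opnum, stateid and deviceid covering [0, 4096) and [4096, 8192) are
 * contiguous, so ff_ds_error_match() treats them as equal and
 * ff_layout_add_ds_error_locked() below collapses them into a single entry
 * covering [0, 8192).
 */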
224static void
225ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo,
226 struct nfs4_ff_layout_ds_err *dserr)
227{
228 struct nfs4_ff_layout_ds_err *err, *tmp;
229 struct list_head *head = &flo->error_list;
230 int match;
231
232 /* Do insertion sort w/ merges */
233 list_for_each_entry_safe(err, tmp, &flo->error_list, list) {
234 match = ff_ds_error_match(err, dserr);
235 if (match < 0)
236 continue;
237 if (match > 0) {
238 /* Add entry "dserr" _before_ entry "err" */
239 head = &err->list;
240 break;
241 }
242 /* Entries match, so merge "err" into "dserr" */
243 extend_ds_error(dserr, err->offset, err->length);
244 list_replace(&err->list, &dserr->list);
245 kfree(err);
246 return;
247 }
248
249 list_add_tail(&dserr->list, head);
250}
251
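/*
 * ff_layout_track_ds_error() is the entry point used by the I/O completion
 * and connection paths (see flexfilelayout.c and nfs4_ff_layout_prepare_ds()
 * below) to record a DS error against the layout; a zero status is a no-op.
 */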
252int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
253 struct nfs4_ff_layout_mirror *mirror, u64 offset,
254 u64 length, int status, enum nfs_opnum4 opnum,
255 gfp_t gfp_flags)
256{
257 struct nfs4_ff_layout_ds_err *dserr;
258
259 if (status == 0)
260 return 0;
261
262 if (IS_ERR_OR_NULL(mirror->mirror_ds))
263 return -EINVAL;
264
265 dserr = kmalloc(sizeof(*dserr), gfp_flags);
266 if (!dserr)
267 return -ENOMEM;
268
269 INIT_LIST_HEAD(&dserr->list);
270 dserr->offset = offset;
271 dserr->length = length;
272 dserr->status = status;
273 dserr->opnum = opnum;
274 nfs4_stateid_copy(&dserr->stateid, &mirror->stateid);
275 memcpy(&dserr->deviceid, &mirror->mirror_ds->id_node.deviceid,
276 NFS4_DEVICEID4_SIZE);
277
278 spin_lock(&flo->generic_hdr.plh_inode->i_lock);
279 ff_layout_add_ds_error_locked(flo, dserr);
280 spin_unlock(&flo->generic_hdr.plh_inode->i_lock);
281 return 0;
282}
283
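/*
 * For loosely coupled data servers the layout carries per-mirror user
 * credentials: mirror->ro_cred is used for IOMODE_READ and mirror->rw_cred
 * otherwise. The RCU-protected lookup below retries get_cred_rcu() in the
 * unlikely case where the cred's refcount has already dropped to zero while
 * another thread is replacing it.
 */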
284static const struct cred *
285ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode)
286{
287 const struct cred *cred, __rcu **pcred;
288
289 if (iomode == IOMODE_READ)
290 pcred = &mirror->ro_cred;
291 else
292 pcred = &mirror->rw_cred;
293
294 rcu_read_lock();
295 do {
296 cred = rcu_dereference(*pcred);
297 if (!cred)
298 break;
299
300 cred = get_cred_rcu(cred);
301 } while (!cred);
302 rcu_read_unlock();
303 return cred;
304}
305
306struct nfs_fh *
307nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror)
308{
309 /* FIXME: For now assume there is only 1 version available for the DS */
310 return &mirror->fh_versions[0];
311}
312
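/*
 * Only NFSv4 data servers use the stateid handed out with the layout; for an
 * NFSv3 DS the caller's stateid (typically the one used towards the MDS) is
 * left untouched.
 */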
313void
314nfs4_ff_layout_select_ds_stateid(const struct nfs4_ff_layout_mirror *mirror,
315 nfs4_stateid *stateid)
316{
317 if (nfs4_ff_layout_ds_version(mirror) == 4)
318 nfs4_stateid_copy(stateid, &mirror->stateid);
319}
320
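/*
 * Lazily resolve the mirror's deviceid into a cached nfs4_ff_layout_ds. The
 * cmpxchg() guards against two tasks racing to fill in mirror->mirror_ds; a
 * failed lookup is remembered as ERR_PTR(-ENODEV) so that subsequent callers
 * fail fast instead of repeating the device lookup.
 */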
321static bool
322ff_layout_init_mirror_ds(struct pnfs_layout_hdr *lo,
323 struct nfs4_ff_layout_mirror *mirror)
324{
325 if (mirror == NULL)
326 goto outerr;
327 if (mirror->mirror_ds == NULL) {
328 struct nfs4_deviceid_node *node;
329 struct nfs4_ff_layout_ds *mirror_ds = ERR_PTR(-ENODEV);
330
331 node = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode),
332 &mirror->devid, lo->plh_lc_cred,
333 GFP_KERNEL);
334 if (node)
335 mirror_ds = FF_LAYOUT_MIRROR_DS(node);
336
337 /* check for race with another call to this function */
338 if (cmpxchg(&mirror->mirror_ds, NULL, mirror_ds) &&
339 mirror_ds != ERR_PTR(-ENODEV))
340 nfs4_put_deviceid_node(node);
341 }
342
343 if (IS_ERR(mirror->mirror_ds))
344 goto outerr;
345
346 return true;
347outerr:
348 return false;
349}
350
351/**
352 * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
353 * @lseg: the layout segment we're operating on
354 * @mirror: layout mirror describing the DS to use
355 * @fail_return: return layout on connect failure?
356 *
357 * Try to prepare a DS connection to accept an RPC call. This involves
358 * selecting a mirror to use and connecting the client to it if it's not
359 * already connected.
360 *
361 * Since we only need a single functioning mirror to satisfy a read, we don't
362 * want to return the layout if there is one. For writes though, any down
363 * mirror should result in a LAYOUTRETURN. @fail_return is how we distinguish
364 * between the two cases.
365 *
366 * Returns a pointer to a connected DS object on success or NULL on failure.
367 */
368struct nfs4_pnfs_ds *
369nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
370 struct nfs4_ff_layout_mirror *mirror,
371 bool fail_return)
372{
373 struct nfs4_pnfs_ds *ds = NULL;
374 struct inode *ino = lseg->pls_layout->plh_inode;
375 struct nfs_server *s = NFS_SERVER(ino);
376 unsigned int max_payload;
377 int status;
378
379 if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror))
380 goto noconnect;
381
382 ds = mirror->mirror_ds->ds;
383 if (READ_ONCE(ds->ds_clp))
384 goto out;
385 /* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
386 smp_rmb();
387
388 /* FIXME: For now we assume the server sent only one version of NFS
389 * to use for the DS.
390 */
391 status = nfs4_pnfs_ds_connect(s, ds, &mirror->mirror_ds->id_node,
392 dataserver_timeo, dataserver_retrans,
393 mirror->mirror_ds->ds_versions[0].version,
394 mirror->mirror_ds->ds_versions[0].minor_version);
395
396 /* connect success, check rsize/wsize limit */
397 if (!status) {
398 max_payload =
399 nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
400 NULL);
401 if (mirror->mirror_ds->ds_versions[0].rsize > max_payload)
402 mirror->mirror_ds->ds_versions[0].rsize = max_payload;
403 if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
404 mirror->mirror_ds->ds_versions[0].wsize = max_payload;
405 goto out;
406 }
407noconnect:
408 ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
409 mirror, lseg->pls_range.offset,
410 lseg->pls_range.length, NFS4ERR_NXIO,
411 OP_ILLEGAL, GFP_NOIO);
412 ff_layout_send_layouterror(lseg);
413 if (fail_return || !ff_layout_has_available_ds(lseg))
414 pnfs_error_mark_layout_for_return(ino, lseg);
415 ds = NULL;
416out:
417 return ds;
418}
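/*
 * Sketch of how the read/write paths in flexfilelayout.c typically use the
 * helpers in this file (illustrative only, not verbatim kernel code):
 *
 *	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
 *	if (!ds)
 *		return PNFS_NOT_ATTEMPTED;	fall back to the MDS
 *	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp, inode);
 *	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
 */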
419
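/*
 * Credential selection for DS I/O: a tightly coupled DS shares state with the
 * MDS, so the MDS credential is reused as-is; a loosely coupled DS is accessed
 * with the synthetic per-mirror credential supplied in the layout, falling
 * back to the MDS credential if none was set up.
 */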
420const struct cred *
421ff_layout_get_ds_cred(struct nfs4_ff_layout_mirror *mirror,
422 const struct pnfs_layout_range *range,
423 const struct cred *mdscred)
424{
425 const struct cred *cred;
426
427 if (mirror && !mirror->mirror_ds->ds_versions[0].tightly_coupled) {
428 cred = ff_layout_get_mirror_cred(mirror, range->iomode);
429 if (!cred)
430 cred = get_cred(mdscred);
431 } else {
432 cred = get_cred(mdscred);
433 }
434 return cred;
435}
436
437/**
438 * nfs4_ff_find_or_create_ds_client - Find or create a DS rpc client
439 * @mirror: pointer to the mirror
440 * @ds_clp: nfs_client for the DS
441 * @inode: pointer to inode
442 *
443 * Find or create a DS rpc client with the MDS server rpc client auth flavor
444 * in the nfs_client cl_ds_clients list.
445 */
446struct rpc_clnt *
447nfs4_ff_find_or_create_ds_client(struct nfs4_ff_layout_mirror *mirror,
448 struct nfs_client *ds_clp, struct inode *inode)
449{
450 switch (mirror->mirror_ds->ds_versions[0].version) {
451 case 3:
452 /* For NFSv3 DS, flavor is set when creating DS connections */
453 return ds_clp->cl_rpcclient;
454 case 4:
455 return nfs4_find_or_create_ds_client(ds_clp, inode);
456 default:
457 BUG();
458 }
459}
460
461void ff_layout_free_ds_ioerr(struct list_head *head)
462{
463 struct nfs4_ff_layout_ds_err *err;
464
465 while (!list_empty(head)) {
466 err = list_first_entry(head,
467 struct nfs4_ff_layout_ds_err,
468 list);
469 list_del(&err->list);
470 kfree(err);
471 }
472}
473
474/* called with inode i_lock held */
475int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head)
476{
477 struct nfs4_ff_layout_ds_err *err;
478 __be32 *p;
479
480 list_for_each_entry(err, head, list) {
481 /* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
482 * + array length + deviceid(NFS4_DEVICEID4_SIZE)
483 * + status(4) + opnum(4)
484 */
485 p = xdr_reserve_space(xdr,
486 28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
487 if (unlikely(!p))
488 return -ENOBUFS;
489 p = xdr_encode_hyper(p, err->offset);
490 p = xdr_encode_hyper(p, err->length);
491 p = xdr_encode_opaque_fixed(p, &err->stateid,
492 NFS4_STATEID_SIZE);
493 /* Encode 1 error */
494 *p++ = cpu_to_be32(1);
495 p = xdr_encode_opaque_fixed(p, &err->deviceid,
496 NFS4_DEVICEID4_SIZE);
497 *p++ = cpu_to_be32(err->status);
498 *p++ = cpu_to_be32(err->opnum);
499 dprintk("%s: offset %llu length %llu status %d op %d\n",
500 __func__, err->offset, err->length, err->status,
501 err->opnum);
502 }
503
504 return 0;
505}
506
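/*
 * The two helpers below move error entries that intersect @range off the
 * layout's error list and onto @head for reporting, up to @maxnum entries.
 * If the limit is hit, ff_layout_fetch_ds_ioerr() drains and frees whatever
 * intersecting entries remain, so each error is reported at most once.
 */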
507static
508unsigned int do_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
509 const struct pnfs_layout_range *range,
510 struct list_head *head,
511 unsigned int maxnum)
512{
513 struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
514 struct inode *inode = lo->plh_inode;
515 struct nfs4_ff_layout_ds_err *err, *n;
516 unsigned int ret = 0;
517
518 spin_lock(&inode->i_lock);
519 list_for_each_entry_safe(err, n, &flo->error_list, list) {
520 if (!pnfs_is_range_intersecting(err->offset,
521 pnfs_end_offset(err->offset, err->length),
522 range->offset,
523 pnfs_end_offset(range->offset, range->length)))
524 continue;
525 if (!maxnum)
526 break;
527 list_move(&err->list, head);
528 maxnum--;
529 ret++;
530 }
531 spin_unlock(&inode->i_lock);
532 return ret;
533}
534
535unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
536 const struct pnfs_layout_range *range,
537 struct list_head *head,
538 unsigned int maxnum)
539{
540 unsigned int ret;
541
542 ret = do_layout_fetch_ds_ioerr(lo, range, head, maxnum);
543 /* If we're over the max, discard all remaining entries */
544 if (ret == maxnum) {
545 LIST_HEAD(discard);
546 do_layout_fetch_ds_ioerr(lo, range, &discard, -1);
547 ff_layout_free_ds_ioerr(&discard);
548 }
549 return ret;
550}
551
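/*
 * Mirror availability rules: a READ layout is usable as long as at least one
 * mirror still has a working (or not yet looked up) data server, whereas a RW
 * layout needs every mirror to be usable, since writes must go to all of
 * them.
 */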
552static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
553{
554 struct nfs4_ff_layout_mirror *mirror;
555 struct nfs4_deviceid_node *devid;
556 u32 idx;
557
558 for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
559 mirror = FF_LAYOUT_COMP(lseg, idx);
560 if (mirror) {
561 if (!mirror->mirror_ds)
562 return true;
563 if (IS_ERR(mirror->mirror_ds))
564 continue;
565 devid = &mirror->mirror_ds->id_node;
566 if (!nfs4_test_deviceid_unavailable(devid))
567 return true;
568 }
569 }
570
571 return false;
572}
573
574static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
575{
576 struct nfs4_ff_layout_mirror *mirror;
577 struct nfs4_deviceid_node *devid;
578 u32 idx;
579
580 for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
581 mirror = FF_LAYOUT_COMP(lseg, idx);
582 if (!mirror || IS_ERR(mirror->mirror_ds))
583 return false;
584 if (!mirror->mirror_ds)
585 continue;
586 devid = &mirror->mirror_ds->id_node;
587 if (nfs4_test_deviceid_unavailable(devid))
588 return false;
589 }
590
591 return FF_LAYOUT_MIRROR_COUNT(lseg) != 0;
592}
593
594static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
595{
596 if (lseg->pls_range.iomode == IOMODE_READ)
597 return ff_read_layout_has_available_ds(lseg);
598 /* Note: RW layout needs all mirrors available */
599 return ff_rw_layout_has_available_ds(lseg);
600}
601
602bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg)
603{
604 return ff_layout_no_fallback_to_mds(lseg) ||
605 ff_layout_has_available_ds(lseg);
606}
607
608bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg)
609{
610 return lseg->pls_range.iomode == IOMODE_RW &&
611 ff_layout_no_read_on_rw(lseg);
612}
613
614module_param(dataserver_retrans, uint, 0644);
615MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
616 "retries a request before it attempts further "
617 "recovery action.");
618module_param(dataserver_timeo, uint, 0644);
619MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
620 "NFSv4.1 client waits for a response from a "
621 " data server before it retries an NFS request.");