v4.17
  1/*
  2 * Common NFS I/O operations for the pnfs file based
  3 * layout drivers.
  4 *
  5 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
  6 *
  7 * Tom Haynes <loghyr@primarydata.com>
  8 */
  9
 10#include <linux/nfs_fs.h>
 11#include <linux/nfs_page.h>
 12#include <linux/sunrpc/addr.h>
 13#include <linux/module.h>
 14
 15#include "nfs4session.h"
 16#include "internal.h"
 17#include "pnfs.h"
 18
 19#define NFSDBG_FACILITY		NFSDBG_PNFS
 20
 21void pnfs_generic_rw_release(void *data)
 22{
 23	struct nfs_pgio_header *hdr = data;
 24
 25	nfs_put_client(hdr->ds_clp);
 26	hdr->mds_ops->rpc_release(data);
 27}
 28EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);
 29
 30/* Fake up some data that will cause nfs_commit_release to retry the writes. */
 31void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
 32{
 33	struct nfs_page *first = nfs_list_entry(data->pages.next);
 34
 35	data->task.tk_status = 0;
 36	memcpy(&data->verf.verifier, &first->wb_verf,
 37	       sizeof(data->verf.verifier));
 38	data->verf.verifier.data[0]++; /* ensure verifier mismatch */
 39}
 40EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);
 41
 42void pnfs_generic_write_commit_done(struct rpc_task *task, void *data)
 43{
 44	struct nfs_commit_data *wdata = data;
 45
 46	/* Note this may cause RPC to be resent */
 47	wdata->mds_ops->rpc_call_done(task, data);
 48}
 49EXPORT_SYMBOL_GPL(pnfs_generic_write_commit_done);
 50
 51void pnfs_generic_commit_release(void *calldata)
 52{
 53	struct nfs_commit_data *data = calldata;
 54
 55	data->completion_ops->completion(data);
 56	pnfs_put_lseg(data->lseg);
 57	nfs_put_client(data->ds_clp);
 58	nfs_commitdata_release(data);
 59}
 60EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);
 61
 62/* The generic layer is about to remove the req from the commit list.
 63 * If this will make the bucket empty, it will need to put the lseg reference.
 64 * Note this must be called holding i_lock
 65 */
 66void
 67pnfs_generic_clear_request_commit(struct nfs_page *req,
 68				  struct nfs_commit_info *cinfo)
 69{
 70	struct pnfs_layout_segment *freeme = NULL;
 71
 72	if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
 73		goto out;
 74	cinfo->ds->nwritten--;
 75	if (list_is_singular(&req->wb_list)) {
 76		struct pnfs_commit_bucket *bucket;
 77
 78		bucket = list_first_entry(&req->wb_list,
 79					  struct pnfs_commit_bucket,
 80					  written);
 81		freeme = bucket->wlseg;
 82		bucket->wlseg = NULL;
 83	}
 84out:
 85	nfs_request_remove_commit_list(req, cinfo);
 86	pnfs_put_lseg(freeme);
 87}
 88EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);
 89
 90static int
 91pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
 92				 struct nfs_commit_info *cinfo,
 93				 int max)
 94{
 95	struct list_head *src = &bucket->written;
 96	struct list_head *dst = &bucket->committing;
 97	int ret;
 98
 99	lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
100	ret = nfs_scan_commit_list(src, dst, cinfo, max);
101	if (ret) {
102		cinfo->ds->nwritten -= ret;
103		cinfo->ds->ncommitting += ret;
104		if (bucket->clseg == NULL)
105			bucket->clseg = pnfs_get_lseg(bucket->wlseg);
106		if (list_empty(src)) {
107			pnfs_put_lseg(bucket->wlseg);
108			bucket->wlseg = NULL;
109		}
110	}
111	return ret;
112}
113
114/* Move reqs from written to committing lists, returning count
115 * of number moved.
116 */
117int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo,
118				   int max)
119{
120	int i, rv = 0, cnt;
121
122	lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
123	for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
124		cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i],
125						       cinfo, max);
126		max -= cnt;
127		rv += cnt;
128	}
129	return rv;
130}
131EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists);
132
133/* Pull everything off the committing lists and dump into @dst.  */
134void pnfs_generic_recover_commit_reqs(struct list_head *dst,
135				      struct nfs_commit_info *cinfo)
136{
137	struct pnfs_commit_bucket *b;
138	struct pnfs_layout_segment *freeme;
139	int nwritten;
140	int i;
141
142	lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
143restart:
144	for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
145		nwritten = nfs_scan_commit_list(&b->written, dst, cinfo, 0);
146		if (!nwritten)
147			continue;
148		cinfo->ds->nwritten -= nwritten;
149		if (list_empty(&b->written)) {
150			freeme = b->wlseg;
151			b->wlseg = NULL;
152			spin_unlock(&cinfo->inode->i_lock);
153			pnfs_put_lseg(freeme);
154			spin_lock(&cinfo->inode->i_lock);
155			goto restart;
156		}
157	}
158}
159EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);
160
161static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
162{
163	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
164	struct pnfs_commit_bucket *bucket;
165	struct pnfs_layout_segment *freeme;
166	struct list_head *pos;
167	LIST_HEAD(pages);
168	int i;
169
170	spin_lock(&cinfo->inode->i_lock);
171	for (i = idx; i < fl_cinfo->nbuckets; i++) {
172		bucket = &fl_cinfo->buckets[i];
173		if (list_empty(&bucket->committing))
174			continue;
175		freeme = bucket->clseg;
176		bucket->clseg = NULL;
177		list_for_each(pos, &bucket->committing)
178			cinfo->ds->ncommitting--;
179		list_splice_init(&bucket->committing, &pages);
180		spin_unlock(&cinfo->inode->i_lock);
181		nfs_retry_commit(&pages, freeme, cinfo, i);
182		pnfs_put_lseg(freeme);
183		spin_lock(&cinfo->inode->i_lock);
184	}
185	spin_unlock(&cinfo->inode->i_lock);
186}
187
188static unsigned int
189pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo,
190			      struct list_head *list)
191{
192	struct pnfs_ds_commit_info *fl_cinfo;
193	struct pnfs_commit_bucket *bucket;
194	struct nfs_commit_data *data;
195	int i;
196	unsigned int nreq = 0;
197
198	fl_cinfo = cinfo->ds;
199	bucket = fl_cinfo->buckets;
200	for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
201		if (list_empty(&bucket->committing))
202			continue;
203		data = nfs_commitdata_alloc(false);
204		if (!data)
205			break;
206		data->ds_commit_index = i;
207		list_add(&data->pages, list);
208		nreq++;
209	}
210
211	/* Clean up on error */
212	pnfs_generic_retry_commit(cinfo, i);
213	return nreq;
214}
215
216static inline
217void pnfs_fetch_commit_bucket_list(struct list_head *pages,
218		struct nfs_commit_data *data,
219		struct nfs_commit_info *cinfo)
220{
221	struct pnfs_commit_bucket *bucket;
222	struct list_head *pos;
223
224	bucket = &cinfo->ds->buckets[data->ds_commit_index];
225	spin_lock(&cinfo->inode->i_lock);
226	list_for_each(pos, &bucket->committing)
227		cinfo->ds->ncommitting--;
228	list_splice_init(&bucket->committing, pages);
229	data->lseg = bucket->clseg;
230	bucket->clseg = NULL;
231	spin_unlock(&cinfo->inode->i_lock);
232
233}
234
235/* Helper function for pnfs_generic_commit_pagelist to catch an empty
236 * page list. This can happen when two commits race.
237 *
238 * This must be called instead of nfs_init_commit - call one or the other, but
239 * not both!
240 */
241static bool
242pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
243					  struct nfs_commit_data *data,
244					  struct nfs_commit_info *cinfo)
245{
246	if (list_empty(pages)) {
247		if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
248			wake_up_var(&cinfo->mds->rpcs_out);
249		/* don't call nfs_commitdata_release - it tries to put
250		 * the open_context which is not acquired until nfs_init_commit
251		 * which has not been called on @data */
252		WARN_ON_ONCE(data->context);
253		nfs_commit_free(data);
254		return true;
255	}
256
257	return false;
258}
259
260/* This follows nfs_commit_list pretty closely */
261int
262pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
263			     int how, struct nfs_commit_info *cinfo,
264			     int (*initiate_commit)(struct nfs_commit_data *data,
265						    int how))
266{
267	struct nfs_commit_data *data, *tmp;
268	LIST_HEAD(list);
269	unsigned int nreq = 0;
270
271	if (!list_empty(mds_pages)) {
272		data = nfs_commitdata_alloc(true);
273		data->ds_commit_index = -1;
274		list_add(&data->pages, &list);
275		nreq++;
276	}
277
278	nreq += pnfs_generic_alloc_ds_commits(cinfo, &list);
279
280	if (nreq == 0)
281		goto out;
282
283	atomic_add(nreq, &cinfo->mds->rpcs_out);
284
285	list_for_each_entry_safe(data, tmp, &list, pages) {
286		list_del_init(&data->pages);
287		if (data->ds_commit_index < 0) {
288			/* another commit raced with us */
289			if (pnfs_generic_commit_cancel_empty_pagelist(mds_pages,
290				data, cinfo))
291				continue;
292
293			nfs_init_commit(data, mds_pages, NULL, cinfo);
294			nfs_initiate_commit(NFS_CLIENT(inode), data,
295					    NFS_PROTO(data->inode),
296					    data->mds_ops, how, 0);
297		} else {
298			LIST_HEAD(pages);
299
300			pnfs_fetch_commit_bucket_list(&pages, data, cinfo);
301
302			/* another commit raced with us */
303			if (pnfs_generic_commit_cancel_empty_pagelist(&pages,
304				data, cinfo))
305				continue;
306
307			nfs_init_commit(data, &pages, data->lseg, cinfo);
308			initiate_commit(data, how);
309		}
310	}
311out:
312	return PNFS_ATTEMPTED;
313}
314EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist);
315
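/*
 * Example only: a minimal sketch (not taken from any in-tree layout driver)
 * of the initiate_commit callback that pnfs_generic_commit_pagelist() above
 * expects.  By the time it runs, nfs_init_commit() has already attached the
 * bucket's pages and lseg to @data; the driver only has to pick a data
 * server and fire the COMMIT, falling back to the MDS on failure.
 * my_prepare_commit_to_ds() is hypothetical, and a real driver would also
 * provide an .rpc_call_prepare for NFSv4 session sequencing.
 */
static struct nfs_client *my_prepare_commit_to_ds(struct nfs_commit_data *data);

static const struct rpc_call_ops my_commit_call_ops = {
	.rpc_call_done	= pnfs_generic_write_commit_done,
	.rpc_release	= pnfs_generic_commit_release,
};

static int my_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct nfs_client *ds_clp;

	/* Hypothetical helper: resolve and connect the data server backing
	 * data->ds_commit_index in data->lseg, take a reference on its
	 * nfs_client and store it in data->ds_clp. */
	ds_clp = my_prepare_commit_to_ds(data);
	if (!ds_clp) {
		/* No usable DS: requeue so the COMMIT goes to the MDS. */
		pnfs_generic_prepare_to_resend_writes(data);
		pnfs_generic_commit_release(data);
		return -EAGAIN;
	}
	return nfs_initiate_commit(ds_clp->cl_rpcclient, data,
				   NFS_PROTO(data->inode),
				   &my_commit_call_ops, how,
				   RPC_TASK_SOFTCONN);
}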
316/*
317 * Data server cache
318 *
319 * Data servers can be mapped to different device ids.
320 * nfs4_pnfs_ds reference counting
321 *   - set to 1 on allocation
322 *   - incremented when a device id maps a data server already in the cache.
323 *   - decremented when deviceid is removed from the cache.
324 */
325static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
326static LIST_HEAD(nfs4_data_server_cache);
327
328/* Debug routines */
329static void
330print_ds(struct nfs4_pnfs_ds *ds)
331{
332	if (ds == NULL) {
333		printk(KERN_WARNING "%s NULL device\n", __func__);
334		return;
335	}
336	printk(KERN_WARNING "        ds %s\n"
337		"        ref count %d\n"
338		"        client %p\n"
339		"        cl_exchange_flags %x\n",
340		ds->ds_remotestr,
341		refcount_read(&ds->ds_count), ds->ds_clp,
342		ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
343}
344
345static bool
346same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
347{
348	struct sockaddr_in *a, *b;
349	struct sockaddr_in6 *a6, *b6;
350
351	if (addr1->sa_family != addr2->sa_family)
352		return false;
353
354	switch (addr1->sa_family) {
355	case AF_INET:
356		a = (struct sockaddr_in *)addr1;
357		b = (struct sockaddr_in *)addr2;
358
359		if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
360		    a->sin_port == b->sin_port)
361			return true;
362		break;
363
364	case AF_INET6:
365		a6 = (struct sockaddr_in6 *)addr1;
366		b6 = (struct sockaddr_in6 *)addr2;
367
368		/* LINKLOCAL addresses must have matching scope_id */
369		if (ipv6_addr_src_scope(&a6->sin6_addr) ==
370		    IPV6_ADDR_SCOPE_LINKLOCAL &&
371		    a6->sin6_scope_id != b6->sin6_scope_id)
372			return false;
373
374		if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
375		    a6->sin6_port == b6->sin6_port)
376			return true;
377		break;
378
379	default:
380		dprintk("%s: unhandled address family: %u\n",
381			__func__, addr1->sa_family);
382		return false;
383	}
384
385	return false;
386}
387
388/*
389 * Checks if 'dsaddrs1' contains a subset of 'dsaddrs2'. If it does,
390 * declare a match.
391 */
392static bool
393_same_data_server_addrs_locked(const struct list_head *dsaddrs1,
394			       const struct list_head *dsaddrs2)
395{
396	struct nfs4_pnfs_ds_addr *da1, *da2;
397	struct sockaddr *sa1, *sa2;
398	bool match = false;
399
400	list_for_each_entry(da1, dsaddrs1, da_node) {
401		sa1 = (struct sockaddr *)&da1->da_addr;
402		match = false;
403		list_for_each_entry(da2, dsaddrs2, da_node) {
404			sa2 = (struct sockaddr *)&da2->da_addr;
405			match = same_sockaddr(sa1, sa2);
406			if (match)
407				break;
408		}
409		if (!match)
410			break;
411	}
412	return match;
413}
414
415/*
416 * Lookup DS by addresses.  nfs4_ds_cache_lock is held
417 */
418static struct nfs4_pnfs_ds *
419_data_server_lookup_locked(const struct list_head *dsaddrs)
420{
421	struct nfs4_pnfs_ds *ds;
422
423	list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
424		if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
425			return ds;
426	return NULL;
427}
428
429static void destroy_ds(struct nfs4_pnfs_ds *ds)
430{
431	struct nfs4_pnfs_ds_addr *da;
432
433	dprintk("--> %s\n", __func__);
434	ifdebug(FACILITY)
435		print_ds(ds);
436
437	nfs_put_client(ds->ds_clp);
438
439	while (!list_empty(&ds->ds_addrs)) {
440		da = list_first_entry(&ds->ds_addrs,
441				      struct nfs4_pnfs_ds_addr,
442				      da_node);
443		list_del_init(&da->da_node);
444		kfree(da->da_remotestr);
445		kfree(da);
446	}
447
448	kfree(ds->ds_remotestr);
449	kfree(ds);
450}
451
452void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
453{
454	if (refcount_dec_and_lock(&ds->ds_count,
455				&nfs4_ds_cache_lock)) {
456		list_del_init(&ds->ds_node);
457		spin_unlock(&nfs4_ds_cache_lock);
458		destroy_ds(ds);
459	}
460}
461EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_put);
462
463/*
464 * Create a string with a human readable address and port to avoid
 465 * complicated setup around many dprintks.
466 */
467static char *
468nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
469{
470	struct nfs4_pnfs_ds_addr *da;
471	char *remotestr;
472	size_t len;
473	char *p;
474
475	len = 3;        /* '{', '}' and eol */
476	list_for_each_entry(da, dsaddrs, da_node) {
477		len += strlen(da->da_remotestr) + 1;    /* string plus comma */
478	}
479
480	remotestr = kzalloc(len, gfp_flags);
481	if (!remotestr)
482		return NULL;
483
484	p = remotestr;
485	*(p++) = '{';
486	len--;
487	list_for_each_entry(da, dsaddrs, da_node) {
488		size_t ll = strlen(da->da_remotestr);
489
490		if (ll > len)
491			goto out_err;
492
493		memcpy(p, da->da_remotestr, ll);
494		p += ll;
495		len -= ll;
496
497		if (len < 1)
498			goto out_err;
499		(*p++) = ',';
500		len--;
501	}
502	if (len < 2)
503		goto out_err;
504	*(p++) = '}';
505	*p = '\0';
506	return remotestr;
507out_err:
508	kfree(remotestr);
509	return NULL;
510}
511
512/*
513 * Given a list of multipath struct nfs4_pnfs_ds_addr, add it to ds cache if
514 * uncached and return cached struct nfs4_pnfs_ds.
515 */
516struct nfs4_pnfs_ds *
517nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
518{
519	struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
520	char *remotestr;
521
522	if (list_empty(dsaddrs)) {
523		dprintk("%s: no addresses defined\n", __func__);
524		goto out;
525	}
526
527	ds = kzalloc(sizeof(*ds), gfp_flags);
528	if (!ds)
529		goto out;
530
 531	/* this is only used for debugging, so it's ok if it's NULL */
532	remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);
533
534	spin_lock(&nfs4_ds_cache_lock);
535	tmp_ds = _data_server_lookup_locked(dsaddrs);
536	if (tmp_ds == NULL) {
537		INIT_LIST_HEAD(&ds->ds_addrs);
538		list_splice_init(dsaddrs, &ds->ds_addrs);
539		ds->ds_remotestr = remotestr;
540		refcount_set(&ds->ds_count, 1);
541		INIT_LIST_HEAD(&ds->ds_node);
542		ds->ds_clp = NULL;
543		list_add(&ds->ds_node, &nfs4_data_server_cache);
544		dprintk("%s add new data server %s\n", __func__,
545			ds->ds_remotestr);
546	} else {
547		kfree(remotestr);
548		kfree(ds);
549		refcount_inc(&tmp_ds->ds_count);
550		dprintk("%s data server %s found, inc'ed ds_count to %d\n",
551			__func__, tmp_ds->ds_remotestr,
552			refcount_read(&tmp_ds->ds_count));
553		ds = tmp_ds;
554	}
555	spin_unlock(&nfs4_ds_cache_lock);
556out:
557	return ds;
558}
559EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add);
560
561static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
562{
563	might_sleep();
564	wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING,
565			TASK_KILLABLE);
566}
567
568static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
569{
570	smp_mb__before_atomic();
571	clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
572	smp_mb__after_atomic();
573	wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
574}
575
576static struct nfs_client *(*get_v3_ds_connect)(
577			struct nfs_server *mds_srv,
578			const struct sockaddr *ds_addr,
579			int ds_addrlen,
580			int ds_proto,
581			unsigned int ds_timeo,
582			unsigned int ds_retrans);
583
584static bool load_v3_ds_connect(void)
585{
586	if (!get_v3_ds_connect) {
587		get_v3_ds_connect = symbol_request(nfs3_set_ds_client);
588		WARN_ON_ONCE(!get_v3_ds_connect);
589	}
590
591	return(get_v3_ds_connect != NULL);
592}
593
594void nfs4_pnfs_v3_ds_connect_unload(void)
595{
596	if (get_v3_ds_connect) {
597		symbol_put(nfs3_set_ds_client);
598		get_v3_ds_connect = NULL;
599	}
600}
601
602static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
603				 struct nfs4_pnfs_ds *ds,
604				 unsigned int timeo,
605				 unsigned int retrans)
606{
607	struct nfs_client *clp = ERR_PTR(-EIO);
608	struct nfs4_pnfs_ds_addr *da;
609	int status = 0;
610
611	dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
612
613	if (!load_v3_ds_connect())
614		goto out;
615
616	list_for_each_entry(da, &ds->ds_addrs, da_node) {
617		dprintk("%s: DS %s: trying address %s\n",
618			__func__, ds->ds_remotestr, da->da_remotestr);
619
620		if (!IS_ERR(clp)) {
621			struct xprt_create xprt_args = {
622				.ident = XPRT_TRANSPORT_TCP,
623				.net = clp->cl_net,
624				.dstaddr = (struct sockaddr *)&da->da_addr,
625				.addrlen = da->da_addrlen,
626				.servername = clp->cl_hostname,
627			};
628			/* Add this address as an alias */
629			rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
630					rpc_clnt_test_and_add_xprt, NULL);
631		} else
632			clp = get_v3_ds_connect(mds_srv,
633					(struct sockaddr *)&da->da_addr,
634					da->da_addrlen, IPPROTO_TCP,
635					timeo, retrans);
636	}
637
638	if (IS_ERR(clp)) {
639		status = PTR_ERR(clp);
640		goto out;
641	}
642
643	smp_wmb();
644	ds->ds_clp = clp;
645	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
646out:
647	return status;
648}
649
650static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
651				 struct nfs4_pnfs_ds *ds,
652				 unsigned int timeo,
653				 unsigned int retrans,
654				 u32 minor_version)
655{
656	struct nfs_client *clp = ERR_PTR(-EIO);
657	struct nfs4_pnfs_ds_addr *da;
658	int status = 0;
659
660	dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
661
662	list_for_each_entry(da, &ds->ds_addrs, da_node) {
663		dprintk("%s: DS %s: trying address %s\n",
664			__func__, ds->ds_remotestr, da->da_remotestr);
665
666		if (!IS_ERR(clp) && clp->cl_mvops->session_trunk) {
667			struct xprt_create xprt_args = {
668				.ident = XPRT_TRANSPORT_TCP,
669				.net = clp->cl_net,
670				.dstaddr = (struct sockaddr *)&da->da_addr,
671				.addrlen = da->da_addrlen,
672				.servername = clp->cl_hostname,
673			};
674			struct nfs4_add_xprt_data xprtdata = {
675				.clp = clp,
676				.cred = nfs4_get_clid_cred(clp),
677			};
678			struct rpc_add_xprt_test rpcdata = {
679				.add_xprt_test = clp->cl_mvops->session_trunk,
680				.data = &xprtdata,
681			};
682
683			/**
684			* Test this address for session trunking and
685			* add as an alias
686			*/
687			rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
688					  rpc_clnt_setup_test_and_add_xprt,
689					  &rpcdata);
690			if (xprtdata.cred)
691				put_rpccred(xprtdata.cred);
692		} else {
693			clp = nfs4_set_ds_client(mds_srv,
694						(struct sockaddr *)&da->da_addr,
695						da->da_addrlen, IPPROTO_TCP,
696						timeo, retrans, minor_version);
697			if (IS_ERR(clp))
698				continue;
699
700			status = nfs4_init_ds_session(clp,
701					mds_srv->nfs_client->cl_lease_time);
702			if (status) {
703				nfs_put_client(clp);
704				clp = ERR_PTR(-EIO);
705				continue;
706			}
707
708		}
709	}
710
711	if (IS_ERR(clp)) {
712		status = PTR_ERR(clp);
713		goto out;
714	}
715
716	smp_wmb();
717	ds->ds_clp = clp;
718	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
719out:
720	return status;
721}
722
723/*
724 * Create an rpc connection to the nfs4_pnfs_ds data server.
725 * Currently only supports IPv4 and IPv6 addresses.
726 * If connection fails, make devid unavailable and return a -errno.
727 */
728int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
729			  struct nfs4_deviceid_node *devid, unsigned int timeo,
730			  unsigned int retrans, u32 version, u32 minor_version)
731{
732	int err;
733
734again:
735	err = 0;
736	if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
737		if (version == 3) {
738			err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
739						       retrans);
740		} else if (version == 4) {
741			err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo,
742						       retrans, minor_version);
743		} else {
744			dprintk("%s: unsupported DS version %d\n", __func__,
745				version);
746			err = -EPROTONOSUPPORT;
747		}
748
749		nfs4_clear_ds_conn_bit(ds);
750	} else {
751		nfs4_wait_ds_connect(ds);
752
753		/* what was waited on didn't connect AND didn't mark unavail */
754		if (!ds->ds_clp && !nfs4_test_deviceid_unavailable(devid))
755			goto again;
756	}
757
758	/*
759	 * At this point the ds->ds_clp should be ready, but it might have
760	 * hit an error.
761	 */
762	if (!err) {
763		if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) {
764			WARN_ON_ONCE(ds->ds_clp ||
765				!nfs4_test_deviceid_unavailable(devid));
766			return -EINVAL;
767		}
768		err = nfs_client_init_status(ds->ds_clp);
769	}
770
771	return err;
772}
773EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
774
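/*
 * Example only: a sketch (not an in-tree driver) of how a layout driver
 * feeds the data-server cache above while decoding a GETDEVICEINFO reply.
 * my_decode_and_cache_ds() and its naddrs argument are hypothetical; the
 * connection itself is normally set up lazily at I/O time with
 * nfs4_pnfs_ds_connect(), and the reference is dropped again with
 * nfs4_pnfs_ds_put() when the device id node goes away.
 */
static struct nfs4_pnfs_ds *
my_decode_and_cache_ds(struct net *net, struct xdr_stream *xdr,
		       u32 naddrs, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds;
	LIST_HEAD(dsaddrs);
	u32 i;

	/* One data server may be reachable over several addresses. */
	for (i = 0; i < naddrs; i++) {
		da = nfs4_decode_mp_ds_addr(net, xdr, gfp_flags);
		if (da)
			list_add_tail(&da->da_node, &dsaddrs);
	}

	/* Returns the cached nfs4_pnfs_ds for these addresses (bumping
	 * ds_count) or inserts a new entry that adopts the dsaddrs list. */
	ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);

	/* If the server was already cached, our addresses were not consumed
	 * and still sit on dsaddrs: free them before returning. */
	while (!list_empty(&dsaddrs)) {
		da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}
	return ds;
}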
775/*
776 * Currently only supports ipv4, ipv6 and one multi-path address.
777 */
778struct nfs4_pnfs_ds_addr *
779nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags)
780{
781	struct nfs4_pnfs_ds_addr *da = NULL;
782	char *buf, *portstr;
783	__be16 port;
784	int nlen, rlen;
785	int tmp[2];
786	__be32 *p;
787	char *netid, *match_netid;
788	size_t len, match_netid_len;
789	char *startsep = "";
790	char *endsep = "";
791
792
793	/* r_netid */
794	p = xdr_inline_decode(xdr, 4);
795	if (unlikely(!p))
796		goto out_err;
797	nlen = be32_to_cpup(p++);
798
799	p = xdr_inline_decode(xdr, nlen);
800	if (unlikely(!p))
801		goto out_err;
802
803	netid = kmalloc(nlen+1, gfp_flags);
804	if (unlikely(!netid))
805		goto out_err;
806
807	netid[nlen] = '\0';
808	memcpy(netid, p, nlen);
809
810	/* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
811	p = xdr_inline_decode(xdr, 4);
812	if (unlikely(!p))
813		goto out_free_netid;
814	rlen = be32_to_cpup(p);
815
816	p = xdr_inline_decode(xdr, rlen);
817	if (unlikely(!p))
818		goto out_free_netid;
819
820	/* port is ".ABC.DEF", 8 chars max */
821	if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) {
822		dprintk("%s: Invalid address, length %d\n", __func__,
823			rlen);
824		goto out_free_netid;
825	}
826	buf = kmalloc(rlen + 1, gfp_flags);
827	if (!buf) {
828		dprintk("%s: Not enough memory\n", __func__);
829		goto out_free_netid;
830	}
831	buf[rlen] = '\0';
832	memcpy(buf, p, rlen);
833
834	/* replace port '.' with '-' */
835	portstr = strrchr(buf, '.');
836	if (!portstr) {
837		dprintk("%s: Failed finding expected dot in port\n",
838			__func__);
839		goto out_free_buf;
840	}
841	*portstr = '-';
842
843	/* find '.' between address and port */
844	portstr = strrchr(buf, '.');
845	if (!portstr) {
846		dprintk("%s: Failed finding expected dot between address and "
847			"port\n", __func__);
848		goto out_free_buf;
849	}
850	*portstr = '\0';
851
852	da = kzalloc(sizeof(*da), gfp_flags);
853	if (unlikely(!da))
854		goto out_free_buf;
855
856	INIT_LIST_HEAD(&da->da_node);
857
858	if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr,
859		      sizeof(da->da_addr))) {
860		dprintk("%s: error parsing address %s\n", __func__, buf);
861		goto out_free_da;
862	}
863
864	portstr++;
865	sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
866	port = htons((tmp[0] << 8) | (tmp[1]));
867
868	switch (da->da_addr.ss_family) {
869	case AF_INET:
870		((struct sockaddr_in *)&da->da_addr)->sin_port = port;
871		da->da_addrlen = sizeof(struct sockaddr_in);
872		match_netid = "tcp";
873		match_netid_len = 3;
874		break;
875
876	case AF_INET6:
877		((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
878		da->da_addrlen = sizeof(struct sockaddr_in6);
879		match_netid = "tcp6";
880		match_netid_len = 4;
881		startsep = "[";
882		endsep = "]";
883		break;
884
885	default:
886		dprintk("%s: unsupported address family: %u\n",
887			__func__, da->da_addr.ss_family);
888		goto out_free_da;
889	}
890
891	if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) {
892		dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n",
893			__func__, netid, match_netid);
894		goto out_free_da;
895	}
896
897	/* save human readable address */
898	len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
899	da->da_remotestr = kzalloc(len, gfp_flags);
900
901	/* NULL is ok, only used for dprintk */
902	if (da->da_remotestr)
903		snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
904			 buf, endsep, ntohs(port));
905
906	dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
907	kfree(buf);
908	kfree(netid);
909	return da;
910
911out_free_da:
912	kfree(da);
913out_free_buf:
914	dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
915	kfree(buf);
916out_free_netid:
917	kfree(netid);
918out_err:
919	return NULL;
920}
921EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);
922
923void
924pnfs_layout_mark_request_commit(struct nfs_page *req,
925				struct pnfs_layout_segment *lseg,
926				struct nfs_commit_info *cinfo,
927				u32 ds_commit_idx)
928{
929	struct list_head *list;
930	struct pnfs_commit_bucket *buckets;
931
932	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
933	buckets = cinfo->ds->buckets;
934	list = &buckets[ds_commit_idx].written;
935	if (list_empty(list)) {
936		if (!pnfs_is_valid_lseg(lseg)) {
937			mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
938			cinfo->completion_ops->resched_write(cinfo, req);
939			return;
940		}
941		/* Non-empty buckets hold a reference on the lseg.  That ref
942		 * is normally transferred to the COMMIT call and released
943		 * there.  It could also be released if the last req is pulled
944		 * off due to a rewrite, in which case it will be done in
 945	 * pnfs_generic_clear_request_commit
946		 */
947		WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
948		buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
949	}
950	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
951	cinfo->ds->nwritten++;
952
953	nfs_request_add_commit_list_locked(req, list, cinfo);
954	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
955	nfs_mark_page_unstable(req->wb_page, cinfo);
956}
957EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
958
959int
960pnfs_nfs_generic_sync(struct inode *inode, bool datasync)
961{
962	int ret;
963
964	if (!pnfs_layoutcommit_outstanding(inode))
965		return 0;
966	ret = nfs_commit_inode(inode, FLUSH_SYNC);
967	if (ret < 0)
968		return ret;
969	if (datasync)
970		return 0;
971	return pnfs_layoutcommit_inode(inode, true);
972}
973EXPORT_SYMBOL_GPL(pnfs_nfs_generic_sync);
974
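/*
 * Example only: where the generic helpers above typically end up.  A layout
 * driver wires them into its struct pnfs_layoutdriver_type; the my_* entries
 * are hypothetical driver code and most fields are omitted.
 */
static int my_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			      int how, struct nfs_commit_info *cinfo);

static struct pnfs_layoutdriver_type my_layout_type = {
	.id			= LAYOUT_NFSV4_1_FILES,
	.name			= "MY_EXAMPLE_LAYOUT",
	.owner			= THIS_MODULE,
	/* shared commit machinery from this file */
	.mark_request_commit	= pnfs_layout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	/* driver wrapper that ends up calling pnfs_generic_commit_pagelist()
	 * with its own initiate_commit callback */
	.commit_pagelist	= my_commit_pagelist,
	.sync			= pnfs_nfs_generic_sync,
	/* .alloc_lseg, .free_lseg, .read_pagelist, .write_pagelist and
	 * friends are driver specific and omitted here. */
};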
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Common NFS I/O operations for the pnfs file based
   4 * layout drivers.
   5 *
   6 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
   7 *
   8 * Tom Haynes <loghyr@primarydata.com>
   9 */
  10
  11#include <linux/nfs_fs.h>
  12#include <linux/nfs_page.h>
  13#include <linux/sunrpc/addr.h>
  14#include <linux/module.h>
  15
  16#include "nfs4session.h"
  17#include "internal.h"
  18#include "pnfs.h"
  19
  20#define NFSDBG_FACILITY		NFSDBG_PNFS
  21
  22void pnfs_generic_rw_release(void *data)
  23{
  24	struct nfs_pgio_header *hdr = data;
  25
  26	nfs_put_client(hdr->ds_clp);
  27	hdr->mds_ops->rpc_release(data);
  28}
  29EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);
  30
  31/* Fake up some data that will cause nfs_commit_release to retry the writes. */
  32void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
  33{
  34	struct nfs_writeverf *verf = data->res.verf;
  35
  36	data->task.tk_status = 0;
  37	memset(&verf->verifier, 0, sizeof(verf->verifier));
  38	verf->committed = NFS_UNSTABLE;
  39}
  40EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);
  41
  42void pnfs_generic_write_commit_done(struct rpc_task *task, void *data)
  43{
  44	struct nfs_commit_data *wdata = data;
  45
  46	/* Note this may cause RPC to be resent */
  47	wdata->mds_ops->rpc_call_done(task, data);
  48}
  49EXPORT_SYMBOL_GPL(pnfs_generic_write_commit_done);
  50
  51void pnfs_generic_commit_release(void *calldata)
  52{
  53	struct nfs_commit_data *data = calldata;
  54
  55	data->completion_ops->completion(data);
  56	pnfs_put_lseg(data->lseg);
  57	nfs_put_client(data->ds_clp);
  58	nfs_commitdata_release(data);
  59}
  60EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);
  61
  62static struct pnfs_layout_segment *
  63pnfs_free_bucket_lseg(struct pnfs_commit_bucket *bucket)
  64{
  65	if (list_empty(&bucket->committing) && list_empty(&bucket->written)) {
  66		struct pnfs_layout_segment *freeme = bucket->lseg;
  67		bucket->lseg = NULL;
  68		return freeme;
  69	}
  70	return NULL;
  71}
  72
  73/* The generic layer is about to remove the req from the commit list.
  74 * If this will make the bucket empty, it will need to put the lseg reference.
  75 * Note this must be called holding nfsi->commit_mutex
  76 */
  77void
  78pnfs_generic_clear_request_commit(struct nfs_page *req,
  79				  struct nfs_commit_info *cinfo)
  80{
  81	struct pnfs_commit_bucket *bucket = NULL;
  82
  83	if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
  84		goto out;
  85	cinfo->ds->nwritten--;
  86	if (list_is_singular(&req->wb_list))
  87		bucket = list_first_entry(&req->wb_list,
  88					  struct pnfs_commit_bucket, written);
  89out:
  90	nfs_request_remove_commit_list(req, cinfo);
  91	if (bucket)
  92		pnfs_put_lseg(pnfs_free_bucket_lseg(bucket));
  93}
  94EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);
  95
  96struct pnfs_commit_array *
  97pnfs_alloc_commit_array(size_t n, gfp_t gfp_flags)
  98{
  99	struct pnfs_commit_array *p;
 100	struct pnfs_commit_bucket *b;
 101
 102	p = kmalloc(struct_size(p, buckets, n), gfp_flags);
 103	if (!p)
 104		return NULL;
 105	p->nbuckets = n;
 106	INIT_LIST_HEAD(&p->cinfo_list);
 107	INIT_LIST_HEAD(&p->lseg_list);
 108	p->lseg = NULL;
 109	for (b = &p->buckets[0]; n != 0; b++, n--) {
 110		INIT_LIST_HEAD(&b->written);
 111		INIT_LIST_HEAD(&b->committing);
 112		b->lseg = NULL;
 113		b->direct_verf.committed = NFS_INVALID_STABLE_HOW;
 114	}
 115	return p;
 116}
 117EXPORT_SYMBOL_GPL(pnfs_alloc_commit_array);
 118
 119void
 120pnfs_free_commit_array(struct pnfs_commit_array *p)
 121{
 122	kfree_rcu(p, rcu);
 123}
 124EXPORT_SYMBOL_GPL(pnfs_free_commit_array);
 125
 126static struct pnfs_commit_array *
 127pnfs_find_commit_array_by_lseg(struct pnfs_ds_commit_info *fl_cinfo,
 128		struct pnfs_layout_segment *lseg)
 129{
 130	struct pnfs_commit_array *array;
 131
 132	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
 133		if (array->lseg == lseg)
 134			return array;
 135	}
 136	return NULL;
 137}
 138
 139struct pnfs_commit_array *
 140pnfs_add_commit_array(struct pnfs_ds_commit_info *fl_cinfo,
 141		struct pnfs_commit_array *new,
 142		struct pnfs_layout_segment *lseg)
 143{
 144	struct pnfs_commit_array *array;
 145
 146	array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
 147	if (array)
 148		return array;
 149	new->lseg = lseg;
 150	refcount_set(&new->refcount, 1);
 151	list_add_rcu(&new->cinfo_list, &fl_cinfo->commits);
 152	list_add(&new->lseg_list, &lseg->pls_commits);
 153	return new;
 154}
 155EXPORT_SYMBOL_GPL(pnfs_add_commit_array);
 156
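/*
 * Example only: a sketch of a driver's ->setup_ds_info() hook showing the
 * intended pairing of pnfs_alloc_commit_array() and pnfs_add_commit_array().
 * my_lseg_bucket_count() is hypothetical (a real driver derives the bucket
 * count from the layout, e.g. its stripe or mirror count); i_lock serialises
 * the insertion against a racing caller.
 */
static unsigned int my_lseg_bucket_count(struct pnfs_layout_segment *lseg);

static void my_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
			     struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct pnfs_commit_array *array, *new;

	new = pnfs_alloc_commit_array(my_lseg_bucket_count(lseg), GFP_NOFS);
	if (!new)
		return;

	spin_lock(&inode->i_lock);
	/* Either inserts @new (which then holds the initial reference) or
	 * returns the array that another caller added first for this lseg. */
	array = pnfs_add_commit_array(fl_cinfo, new, lseg);
	spin_unlock(&inode->i_lock);

	if (array != new)
		pnfs_free_commit_array(new);	/* lost the race */
}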
 157static struct pnfs_commit_array *
 158pnfs_lookup_commit_array(struct pnfs_ds_commit_info *fl_cinfo,
 159		struct pnfs_layout_segment *lseg)
 160{
 161	struct pnfs_commit_array *array;
 162
 163	rcu_read_lock();
 164	array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
 165	if (!array) {
 166		rcu_read_unlock();
 167		fl_cinfo->ops->setup_ds_info(fl_cinfo, lseg);
 168		rcu_read_lock();
 169		array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
 170	}
 171	rcu_read_unlock();
 172	return array;
 173}
 174
 175static void
 176pnfs_release_commit_array_locked(struct pnfs_commit_array *array)
 177{
 178	list_del_rcu(&array->cinfo_list);
 179	list_del(&array->lseg_list);
 180	pnfs_free_commit_array(array);
 181}
 182
 183static void
 184pnfs_put_commit_array_locked(struct pnfs_commit_array *array)
 185{
 186	if (refcount_dec_and_test(&array->refcount))
 187		pnfs_release_commit_array_locked(array);
 188}
 189
 190static void
 191pnfs_put_commit_array(struct pnfs_commit_array *array, struct inode *inode)
 192{
 193	if (refcount_dec_and_lock(&array->refcount, &inode->i_lock)) {
 194		pnfs_release_commit_array_locked(array);
 195		spin_unlock(&inode->i_lock);
 196	}
 197}
 198
 199static struct pnfs_commit_array *
 200pnfs_get_commit_array(struct pnfs_commit_array *array)
 201{
 202	if (refcount_inc_not_zero(&array->refcount))
 203		return array;
 204	return NULL;
 205}
 206
 207static void
 208pnfs_remove_and_free_commit_array(struct pnfs_commit_array *array)
 209{
 210	array->lseg = NULL;
 211	list_del_init(&array->lseg_list);
 212	pnfs_put_commit_array_locked(array);
 213}
 214
 215void
 216pnfs_generic_ds_cinfo_release_lseg(struct pnfs_ds_commit_info *fl_cinfo,
 217		struct pnfs_layout_segment *lseg)
 218{
 219	struct pnfs_commit_array *array, *tmp;
 220
 221	list_for_each_entry_safe(array, tmp, &lseg->pls_commits, lseg_list)
 222		pnfs_remove_and_free_commit_array(array);
 223}
 224EXPORT_SYMBOL_GPL(pnfs_generic_ds_cinfo_release_lseg);
 225
 226void
 227pnfs_generic_ds_cinfo_destroy(struct pnfs_ds_commit_info *fl_cinfo)
 228{
 229	struct pnfs_commit_array *array, *tmp;
 230
 231	list_for_each_entry_safe(array, tmp, &fl_cinfo->commits, cinfo_list)
 232		pnfs_remove_and_free_commit_array(array);
 233}
 234EXPORT_SYMBOL_GPL(pnfs_generic_ds_cinfo_destroy);
 235
 236/*
 237 * Locks the nfs_page requests for commit and moves them to
 238 * @bucket->committing.
 239 */
 240static int
 241pnfs_bucket_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
 242				struct nfs_commit_info *cinfo,
 243				int max)
 244{
 245	struct list_head *src = &bucket->written;
 246	struct list_head *dst = &bucket->committing;
 247	int ret;
 248
 249	lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
 250	ret = nfs_scan_commit_list(src, dst, cinfo, max);
 251	if (ret) {
 252		cinfo->ds->nwritten -= ret;
 253		cinfo->ds->ncommitting += ret;
 254	}
 255	return ret;
 256}
 257
 258static int pnfs_bucket_scan_array(struct nfs_commit_info *cinfo,
 259				  struct pnfs_commit_bucket *buckets,
 260				  unsigned int nbuckets,
 261				  int max)
 262{
 263	unsigned int i;
 264	int rv = 0, cnt;
 265
 266	for (i = 0; i < nbuckets && max != 0; i++) {
 267		cnt = pnfs_bucket_scan_ds_commit_list(&buckets[i], cinfo, max);
 268		rv += cnt;
 269		max -= cnt;
 270	}
 271	return rv;
 272}
 273
 274/* Move reqs from written to committing lists, returning count
 275 * of number moved.
 276 */
 277int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max)
 278{
 279	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
 280	struct pnfs_commit_array *array;
 281	int rv = 0, cnt;
 282
 283	rcu_read_lock();
 284	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
 285		if (!array->lseg || !pnfs_get_commit_array(array))
 286			continue;
 287		rcu_read_unlock();
 288		cnt = pnfs_bucket_scan_array(cinfo, array->buckets,
 289				array->nbuckets, max);
 290		rcu_read_lock();
 291		pnfs_put_commit_array(array, cinfo->inode);
 292		rv += cnt;
 293		max -= cnt;
 294		if (!max)
 295			break;
 296	}
 297	rcu_read_unlock();
 298	return rv;
 299}
 300EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists);
 301
 302static unsigned int
 303pnfs_bucket_recover_commit_reqs(struct list_head *dst,
 304			        struct pnfs_commit_bucket *buckets,
 305				unsigned int nbuckets,
 306				struct nfs_commit_info *cinfo)
 307{
 308	struct pnfs_commit_bucket *b;
 309	struct pnfs_layout_segment *freeme;
 310	unsigned int nwritten, ret = 0;
 311	unsigned int i;
 312
 313restart:
 314	for (i = 0, b = buckets; i < nbuckets; i++, b++) {
 315		nwritten = nfs_scan_commit_list(&b->written, dst, cinfo, 0);
 316		if (!nwritten)
 317			continue;
 318		ret += nwritten;
 319		freeme = pnfs_free_bucket_lseg(b);
 320		if (freeme) {
 321			pnfs_put_lseg(freeme);
 322			goto restart;
 323		}
 324	}
 325	return ret;
 326}
 327
 328/* Pull everything off the committing lists and dump into @dst.  */
 329void pnfs_generic_recover_commit_reqs(struct list_head *dst,
 330				      struct nfs_commit_info *cinfo)
 331{
 332	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
 333	struct pnfs_commit_array *array;
 334	unsigned int nwritten;
 335
 336	lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
 337	rcu_read_lock();
 338	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
 339		if (!array->lseg || !pnfs_get_commit_array(array))
 340			continue;
 341		rcu_read_unlock();
 342		nwritten = pnfs_bucket_recover_commit_reqs(dst,
 343							   array->buckets,
 344							   array->nbuckets,
 345							   cinfo);
 346		rcu_read_lock();
 347		pnfs_put_commit_array(array, cinfo->inode);
 348		fl_cinfo->nwritten -= nwritten;
 349	}
 350	rcu_read_unlock();
 351}
 352EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);
 353
 354static struct nfs_page *
 355pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets,
 356			       unsigned int nbuckets, struct folio *folio)
 357{
 358	struct nfs_page *req;
 359	struct pnfs_commit_bucket *b;
 360	unsigned int i;
 361
 362	/* Linearly search the commit lists for each bucket until a matching
 363	 * request is found */
 364	for (i = 0, b = buckets; i < nbuckets; i++, b++) {
 365		list_for_each_entry(req, &b->written, wb_list) {
 366			if (nfs_page_to_folio(req) == folio)
 367				return req->wb_head;
 368		}
 369		list_for_each_entry(req, &b->committing, wb_list) {
 370			if (nfs_page_to_folio(req) == folio)
 371				return req->wb_head;
 372		}
 373	}
 374	return NULL;
 375}
 376
 377/* pnfs_generic_search_commit_reqs - Search lists in @cinfo for the head request
 378 *				   for @folio
 379 * @cinfo - commit info for current inode
 380 * @folio - page to search for matching head request
 381 *
 382 * Return: the head request if one is found, otherwise %NULL.
 383 */
 384struct nfs_page *pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo,
 385						 struct folio *folio)
 386{
 387	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
 388	struct pnfs_commit_array *array;
 389	struct nfs_page *req;
 390
 391	list_for_each_entry(array, &fl_cinfo->commits, cinfo_list) {
 392		req = pnfs_bucket_search_commit_reqs(array->buckets,
 393						     array->nbuckets, folio);
 394		if (req)
 395			return req;
 396	}
 397	return NULL;
 398}
 399EXPORT_SYMBOL_GPL(pnfs_generic_search_commit_reqs);
 400
 401static struct pnfs_layout_segment *
 402pnfs_bucket_get_committing(struct list_head *head,
 403			   struct pnfs_commit_bucket *bucket,
 404			   struct nfs_commit_info *cinfo)
 405{
 406	struct pnfs_layout_segment *lseg;
 407	struct list_head *pos;
 408
 409	list_for_each(pos, &bucket->committing)
 410		cinfo->ds->ncommitting--;
 411	list_splice_init(&bucket->committing, head);
 412	lseg = pnfs_free_bucket_lseg(bucket);
 413	if (!lseg)
 414		lseg = pnfs_get_lseg(bucket->lseg);
 415	return lseg;
 416}
 417
 418static struct nfs_commit_data *
 419pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket,
 420			     struct nfs_commit_info *cinfo)
 421{
 422	struct nfs_commit_data *data = nfs_commitdata_alloc();
 423
 424	if (!data)
 425		return NULL;
 426	data->lseg = pnfs_bucket_get_committing(&data->pages, bucket, cinfo);
 427	return data;
 428}
 429
 430static void pnfs_generic_retry_commit(struct pnfs_commit_bucket *buckets,
 431				      unsigned int nbuckets,
 432				      struct nfs_commit_info *cinfo,
 433				      unsigned int idx)
 434{
 435	struct pnfs_commit_bucket *bucket;
 436	struct pnfs_layout_segment *freeme;
 437	LIST_HEAD(pages);
 438
 439	for (bucket = buckets; idx < nbuckets; bucket++, idx++) {
 440		if (list_empty(&bucket->committing))
 441			continue;
 442		mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
 443		freeme = pnfs_bucket_get_committing(&pages, bucket, cinfo);
 444		mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
 445		nfs_retry_commit(&pages, freeme, cinfo, idx);
 446		pnfs_put_lseg(freeme);
 447	}
 448}
 449
 450static unsigned int
 451pnfs_bucket_alloc_ds_commits(struct list_head *list,
 452			     struct pnfs_commit_bucket *buckets,
 453			     unsigned int nbuckets,
 454			     struct nfs_commit_info *cinfo)
 455{
 456	struct pnfs_commit_bucket *bucket;
 457	struct nfs_commit_data *data;
 458	unsigned int i;
 459	unsigned int nreq = 0;
 460
 461	for (i = 0, bucket = buckets; i < nbuckets; i++, bucket++) {
 462		if (list_empty(&bucket->committing))
 463			continue;
 464		mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
 465		if (!list_empty(&bucket->committing)) {
 466			data = pnfs_bucket_fetch_commitdata(bucket, cinfo);
 467			if (!data)
 468				goto out_error;
 469			data->ds_commit_index = i;
 470			list_add_tail(&data->list, list);
 471			nreq++;
 472		}
 473		mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
 474	}
 475	return nreq;
 476out_error:
 477	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
 478	/* Clean up on error */
 479	pnfs_generic_retry_commit(buckets, nbuckets, cinfo, i);
 480	return nreq;
 481}
 482
 483static unsigned int
 484pnfs_alloc_ds_commits_list(struct list_head *list,
 485			   struct pnfs_ds_commit_info *fl_cinfo,
 486			   struct nfs_commit_info *cinfo)
 487{
 488	struct pnfs_commit_array *array;
 489	unsigned int ret = 0;
 490
 491	rcu_read_lock();
 492	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
 493		if (!array->lseg || !pnfs_get_commit_array(array))
 494			continue;
 495		rcu_read_unlock();
 496		ret += pnfs_bucket_alloc_ds_commits(list, array->buckets,
 497				array->nbuckets, cinfo);
 498		rcu_read_lock();
 499		pnfs_put_commit_array(array, cinfo->inode);
 501	rcu_read_unlock();
 502	return ret;
 503}
 504
 505/* This follows nfs_commit_list pretty closely */
 506int
 507pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
 508			     int how, struct nfs_commit_info *cinfo,
 509			     int (*initiate_commit)(struct nfs_commit_data *data,
 510						    int how))
 511{
 512	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
 513	struct nfs_commit_data *data, *tmp;
 514	LIST_HEAD(list);
 515	unsigned int nreq = 0;
 516
 517	if (!list_empty(mds_pages)) {
 518		data = nfs_commitdata_alloc();
 519		if (!data) {
 520			nfs_retry_commit(mds_pages, NULL, cinfo, -1);
 521			return -ENOMEM;
 522		}
 523		data->ds_commit_index = -1;
 524		list_splice_init(mds_pages, &data->pages);
 525		list_add_tail(&data->list, &list);
 526		nreq++;
 527	}
 528
 529	nreq += pnfs_alloc_ds_commits_list(&list, fl_cinfo, cinfo);
 530	if (nreq == 0)
 531		goto out;
 532
 533	list_for_each_entry_safe(data, tmp, &list, list) {
 534		list_del(&data->list);
 535		if (data->ds_commit_index < 0) {
 536			nfs_init_commit(data, NULL, NULL, cinfo);
 537			nfs_initiate_commit(NFS_CLIENT(inode), data,
 538					    NFS_PROTO(data->inode),
 539					    data->mds_ops, how,
 540					    RPC_TASK_CRED_NOREF);
 541		} else {
 542			nfs_init_commit(data, NULL, data->lseg, cinfo);
 543			initiate_commit(data, how);
 544		}
 545	}
 546out:
 547	return PNFS_ATTEMPTED;
 548}
 549EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist);
 550
 551/*
 552 * Data server cache
 553 *
 554 * Data servers can be mapped to different device ids.
 555 * nfs4_pnfs_ds reference counting
 556 *   - set to 1 on allocation
 557 *   - incremented when a device id maps a data server already in the cache.
 558 *   - decremented when deviceid is removed from the cache.
 559 */
 560static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
 561static LIST_HEAD(nfs4_data_server_cache);
 562
 563/* Debug routines */
 564static void
 565print_ds(struct nfs4_pnfs_ds *ds)
 566{
 567	if (ds == NULL) {
 568		printk(KERN_WARNING "%s NULL device\n", __func__);
 569		return;
 570	}
 571	printk(KERN_WARNING "        ds %s\n"
 572		"        ref count %d\n"
 573		"        client %p\n"
 574		"        cl_exchange_flags %x\n",
 575		ds->ds_remotestr,
 576		refcount_read(&ds->ds_count), ds->ds_clp,
 577		ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
 578}
 579
 580static bool
 581same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
 582{
 583	struct sockaddr_in *a, *b;
 584	struct sockaddr_in6 *a6, *b6;
 585
 586	if (addr1->sa_family != addr2->sa_family)
 587		return false;
 588
 589	switch (addr1->sa_family) {
 590	case AF_INET:
 591		a = (struct sockaddr_in *)addr1;
 592		b = (struct sockaddr_in *)addr2;
 593
 594		if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
 595		    a->sin_port == b->sin_port)
 596			return true;
 597		break;
 598
 599	case AF_INET6:
 600		a6 = (struct sockaddr_in6 *)addr1;
 601		b6 = (struct sockaddr_in6 *)addr2;
 602
 603		/* LINKLOCAL addresses must have matching scope_id */
 604		if (ipv6_addr_src_scope(&a6->sin6_addr) ==
 605		    IPV6_ADDR_SCOPE_LINKLOCAL &&
 606		    a6->sin6_scope_id != b6->sin6_scope_id)
 607			return false;
 608
 609		if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
 610		    a6->sin6_port == b6->sin6_port)
 611			return true;
 612		break;
 613
 614	default:
 615		dprintk("%s: unhandled address family: %u\n",
 616			__func__, addr1->sa_family);
 617		return false;
 618	}
 619
 620	return false;
 621}
 622
 623/*
 624 * Checks if 'dsaddrs1' contains a subset of 'dsaddrs2'. If it does,
 625 * declare a match.
 626 */
 627static bool
 628_same_data_server_addrs_locked(const struct list_head *dsaddrs1,
 629			       const struct list_head *dsaddrs2)
 630{
 631	struct nfs4_pnfs_ds_addr *da1, *da2;
 632	struct sockaddr *sa1, *sa2;
 633	bool match = false;
 634
 635	list_for_each_entry(da1, dsaddrs1, da_node) {
 636		sa1 = (struct sockaddr *)&da1->da_addr;
 637		match = false;
 638		list_for_each_entry(da2, dsaddrs2, da_node) {
 639			sa2 = (struct sockaddr *)&da2->da_addr;
 640			match = same_sockaddr(sa1, sa2);
 641			if (match)
 642				break;
 643		}
 644		if (!match)
 645			break;
 646	}
 647	return match;
 648}
 649
 650/*
 651 * Lookup DS by addresses.  nfs4_ds_cache_lock is held
 652 */
 653static struct nfs4_pnfs_ds *
 654_data_server_lookup_locked(const struct list_head *dsaddrs)
 655{
 656	struct nfs4_pnfs_ds *ds;
 657
 658	list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
 659		if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
 660			return ds;
 661	return NULL;
 662}
 663
 664static struct nfs4_pnfs_ds_addr *nfs4_pnfs_ds_addr_alloc(gfp_t gfp_flags)
 665{
 666	struct nfs4_pnfs_ds_addr *da = kzalloc(sizeof(*da), gfp_flags);
 667	if (da)
 668		INIT_LIST_HEAD(&da->da_node);
 669	return da;
 670}
 671
 672static void nfs4_pnfs_ds_addr_free(struct nfs4_pnfs_ds_addr *da)
 673{
 674	kfree(da->da_remotestr);
 675	kfree(da->da_netid);
 676	kfree(da);
 677}
 678
 679static void destroy_ds(struct nfs4_pnfs_ds *ds)
 680{
 681	struct nfs4_pnfs_ds_addr *da;
 682
 683	dprintk("--> %s\n", __func__);
 684	ifdebug(FACILITY)
 685		print_ds(ds);
 686
 687	nfs_put_client(ds->ds_clp);
 688
 689	while (!list_empty(&ds->ds_addrs)) {
 690		da = list_first_entry(&ds->ds_addrs,
 691				      struct nfs4_pnfs_ds_addr,
 692				      da_node);
 693		list_del_init(&da->da_node);
 694		nfs4_pnfs_ds_addr_free(da);
 695	}
 696
 697	kfree(ds->ds_remotestr);
 698	kfree(ds);
 699}
 700
 701void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
 702{
 703	if (refcount_dec_and_lock(&ds->ds_count,
 704				&nfs4_ds_cache_lock)) {
 705		list_del_init(&ds->ds_node);
 706		spin_unlock(&nfs4_ds_cache_lock);
 707		destroy_ds(ds);
 708	}
 709}
 710EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_put);
 711
 712/*
 713 * Create a string with a human readable address and port to avoid
 714 * complicated setup around many dprintks.
 715 */
 716static char *
 717nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
 718{
 719	struct nfs4_pnfs_ds_addr *da;
 720	char *remotestr;
 721	size_t len;
 722	char *p;
 723
 724	len = 3;        /* '{', '}' and eol */
 725	list_for_each_entry(da, dsaddrs, da_node) {
 726		len += strlen(da->da_remotestr) + 1;    /* string plus comma */
 727	}
 728
 729	remotestr = kzalloc(len, gfp_flags);
 730	if (!remotestr)
 731		return NULL;
 732
 733	p = remotestr;
 734	*(p++) = '{';
 735	len--;
 736	list_for_each_entry(da, dsaddrs, da_node) {
 737		size_t ll = strlen(da->da_remotestr);
 738
 739		if (ll > len)
 740			goto out_err;
 741
 742		memcpy(p, da->da_remotestr, ll);
 743		p += ll;
 744		len -= ll;
 745
 746		if (len < 1)
 747			goto out_err;
 748		(*p++) = ',';
 749		len--;
 750	}
 751	if (len < 2)
 752		goto out_err;
 753	*(p++) = '}';
 754	*p = '\0';
 755	return remotestr;
 756out_err:
 757	kfree(remotestr);
 758	return NULL;
 759}
 760
 761/*
 762 * Given a list of multipath struct nfs4_pnfs_ds_addr, add it to ds cache if
 763 * uncached and return cached struct nfs4_pnfs_ds.
 764 */
 765struct nfs4_pnfs_ds *
 766nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
 767{
 768	struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
 769	char *remotestr;
 770
 771	if (list_empty(dsaddrs)) {
 772		dprintk("%s: no addresses defined\n", __func__);
 773		goto out;
 774	}
 775
 776	ds = kzalloc(sizeof(*ds), gfp_flags);
 777	if (!ds)
 778		goto out;
 779
 780	/* this is only used for debugging, so it's ok if it's NULL */
 781	remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);
 782
 783	spin_lock(&nfs4_ds_cache_lock);
 784	tmp_ds = _data_server_lookup_locked(dsaddrs);
 785	if (tmp_ds == NULL) {
 786		INIT_LIST_HEAD(&ds->ds_addrs);
 787		list_splice_init(dsaddrs, &ds->ds_addrs);
 788		ds->ds_remotestr = remotestr;
 789		refcount_set(&ds->ds_count, 1);
 790		INIT_LIST_HEAD(&ds->ds_node);
 791		ds->ds_clp = NULL;
 792		list_add(&ds->ds_node, &nfs4_data_server_cache);
 793		dprintk("%s add new data server %s\n", __func__,
 794			ds->ds_remotestr);
 795	} else {
 796		kfree(remotestr);
 797		kfree(ds);
 798		refcount_inc(&tmp_ds->ds_count);
 799		dprintk("%s data server %s found, inc'ed ds_count to %d\n",
 800			__func__, tmp_ds->ds_remotestr,
 801			refcount_read(&tmp_ds->ds_count));
 802		ds = tmp_ds;
 803	}
 804	spin_unlock(&nfs4_ds_cache_lock);
 805out:
 806	return ds;
 807}
 808EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add);
 809
 810static int nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
 811{
 812	might_sleep();
 813	return wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING, TASK_KILLABLE);
 814}
 815
 816static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
 817{
 818	smp_mb__before_atomic();
 819	clear_and_wake_up_bit(NFS4DS_CONNECTING, &ds->ds_state);
 820}
 821
 822static struct nfs_client *(*get_v3_ds_connect)(
 823			struct nfs_server *mds_srv,
 824			const struct sockaddr_storage *ds_addr,
 825			int ds_addrlen,
 826			int ds_proto,
 827			unsigned int ds_timeo,
 828			unsigned int ds_retrans);
 829
 830static bool load_v3_ds_connect(void)
 831{
 832	if (!get_v3_ds_connect) {
 833		get_v3_ds_connect = symbol_request(nfs3_set_ds_client);
 834		WARN_ON_ONCE(!get_v3_ds_connect);
 835	}
 836
 837	return(get_v3_ds_connect != NULL);
 838}
 839
 840void nfs4_pnfs_v3_ds_connect_unload(void)
 841{
 842	if (get_v3_ds_connect) {
 843		symbol_put(nfs3_set_ds_client);
 844		get_v3_ds_connect = NULL;
 845	}
 846}
 847
 848static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
 849				 struct nfs4_pnfs_ds *ds,
 850				 unsigned int timeo,
 851				 unsigned int retrans)
 852{
 853	struct nfs_client *clp = ERR_PTR(-EIO);
 854	struct nfs4_pnfs_ds_addr *da;
 855	unsigned long connect_timeout = timeo * (retrans + 1) * HZ / 10;
 856	int status = 0;
 857
 858	dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
 859
 860	if (!load_v3_ds_connect())
 861		return -EPROTONOSUPPORT;
 862
 863	list_for_each_entry(da, &ds->ds_addrs, da_node) {
 864		dprintk("%s: DS %s: trying address %s\n",
 865			__func__, ds->ds_remotestr, da->da_remotestr);
 866
 867		if (!IS_ERR(clp)) {
 868			struct xprt_create xprt_args = {
 869				.ident = da->da_transport,
 870				.net = clp->cl_net,
 871				.dstaddr = (struct sockaddr *)&da->da_addr,
 872				.addrlen = da->da_addrlen,
 873				.servername = clp->cl_hostname,
 874				.connect_timeout = connect_timeout,
 875				.reconnect_timeout = connect_timeout,
 876			};
 877
 878			if (da->da_transport != clp->cl_proto)
 879				continue;
 880			if (da->da_addr.ss_family != clp->cl_addr.ss_family)
 881				continue;
 882			/* Add this address as an alias */
 883			rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
 884					rpc_clnt_test_and_add_xprt, NULL);
 885			continue;
 886		}
 887		clp = get_v3_ds_connect(mds_srv,
 888				&da->da_addr,
 889				da->da_addrlen, da->da_transport,
 890				timeo, retrans);
 891		if (IS_ERR(clp))
 892			continue;
 893		clp->cl_rpcclient->cl_softerr = 0;
 894		clp->cl_rpcclient->cl_softrtry = 0;
 895	}
 896
 897	if (IS_ERR(clp)) {
 898		status = PTR_ERR(clp);
 899		goto out;
 900	}
 901
 902	smp_wmb();
 903	WRITE_ONCE(ds->ds_clp, clp);
 904	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
 905out:
 906	return status;
 907}
 908
 909static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
 910				 struct nfs4_pnfs_ds *ds,
 911				 unsigned int timeo,
 912				 unsigned int retrans,
 913				 u32 minor_version)
 914{
 915	struct nfs_client *clp = ERR_PTR(-EIO);
 916	struct nfs4_pnfs_ds_addr *da;
 917	int status = 0;
 918
 919	dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
 920
 921	list_for_each_entry(da, &ds->ds_addrs, da_node) {
 922		dprintk("%s: DS %s: trying address %s\n",
 923			__func__, ds->ds_remotestr, da->da_remotestr);
 924
 925		if (!IS_ERR(clp) && clp->cl_mvops->session_trunk) {
 926			struct xprt_create xprt_args = {
 927				.ident = da->da_transport,
 928				.net = clp->cl_net,
 929				.dstaddr = (struct sockaddr *)&da->da_addr,
 930				.addrlen = da->da_addrlen,
 931				.servername = clp->cl_hostname,
 932			};
 933			struct nfs4_add_xprt_data xprtdata = {
 934				.clp = clp,
 935			};
 936			struct rpc_add_xprt_test rpcdata = {
 937				.add_xprt_test = clp->cl_mvops->session_trunk,
 938				.data = &xprtdata,
 939			};
 940
 941			if (da->da_transport != clp->cl_proto)
 942				continue;
 943			if (da->da_addr.ss_family != clp->cl_addr.ss_family)
 944				continue;
 945			/**
 946			* Test this address for session trunking and
 947			* add as an alias
 948			*/
 949			xprtdata.cred = nfs4_get_clid_cred(clp);
 950			rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
 951					  rpc_clnt_setup_test_and_add_xprt,
 952					  &rpcdata);
 953			if (xprtdata.cred)
 954				put_cred(xprtdata.cred);
 955		} else {
 956			clp = nfs4_set_ds_client(mds_srv,
 957						&da->da_addr,
 958						da->da_addrlen,
 959						da->da_transport, timeo,
 960						retrans, minor_version);
 961			if (IS_ERR(clp))
 962				continue;
 963
 964			status = nfs4_init_ds_session(clp,
 965					mds_srv->nfs_client->cl_lease_time);
 966			if (status) {
 967				nfs_put_client(clp);
 968				clp = ERR_PTR(-EIO);
 969				continue;
 970			}
 971
 972		}
 973	}
 974
 975	if (IS_ERR(clp)) {
 976		status = PTR_ERR(clp);
 977		goto out;
 978	}
 979
 980	smp_wmb();
 981	WRITE_ONCE(ds->ds_clp, clp);
 982	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
 983out:
 984	return status;
 985}
 986
 987/*
 988 * Create an rpc connection to the nfs4_pnfs_ds data server.
 989 * Currently only supports IPv4 and IPv6 addresses.
 990 * If connection fails, make devid unavailable and return a -errno.
 991 */
 992int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
 993			  struct nfs4_deviceid_node *devid, unsigned int timeo,
 994			  unsigned int retrans, u32 version, u32 minor_version)
 995{
 996	int err;
 997
 998	do {
 999		err = nfs4_wait_ds_connect(ds);
1000		if (err || ds->ds_clp)
1001			goto out;
1002		if (nfs4_test_deviceid_unavailable(devid))
1003			return -ENODEV;
1004	} while (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) != 0);
1005
1006	if (ds->ds_clp)
1007		goto connect_done;
1008
1009	switch (version) {
1010	case 3:
1011		err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo, retrans);
1012		break;
1013	case 4:
1014		err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo, retrans,
1015					       minor_version);
1016		break;
1017	default:
1018		dprintk("%s: unsupported DS version %d\n", __func__, version);
1019		err = -EPROTONOSUPPORT;
1020	}
1021
1022connect_done:
1023	nfs4_clear_ds_conn_bit(ds);
1024out:
1025	/*
1026	 * At this point the ds->ds_clp should be ready, but it might have
1027	 * hit an error.
1028	 */
1029	if (!err) {
1030		if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) {
1031			WARN_ON_ONCE(ds->ds_clp ||
1032				!nfs4_test_deviceid_unavailable(devid));
1033			return -EINVAL;
1034		}
1035		err = nfs_client_init_status(ds->ds_clp);
1036	}
1037
1038	return err;
1039}
1040EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
1041
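/*
 * Example only: the usual "prepare a data server for I/O" pattern built on
 * top of nfs4_pnfs_ds_connect().  Everything here is a sketch: timeo/retrans
 * normally come from mount options or module parameters, the version numbers
 * assume an NFSv4.1 data server, and the explicit
 * nfs4_mark_deviceid_unavailable() on failure is one way a driver can make
 * later I/O fall back to the MDS.
 */
static struct nfs4_pnfs_ds *
my_prepare_ds(struct nfs_server *server, struct nfs4_pnfs_ds *ds,
	      struct nfs4_deviceid_node *devid,
	      unsigned int timeo, unsigned int retrans)
{
	if (!ds->ds_clp) {
		int err = nfs4_pnfs_ds_connect(server, ds, devid, timeo,
					       retrans, 4, 1);
		if (err) {
			/* Unusable until a fresh GETDEVICEINFO. */
			nfs4_mark_deviceid_unavailable(devid);
			return NULL;
		}
	}
	/* Pairs with the smp_wmb() before ds->ds_clp is published above. */
	smp_rmb();
	return ds;
}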
1042/*
1043 * Currently only supports ipv4, ipv6 and one multi-path address.
1044 */
1045struct nfs4_pnfs_ds_addr *
1046nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags)
1047{
1048	struct nfs4_pnfs_ds_addr *da = NULL;
1049	char *buf, *portstr;
1050	__be16 port;
1051	ssize_t nlen, rlen;
1052	int tmp[2];
1053	char *netid;
1054	size_t len;
1055	char *startsep = "";
1056	char *endsep = "";
1057
1058
1059	/* r_netid */
1060	nlen = xdr_stream_decode_string_dup(xdr, &netid, XDR_MAX_NETOBJ,
1061					    gfp_flags);
1062	if (unlikely(nlen < 0))
1063		goto out_err;
1064
1065	/* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
1066	/* port is ".ABC.DEF", 8 chars max */
1067	rlen = xdr_stream_decode_string_dup(xdr, &buf, INET6_ADDRSTRLEN +
1068					    IPV6_SCOPE_ID_LEN + 8, gfp_flags);
1069	if (unlikely(rlen < 0))
1070		goto out_free_netid;
1071
1072	/* replace port '.' with '-' */
1073	portstr = strrchr(buf, '.');
1074	if (!portstr) {
1075		dprintk("%s: Failed finding expected dot in port\n",
1076			__func__);
1077		goto out_free_buf;
1078	}
1079	*portstr = '-';
1080
1081	/* find '.' between address and port */
1082	portstr = strrchr(buf, '.');
1083	if (!portstr) {
1084		dprintk("%s: Failed finding expected dot between address and "
1085			"port\n", __func__);
1086		goto out_free_buf;
1087	}
1088	*portstr = '\0';
1089
1090	da = nfs4_pnfs_ds_addr_alloc(gfp_flags);
1091	if (unlikely(!da))
1092		goto out_free_buf;
1093
1094	if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr,
1095		      sizeof(da->da_addr))) {
1096		dprintk("%s: error parsing address %s\n", __func__, buf);
1097		goto out_free_da;
1098	}
1099
1100	portstr++;
1101	sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
1102	port = htons((tmp[0] << 8) | (tmp[1]));
1103
1104	switch (da->da_addr.ss_family) {
1105	case AF_INET:
1106		((struct sockaddr_in *)&da->da_addr)->sin_port = port;
1107		da->da_addrlen = sizeof(struct sockaddr_in);
1108		break;
1109
1110	case AF_INET6:
1111		((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
1112		da->da_addrlen = sizeof(struct sockaddr_in6);
1113		startsep = "[";
1114		endsep = "]";
1115		break;
1116
1117	default:
1118		dprintk("%s: unsupported address family: %u\n",
1119			__func__, da->da_addr.ss_family);
1120		goto out_free_da;
1121	}
1122
1123	da->da_transport = xprt_find_transport_ident(netid);
1124	if (da->da_transport < 0) {
1125		dprintk("%s: ERROR: unknown r_netid \"%s\"\n",
1126			__func__, netid);
1127		goto out_free_da;
1128	}
1129
1130	da->da_netid = netid;
1131
1132	/* save human readable address */
1133	len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
1134	da->da_remotestr = kzalloc(len, gfp_flags);
1135
1136	/* NULL is ok, only used for dprintk */
1137	if (da->da_remotestr)
1138		snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
1139			 buf, endsep, ntohs(port));
1140
1141	dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
1142	kfree(buf);
1143	return da;
1144
1145out_free_da:
1146	kfree(da);
1147out_free_buf:
1148	dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
1149	kfree(buf);
1150out_free_netid:
1151	kfree(netid);
1152out_err:
1153	return NULL;
1154}
1155EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);
1156
1157void
1158pnfs_layout_mark_request_commit(struct nfs_page *req,
1159				struct pnfs_layout_segment *lseg,
1160				struct nfs_commit_info *cinfo,
1161				u32 ds_commit_idx)
1162{
1163	struct list_head *list;
1164	struct pnfs_commit_array *array;
1165	struct pnfs_commit_bucket *bucket;
1166
1167	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
1168	array = pnfs_lookup_commit_array(cinfo->ds, lseg);
1169	if (!array || !pnfs_is_valid_lseg(lseg))
1170		goto out_resched;
1171	bucket = &array->buckets[ds_commit_idx];
1172	list = &bucket->written;
1173	/* Non-empty buckets hold a reference on the lseg.  That ref
1174	 * is normally transferred to the COMMIT call and released
1175	 * there.  It could also be released if the last req is pulled
1176	 * off due to a rewrite, in which case it will be done in
1177	 * pnfs_generic_clear_request_commit
1178	 */
1179	if (!bucket->lseg)
1180		bucket->lseg = pnfs_get_lseg(lseg);
1181	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
1182	cinfo->ds->nwritten++;
1183
1184	nfs_request_add_commit_list_locked(req, list, cinfo);
1185	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
1186	nfs_folio_mark_unstable(nfs_page_to_folio(req), cinfo);
1187	return;
1188out_resched:
1189	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
1190	cinfo->completion_ops->resched_write(cinfo, req);
1191}
1192EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
1193
1194int
1195pnfs_nfs_generic_sync(struct inode *inode, bool datasync)
1196{
1197	int ret;
1198
1199	if (!pnfs_layoutcommit_outstanding(inode))
1200		return 0;
1201	ret = nfs_commit_inode(inode, FLUSH_SYNC);
1202	if (ret < 0)
1203		return ret;
1204	if (datasync)
1205		return 0;
1206	return pnfs_layoutcommit_inode(inode, true);
1207}
1208EXPORT_SYMBOL_GPL(pnfs_nfs_generic_sync);
1209