// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */

#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"
#include "dns_resolve.h"
#include "dfs.h"

#include "dfs_cache.h"

#define CACHE_HTABLE_SIZE	32
#define CACHE_MAX_ENTRIES	64
#define CACHE_MIN_TTL		120 /* 2 minutes */
#define CACHE_DEFAULT_TTL	300 /* 5 minutes */

struct cache_dfs_tgt {
	char *name;
	int path_consumed;
	struct list_head list;
};

struct cache_entry {
	struct hlist_node hlist;
	const char *path;
	int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
	int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
	int srvtype; /* DFS_REREFERRAL_V3.ServerType */
	int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
	struct timespec64 etime;
	int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
	int numtgts;
	struct list_head tlist;
	struct cache_dfs_tgt *tgthint;
};

static struct kmem_cache *cache_slab __read_mostly;
struct workqueue_struct *dfscache_wq;

atomic_t dfs_cache_ttl;

static struct nls_table *cache_cp;

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);

/**
 * dfs_cache_canonical_path - get a canonical DFS path
 *
 * @path: DFS path
 * @cp: codepage
 * @remap: mapping type
 *
 * Return the canonical path on success, otherwise an ERR_PTR.
 */
char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
{
	char *tmp;
	int plen = 0;
	char *npath;

	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return ERR_PTR(-EINVAL);

	if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
		tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
		if (!tmp) {
			cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
			return ERR_PTR(-EINVAL);
		}

		npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
		kfree(tmp);

		if (!npath) {
			cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
			return ERR_PTR(-EINVAL);
		}
	} else {
		npath = kstrdup(path, GFP_KERNEL);
		if (!npath)
			return ERR_PTR(-ENOMEM);
	}
	convert_delimiter(npath, '\\');
	return npath;
}
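
/*
 * Illustrative sketch (not part of the original file): assuming @cp and
 * @cache_cp share the same charset, canonicalization simply duplicates
 * the path and flips the delimiters, e.g.:
 *
 *	char *npath = dfs_cache_canonical_path("/dom/dfsroot/link", cp, remap);
 *
 * on success yields npath == "\dom\dfsroot\link", which the caller must
 * kfree() when done.
 */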

static inline bool cache_entry_expired(const struct cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->etime) >= 0;
}

static inline void free_tgts(struct cache_entry *ce)
{
	struct cache_dfs_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->tlist, list) {
		list_del(&t->list);
		kfree(t->name);
		kfree(t);
	}
}

static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}

static void flush_cache_ents(void)
{
	int i;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;
		struct cache_entry *ce;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (!hlist_unhashed(&ce->hlist))
				flush_cache_ent(ce);
		}
	}
}

/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
				   DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, "  %s%s\n",
					   t->name,
					   READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}

static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache\n");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}

static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}

const struct proc_ops dfscache_proc_ops = {
	.proc_open	= dfscache_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= dfscache_proc_write,
};

#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, "  %s%s\n", t->name,
			 READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
	}
}

static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
		 ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 ce->hdr_flags, ce->ref_flags,
		 DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
		 ce->path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}

static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags:         0x%x\n"
			 "path_consumed: %d\n"
			 "server_type:   0x%x\n"
			 "ref_flag:      0x%x\n"
			 "path_name:     %s\n"
			 "node_name:     %s\n"
			 "ttl:           %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif

/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	dfscache_wq = alloc_workqueue("cifs-dfscache",
				      WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM,
				      0);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	atomic_set(&dfs_cache_ttl, CACHE_DEFAULT_TTL);
	cache_cp = load_nls("utf8");
	if (!cache_cp)
		cache_cp = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}

static int cache_entry_hash(const void *data, int size, unsigned int *hash)
{
	int i, clen;
	const unsigned char *s = data;
	wchar_t c;
	unsigned int h = 0;

	for (i = 0; i < size; i += clen) {
		clen = cache_cp->char2uni(&s[i], size - i, &c);
		if (unlikely(clen < 0)) {
			cifs_dbg(VFS, "%s: can't convert char\n", __func__);
			return clen;
		}
		c = cifs_toupper(c);
		h = jhash(&c, sizeof(c), h);
	}
	*hash = h % CACHE_HTABLE_SIZE;
	return 0;
}
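
/*
 * Illustrative sketch (not part of the original file): since every
 * character is uppercased before being fed to jhash(), lookups are
 * case-insensitive; assuming an ASCII-compatible @cache_cp, both calls
 * below land in the same bucket:
 *
 *	unsigned int h1, h2;
 *
 *	cache_entry_hash("\\DOM\\Share", 10, &h1);
 *	cache_entry_hash("\\dom\\share", 10, &h2);
 *
 * afterwards h1 == h2, and both are < CACHE_HTABLE_SIZE.
 */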

/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);

	return t ? t->name : ERR_PTR(-ENOENT);
}

/* Return expiration time computed from a new entry's TTL */
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}

/* Allocate a new DFS target */
static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
{
	struct cache_dfs_tgt *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->name = kstrdup(name, GFP_ATOMIC);
	if (!t->name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	t->path_consumed = path_consumed;
	INIT_LIST_HEAD(&t->list);
	return t;
}

/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	struct cache_dfs_tgt *target;
	int i;

	ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->hdr_flags = refs[0].flags;
	ce->ref_flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
					  list);
	WRITE_ONCE(ce->tgthint, target);

	return 0;
}

/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->path = refs[0].path_name;
	refs[0].path_name = NULL;

	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}

static void remove_oldest_entry_locked(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *to_del = NULL;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (!to_del || timespec64_compare(&ce->etime,
							  &to_del->etime) < 0)
				to_del = ce;
		}
	}

	if (!to_del) {
		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
		return;
	}

	cifs_dbg(FYI, "%s: removing entry\n", __func__);
	dump_ce(to_del);
	flush_cache_ent(to_del);
}

/* Add a new DFS cache entry */
static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
						  int numrefs)
{
	int rc;
	struct cache_entry *ce;
	unsigned int hash;
	int ttl;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
		remove_oldest_entry_locked();
	}

	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
	if (rc)
		return ERR_PTR(rc);

	ce = alloc_cache_entry(refs, numrefs);
	if (IS_ERR(ce))
		return ce;

	ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
	atomic_set(&dfs_cache_ttl, ttl);

	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);

	atomic_inc(&cache_count);

	return ce;
}

/* Check if two DFS paths are equal.  @s1 and @s2 are expected to be in @cache_cp's charset */
static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
{
	int i, l1, l2;
	wchar_t c1, c2;

	if (len1 != len2)
		return false;

	for (i = 0; i < len1; i += l1) {
		l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
		l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
		if (unlikely(l1 < 0 && l2 < 0)) {
			if (s1[i] != s2[i])
				return false;
			l1 = 1;
			continue;
		}
		if (l1 != l2)
			return false;
		if (cifs_toupper(c1) != cifs_toupper(c2))
			return false;
	}
	return true;
}

static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
{
	struct cache_entry *ce;

	hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
		if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
			dump_ce(ce);
			return ce;
		}
	}
	return ERR_PTR(-ENOENT);
}

/*
 * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path.
 *
 * Use whole path components in the match.  Must be called with htable_rw_lock held.
 *
 * Return cached entry if successful.
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 * Return error ptr otherwise.
 */
static struct cache_entry *lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	int cnt = 0;
	const char *s = path, *e;
	char sep = *s;
	unsigned int hash;
	int rc;

	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		rc = cache_entry_hash(path, strlen(path), &hash);
		if (rc)
			return ERR_PTR(rc);
		return __lookup_cache_entry(path, hash, strlen(path));
	}
	/*
	 * Handle paths that have more than two path components and are a complete prefix of the DFS
	 * referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
	 */
	e = path + strlen(path) - 1;
	while (e > s) {
		int len;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			break;

		len = e + 1 - path;
		rc = cache_entry_hash(path, len, &hash);
		if (rc)
			return ERR_PTR(rc);
		ce = __lookup_cache_entry(path, hash, len);
		if (!IS_ERR(ce))
			return ce;

		/* backward until separator */
		while (e > s && *e != sep)
			e--;
	}
	return ERR_PTR(-ENOENT);
}
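
/*
 * Illustrative sketch (not part of the original file): for a referral
 * request path such as "\dom\dfsroot\link\dir", the walk above tries the
 * longest prefix first:
 *
 *	lookup "\dom\dfsroot\link\dir"	-> miss
 *	lookup "\dom\dfsroot\link"	-> hit (cached link referral)
 *
 * and stops at the third separator, so only prefixes with at least three
 * path components are considered by the backward walk.
 */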

/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	unload_nls(cache_cp);
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}

/* Update a cache entry with the new referral in @refs */
static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
				     int numrefs)
{
	struct cache_dfs_tgt *target;
	char *th = NULL;
	int rc;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	target = READ_ONCE(ce->tgthint);
	if (target) {
		th = kstrdup(target->name, GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}

static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
			    struct dfs_info3_param **refs, int *numrefs)
{
	int rc;
	int i;

	*refs = NULL;
	*numrefs = 0;

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!cache_cp))
		return -EINVAL;

	cifs_dbg(FYI, "%s: ipc=%s referral=%s\n", __func__, ses->tcon_ipc->tree_name, path);
	rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
					     NO_MAP_UNI_RSVD);
	if (!rc) {
		struct dfs_info3_param *ref = *refs;

		for (i = 0; i < *numrefs; i++)
			convert_delimiter(ref[i].path_name, '\\');
	}
	return rc;
}

/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, create a new one.  If it was found but
 * expired, update the entry accordingly.
 *
 * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 *
 * On success, return entry with acquired lock for reading, otherwise error ptr.
 */
static struct cache_entry *cache_refresh_path(const unsigned int xid,
					      struct cifs_ses *ses,
					      const char *path,
					      bool force_refresh)
{
	struct dfs_info3_param *refs = NULL;
	struct cache_entry *ce;
	int numrefs = 0;
	int rc;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (!force_refresh && !cache_entry_expired(ce))
			return ce;
	} else if (PTR_ERR(ce) != -ENOENT) {
		up_read(&htable_rw_lock);
		return ce;
	}

	/*
	 * Unlock shared access as we don't want to hold any locks while getting
	 * a new referral.  The @ses used for performing the I/O could be
	 * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
	 * in order to failover -- if necessary.
	 */
	up_read(&htable_rw_lock);

	/*
	 * Either the entry was not found, or it is expired, or it is a forced
	 * refresh.
	 * Request a new DFS referral in order to create or update a cache entry.
	 */
	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
	if (rc) {
		ce = ERR_PTR(rc);
		goto out;
	}

	dump_refs(refs, numrefs);

	down_write(&htable_rw_lock);
	/* Re-check as another task might have it added or refreshed already */
	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (force_refresh || cache_entry_expired(ce)) {
			rc = update_cache_entry_locked(ce, refs, numrefs);
			if (rc)
				ce = ERR_PTR(rc);
		}
	} else if (PTR_ERR(ce) == -ENOENT) {
		ce = add_cache_entry_locked(refs, numrefs);
	}

	if (IS_ERR(ce)) {
		up_write(&htable_rw_lock);
		goto out;
	}

	downgrade_write(&htable_rw_lock);
out:
	free_dfs_info_array(refs, numrefs);
	return ce;
}
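
/*
 * Illustrative sketch (not part of the original file): on success the
 * caller ends up holding @htable_rw_lock for reading and must drop it
 * once done with the returned entry, e.g.:
 *
 *	ce = cache_refresh_path(xid, ses, npath, false);
 *	if (!IS_ERR(ce)) {
 *		... use @ce while the lock is held ...
 *		up_read(&htable_rw_lock);
 *	}
 */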

/*
 * Set up a DFS referral from a given cache entry.
 *
 * Must be called with htable_rw_lock held.
 */
static int setup_referral(const char *path, struct cache_entry *ce,
			  struct dfs_info3_param *ref, const char *target)
{
	int rc;

	cifs_dbg(FYI, "%s: set up new ref\n", __func__);

	memset(ref, 0, sizeof(*ref));

	ref->path_name = kstrdup(path, GFP_ATOMIC);
	if (!ref->path_name)
		return -ENOMEM;

	ref->node_name = kstrdup(target, GFP_ATOMIC);
	if (!ref->node_name) {
		rc = -ENOMEM;
		goto err_free_path;
	}

	ref->path_consumed = ce->path_consumed;
	ref->ttl = ce->ttl;
	ref->server_type = ce->srvtype;
	ref->ref_flag = ce->ref_flags;
	ref->flags = ce->hdr_flags;

	return 0;

err_free_path:
	kfree(ref->path_name);
	ref->path_name = NULL;
	return rc;
}

/* Return target list of a DFS cache entry */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrdup(t->name, GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}
		it->it_path_consumed = t->path_consumed;

		if (READ_ONCE(ce->tgthint) == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		list_del(&it->it_list);
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}

/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but has expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @cp: codepage
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
		   int remap, const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;

	npath = dfs_cache_canonical_path(path, cp, remap);
	if (IS_ERR(npath))
		return PTR_ERR(npath);

	ce = cache_refresh_path(xid, ses, npath, false);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	kfree(npath);
	return rc;
}
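
/*
 * Illustrative sketch (not part of the original file): a caller that only
 * wants the referral's target list might do:
 *
 *	DFS_CACHE_TGT_LIST(tl);
 *	struct dfs_cache_tgt_iterator *it;
 *
 *	rc = dfs_cache_find(xid, ses, cp, remap, path, NULL, &tl);
 *	if (!rc) {
 *		for (it = dfs_cache_get_tgt_iterator(&tl); it;
 *		     it = dfs_cache_get_next_tgt(&tl, it))
 *			pr_info("target: %s\n", dfs_cache_get_tgt_name(it));
 *		dfs_cache_free_tgts(&tl);
 *	}
 */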

/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 * the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: canonical DFS path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	struct cache_entry *ce;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

out_unlock:
	up_read(&htable_rw_lock);
	return rc;
}

/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any requests to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: canonical DFS path to lookup in DFS referral cache.
 * @it: target iterator which contains the target hint to update the cache
 * entry with.
 */
void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
{
	struct cache_dfs_tgt *t;
	struct cache_entry *ce;

	if (!path || !it)
		return;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce))
		goto out_unlock;

	t = READ_ONCE(ce->tgthint);

	if (unlikely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			WRITE_ONCE(ce->tgthint, t);
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_read(&htable_rw_lock);
}

/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: canonical DFS path to lookup in DFS referral cache.
 * @it: DFS target iterator.
 * @ref: DFS referral pointer to set up the gathered information.
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	struct cache_entry *ce;

	if (!it || !ref)
		return -EINVAL;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_referral(path, ce, ref, it->it_name);

out_unlock:
	up_read(&htable_rw_lock);
	return rc;
}

/* Extract share from DFS target and return a pointer to the prefix path, or an ERR_PTR on error */
static const char *parse_target_share(const char *target, char **share)
{
	const char *s, *seps = "/\\";
	size_t len;

	s = strpbrk(target + 1, seps);
	if (!s)
		return ERR_PTR(-EINVAL);

	len = strcspn(s + 1, seps);
	if (!len)
		return ERR_PTR(-EINVAL);
	s += len;

	len = s - target + 1;
	*share = kstrndup(target, len, GFP_KERNEL);
	if (!*share)
		return ERR_PTR(-ENOMEM);

	s = target + len;
	return s + strspn(s, seps);
}
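
/*
 * Illustrative sketch (not part of the original file): for a target of
 * the form "\srv\share\a\b", parse_target_share() sets *@share to
 * "\srv\share" and returns a pointer to "a\b"; for a bare "\srv\share"
 * the returned prefix is the empty string.
 */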

/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @path: DFS full path
 * @it: DFS target iterator.
 * @share: tree name.
 * @prefix: prefix path.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
			    char **prefix)
{
	char sep;
	char *target_share;
	char *ppath = NULL;
	const char *target_ppath, *dfsref_ppath;
	size_t target_pplen, dfsref_pplen;
	size_t len, c;

	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
		return -EINVAL;

	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	target_ppath = parse_target_share(it->it_name, &target_share);
	if (IS_ERR(target_ppath))
		return PTR_ERR(target_ppath);

	/* point to prefix in DFS referral path */
	dfsref_ppath = path + it->it_path_consumed;
	dfsref_ppath += strspn(dfsref_ppath, "/\\");

	target_pplen = strlen(target_ppath);
	dfsref_pplen = strlen(dfsref_ppath);

	/* merge prefix paths from DFS referral path and target node */
	if (target_pplen || dfsref_pplen) {
		len = target_pplen + dfsref_pplen + 2;
		ppath = kzalloc(len, GFP_KERNEL);
		if (!ppath) {
			kfree(target_share);
			return -ENOMEM;
		}
		c = strscpy(ppath, target_ppath, len);
		if (c && dfsref_pplen)
			ppath[c] = sep;
		strlcat(ppath, dfsref_ppath, len);
	}
	*share = target_share;
	*prefix = ppath;
	return 0;
}
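
/*
 * Illustrative sketch (not part of the original file): with a full path
 * "\dom\dfsroot\dir", a target "\srv\share\base" and @it_path_consumed
 * covering "\dom\dfsroot", the merged result would be:
 *
 *	*share  = "\srv\share"
 *	*prefix = "base\dir"
 *
 * i.e. the target's own prefix path followed by the unconsumed tail of
 * the DFS referral path.
 */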

static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
{
	char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
	const char *host;
	size_t hostlen;
	struct sockaddr_storage ss;
	bool match;
	int rc;

	if (strcasecmp(s1, s2))
		return false;

	/*
	 * Resolve the share's hostname and check if the server address matches.
	 * Otherwise just ignore it, as we either have no upcall to resolve the
	 * hostname or failed to convert the IP address.
	 */
	extract_unc_hostname(s1, &host, &hostlen);
	scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);

	rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
	if (rc < 0) {
		cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
			 __func__, (int)hostlen, host);
		return true;
	}

	cifs_server_lock(server);
	match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
	cifs_server_unlock(server);

	return match;
}

/*
 * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
 * target shares in @new_tl.
 */
static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
					 const char *path,
					 struct dfs_cache_tgt_list *old_tl,
					 struct dfs_cache_tgt_list *new_tl)
{
	struct dfs_cache_tgt_iterator *oit, *nit;

	for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
	     oit = dfs_cache_get_next_tgt(old_tl, oit)) {
		for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
		     nit = dfs_cache_get_next_tgt(new_tl, nit)) {
			if (target_share_equal(server,
					       dfs_cache_get_tgt_name(oit),
					       dfs_cache_get_tgt_name(nit))) {
				dfs_cache_noreq_update_tgthint(path, nit);
				return;
			}
		}
	}

	cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
	cifs_signal_cifsd_for_reconnect(server, true);
}

static bool is_ses_good(struct cifs_ses *ses)
{
	struct TCP_Server_Info *server = ses->server;
	struct cifs_tcon *tcon = ses->tcon_ipc;
	bool ret;

	spin_lock(&ses->ses_lock);
	spin_lock(&ses->chan_lock);
	ret = !cifs_chan_needs_reconnect(ses, server) &&
		ses->ses_status == SES_GOOD &&
		!tcon->need_reconnect;
	spin_unlock(&ses->chan_lock);
	spin_unlock(&ses->ses_lock);
	return ret;
}

/* Refresh dfs referral of tcon and mark it for reconnect if needed */
static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
{
	struct TCP_Server_Info *server = ses->server;
	DFS_CACHE_TGT_LIST(old_tl);
	DFS_CACHE_TGT_LIST(new_tl);
	bool needs_refresh = false;
	struct cache_entry *ce;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	down_read(&htable_rw_lock);
	ce = lookup_cache_entry(path);
	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
	if (!IS_ERR(ce)) {
		rc = get_targets(ce, &old_tl);
		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
	}
	up_read(&htable_rw_lock);

	if (!needs_refresh) {
		rc = 0;
		goto out;
	}

	ses = CIFS_DFS_ROOT_SES(ses);
	if (!is_ses_good(ses)) {
		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
			 __func__);
		goto out;
	}

	ce = cache_refresh_path(xid, ses, path, true);
	if (!IS_ERR(ce)) {
		rc = get_targets(ce, &new_tl);
		up_read(&htable_rw_lock);
		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
		mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
	}

out:
	free_xid(xid);
	dfs_cache_free_tgts(&old_tl);
	dfs_cache_free_tgts(&new_tl);
	return rc;
}

static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_ses *ses = tcon->ses;

	mutex_lock(&server->refpath_lock);
	if (server->leaf_fullpath)
		__refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
	mutex_unlock(&server->refpath_lock);
	return 0;
}

/**
 * dfs_cache_remount_fs - remount a DFS share
 *
 * Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
 * match any of the new targets, mark it for reconnect.
 *
 * @cifs_sb: cifs superblock.
 *
 * Return zero if remounted, otherwise non-zero.
 */
int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
{
	struct cifs_tcon *tcon;

	if (!cifs_sb || !cifs_sb->master_tlink)
		return -EINVAL;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&tcon->tc_lock);
	if (!tcon->origin_fullpath) {
		spin_unlock(&tcon->tc_lock);
		cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
		return 0;
	}
	spin_unlock(&tcon->tc_lock);

	/*
	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
	 * serverino. This prevents dentry revalidation from thinking the dentries are stale (ESTALE).
	 */
	cifs_autodisable_serverino(cifs_sb);
	/*
	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
	 * that have different prefix paths.
	 */
	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;

	return refresh_tcon(tcon, true);
}

/* Refresh all DFS referrals related to DFS tcon */
void dfs_cache_refresh(struct work_struct *work)
{
	struct TCP_Server_Info *server;
	struct dfs_root_ses *rses;
	struct cifs_tcon *tcon;
	struct cifs_ses *ses;

	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
	ses = tcon->ses;
	server = ses->server;

	mutex_lock(&server->refpath_lock);
	if (server->leaf_fullpath)
		__refresh_tcon(server->leaf_fullpath + 1, ses, false);
	mutex_unlock(&server->refpath_lock);

	list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
		ses = rses->ses;
		server = ses->server;
		mutex_lock(&server->refpath_lock);
		if (server->leaf_fullpath)
			__refresh_tcon(server->leaf_fullpath + 1, ses, false);
		mutex_unlock(&server->refpath_lock);
	}

	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
			   atomic_read(&dfs_cache_ttl) * HZ);
}