v6.2 (fs/cifs/dfs_cache.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * DFS referral cache routines
   4 *
   5 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
   6 */
   7
   8#include <linux/jhash.h>
   9#include <linux/ktime.h>
  10#include <linux/slab.h>
  11#include <linux/proc_fs.h>
  12#include <linux/nls.h>
  13#include <linux/workqueue.h>
  14#include <linux/uuid.h>
  15#include "cifsglob.h"
  16#include "smb2pdu.h"
  17#include "smb2proto.h"
  18#include "cifsproto.h"
  19#include "cifs_debug.h"
  20#include "cifs_unicode.h"
  21#include "smb2glob.h"
  22#include "dns_resolve.h"
  23
  24#include "dfs_cache.h"
  25
  26#define CACHE_HTABLE_SIZE 32
  27#define CACHE_MAX_ENTRIES 64
  28#define CACHE_MIN_TTL 120 /* 2 minutes */
  29
  30#define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
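/*
 * Annotation (not part of the original file): per MS-DFSC, an entry is an
 * "interlink" when the responding server can hand out referrals but does not
 * itself store the data.  Truth table for IS_DFS_INTERLINK():
 *
 *   DFSREF_REFERRAL_SERVER  DFSREF_STORAGE_SERVER  interlink?
 *            0                       0                 no
 *            1                       0                 yes
 *            1                       1                 no
 *            0                       1                 no
 */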
  31
  32struct cache_dfs_tgt {
  33	char *name;
  34	int path_consumed;
  35	struct list_head list;
  36};
  37
  38struct cache_entry {
  39	struct hlist_node hlist;
  40	const char *path;
  41	int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
  42	int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
  43	int srvtype; /* DFS_REREFERRAL_V3.ServerType */
  44	int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
  45	struct timespec64 etime;
  46	int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
  47	int numtgts;
  48	struct list_head tlist;
  49	struct cache_dfs_tgt *tgthint;
  50};
  51
  52/* List of referral server sessions per dfs mount */
  53struct mount_group {
  54	struct list_head list;
  55	uuid_t id;
  56	struct cifs_ses *sessions[CACHE_MAX_ENTRIES];
  57	int num_sessions;
  58	spinlock_t lock;
  59	struct list_head refresh_list;
  60	struct kref refcount;
  61};
  62
  63static struct kmem_cache *cache_slab __read_mostly;
  64static struct workqueue_struct *dfscache_wq __read_mostly;
  65
  66static int cache_ttl;
  67static DEFINE_SPINLOCK(cache_ttl_lock);
  68
  69static struct nls_table *cache_cp;
  70
  71/*
  72 * Number of entries in the cache
  73 */
  74static atomic_t cache_count;
  75
  76static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
  77static DECLARE_RWSEM(htable_rw_lock);
  78
  79static LIST_HEAD(mount_group_list);
  80static DEFINE_MUTEX(mount_group_list_lock);
  81
  82static void refresh_cache_worker(struct work_struct *work);
  83
  84static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
  85
  86static void __mount_group_release(struct mount_group *mg)
  87{
  88	int i;
  89
  90	for (i = 0; i < mg->num_sessions; i++)
  91		cifs_put_smb_ses(mg->sessions[i]);
  92	kfree(mg);
  93}
  94
  95static void mount_group_release(struct kref *kref)
  96{
  97	struct mount_group *mg = container_of(kref, struct mount_group, refcount);
  98
  99	mutex_lock(&mount_group_list_lock);
 100	list_del(&mg->list);
 101	mutex_unlock(&mount_group_list_lock);
 102	__mount_group_release(mg);
 103}
 104
 105static struct mount_group *find_mount_group_locked(const uuid_t *id)
 106{
 107	struct mount_group *mg;
 108
 109	list_for_each_entry(mg, &mount_group_list, list) {
 110		if (uuid_equal(&mg->id, id))
 111			return mg;
 112	}
 113	return ERR_PTR(-ENOENT);
 114}
 115
 116static struct mount_group *__get_mount_group_locked(const uuid_t *id)
 117{
 118	struct mount_group *mg;
 119
 120	mg = find_mount_group_locked(id);
 121	if (!IS_ERR(mg))
 122		return mg;
 123
 124	mg = kmalloc(sizeof(*mg), GFP_KERNEL);
 125	if (!mg)
 126		return ERR_PTR(-ENOMEM);
 127	kref_init(&mg->refcount);
 128	uuid_copy(&mg->id, id);
 129	mg->num_sessions = 0;
 130	spin_lock_init(&mg->lock);
 131	list_add(&mg->list, &mount_group_list);
 132	return mg;
 133}
 134
 135static struct mount_group *get_mount_group(const uuid_t *id)
 136{
 137	struct mount_group *mg;
 138
 139	mutex_lock(&mount_group_list_lock);
 140	mg = __get_mount_group_locked(id);
 141	if (!IS_ERR(mg))
 142		kref_get(&mg->refcount);
 143	mutex_unlock(&mount_group_list_lock);
 144
 145	return mg;
 146}
 147
 148static void free_mount_group_list(void)
 149{
 150	struct mount_group *mg, *tmp_mg;
 151
 152	list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) {
 153		list_del_init(&mg->list);
 154		__mount_group_release(mg);
 155	}
 156}
 157
 158/**
 159 * dfs_cache_canonical_path - get a canonical DFS path
 160 *
 161 * @path: DFS path
 162 * @cp: codepage
 163 * @remap: mapping type
 164 *
 165 * Return canonical path if success, otherwise error.
 166 */
 167char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
 168{
 169	char *tmp;
 170	int plen = 0;
 171	char *npath;
 172
 173	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
 174		return ERR_PTR(-EINVAL);
 175
 176	if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
 177		tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
 178		if (!tmp) {
 179			cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
 180			return ERR_PTR(-EINVAL);
 181		}
 182
 183		npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
 184		kfree(tmp);
 185
 186		if (!npath) {
 187			cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
 188			return ERR_PTR(-EINVAL);
 189		}
 190	} else {
 191		npath = kstrdup(path, GFP_KERNEL);
 192		if (!npath)
 193			return ERR_PTR(-ENOMEM);
 194	}
 195	convert_delimiter(npath, '\\');
 196	return npath;
 197}
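/*
 * Illustrative behaviour of dfs_cache_canonical_path() (annotation, not part
 * of the original file; "srv" and "share" are made-up names):
 *
 *   "//srv/share/dir"  ->  "\\srv\share\dir"   (delimiters normalized)
 *   "\\srv\share\dir"  ->  "\\srv\share\dir"   (duplicated as-is)
 *
 * Paths shorter than 3 chars, or not starting with '\' or '/', yield
 * ERR_PTR(-EINVAL); a @cp different from @cache_cp triggers a round trip
 * through UTF-16 so the cached copy is in @cache_cp's charset.
 */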
 198
 199static inline bool cache_entry_expired(const struct cache_entry *ce)
 200{
 201	struct timespec64 ts;
 202
 203	ktime_get_coarse_real_ts64(&ts);
 204	return timespec64_compare(&ts, &ce->etime) >= 0;
 205}
 206
 207static inline void free_tgts(struct cache_entry *ce)
 208{
 209	struct cache_dfs_tgt *t, *n;
 210
 211	list_for_each_entry_safe(t, n, &ce->tlist, list) {
 212		list_del(&t->list);
 213		kfree(t->name);
 214		kfree(t);
 215	}
 216}
 217
 218static inline void flush_cache_ent(struct cache_entry *ce)
 219{
 220	hlist_del_init(&ce->hlist);
 221	kfree(ce->path);
 222	free_tgts(ce);
 223	atomic_dec(&cache_count);
 224	kmem_cache_free(cache_slab, ce);
 225}
 226
 227static void flush_cache_ents(void)
 228{
 229	int i;
 230
 231	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
 232		struct hlist_head *l = &cache_htable[i];
 233		struct hlist_node *n;
 234		struct cache_entry *ce;
 235
 236		hlist_for_each_entry_safe(ce, n, l, hlist) {
 237			if (!hlist_unhashed(&ce->hlist))
 238				flush_cache_ent(ce);
 239		}
 240	}
 241}
 242
 243/*
 244 * dfs cache /proc file
 245 */
 246static int dfscache_proc_show(struct seq_file *m, void *v)
 247{
 248	int i;
 249	struct cache_entry *ce;
 250	struct cache_dfs_tgt *t;
 251
 252	seq_puts(m, "DFS cache\n---------\n");
 253
 254	down_read(&htable_rw_lock);
 255	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
 256		struct hlist_head *l = &cache_htable[i];
 257
 258		hlist_for_each_entry(ce, l, hlist) {
 259			if (hlist_unhashed(&ce->hlist))
 260				continue;
 261
 262			seq_printf(m,
 263				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
 264				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
 265				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
 266				   IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
 267				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
 268
 269			list_for_each_entry(t, &ce->tlist, list) {
 270				seq_printf(m, "  %s%s\n",
 271					   t->name,
 272					   READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
 273			}
 274		}
 275	}
 276	up_read(&htable_rw_lock);
 277
 278	return 0;
 279}
 280
 281static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
 282				   size_t count, loff_t *ppos)
 283{
 284	char c;
 285	int rc;
 286
 287	rc = get_user(c, buffer);
 288	if (rc)
 289		return rc;
 290
 291	if (c != '0')
 292		return -EINVAL;
 293
 294	cifs_dbg(FYI, "clearing dfs cache\n");
 295
 296	down_write(&htable_rw_lock);
 297	flush_cache_ents();
 298	up_write(&htable_rw_lock);
 299
 300	return count;
 301}
 302
 303static int dfscache_proc_open(struct inode *inode, struct file *file)
 304{
 305	return single_open(file, dfscache_proc_show, NULL);
 306}
 307
 308const struct proc_ops dfscache_proc_ops = {
 309	.proc_open	= dfscache_proc_open,
 310	.proc_read	= seq_read,
 311	.proc_lseek	= seq_lseek,
 312	.proc_release	= single_release,
 313	.proc_write	= dfscache_proc_write,
 314};
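/*
 * Illustrative use of the proc interface above (annotation, not part of the
 * original file; the entry is registered elsewhere in cifs as
 * /proc/fs/cifs/dfscache):
 *
 *   cat /proc/fs/cifs/dfscache        # dump every cached referral
 *   echo 0 > /proc/fs/cifs/dfscache   # drop all cache entries
 *
 * Writing any leading byte other than '0' returns -EINVAL.
 */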
 315
 316#ifdef CONFIG_CIFS_DEBUG2
 317static inline void dump_tgts(const struct cache_entry *ce)
 318{
 319	struct cache_dfs_tgt *t;
 320
 321	cifs_dbg(FYI, "target list:\n");
 322	list_for_each_entry(t, &ce->tlist, list) {
 323		cifs_dbg(FYI, "  %s%s\n", t->name,
 324			 READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
 325	}
 326}
 327
 328static inline void dump_ce(const struct cache_entry *ce)
 329{
 330	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
 331		 ce->path,
 332		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
 333		 ce->etime.tv_nsec,
 334		 ce->hdr_flags, ce->ref_flags,
 335		 IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
 336		 ce->path_consumed,
 337		 cache_entry_expired(ce) ? "yes" : "no");
 338	dump_tgts(ce);
 339}
 340
 341static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
 342{
 343	int i;
 344
 345	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
 346	for (i = 0; i < numrefs; i++) {
 347		const struct dfs_info3_param *ref = &refs[i];
 348
 349		cifs_dbg(FYI,
 350			 "\n"
 351			 "flags:         0x%x\n"
 352			 "path_consumed: %d\n"
 353			 "server_type:   0x%x\n"
 354			 "ref_flag:      0x%x\n"
 355			 "path_name:     %s\n"
 356			 "node_name:     %s\n"
 357			 "ttl:           %d (%dm)\n",
 358			 ref->flags, ref->path_consumed, ref->server_type,
 359			 ref->ref_flag, ref->path_name, ref->node_name,
 360			 ref->ttl, ref->ttl / 60);
 361	}
 362}
 363#else
 364#define dump_tgts(e)
 365#define dump_ce(e)
 366#define dump_refs(r, n)
 367#endif
 368
 369/**
 370 * dfs_cache_init - Initialize DFS referral cache.
 371 *
 372 * Return zero if initialized successfully, otherwise non-zero.
 373 */
 374int dfs_cache_init(void)
 375{
 376	int rc;
 377	int i;
 378
 379	dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
 380	if (!dfscache_wq)
 381		return -ENOMEM;
 382
 383	cache_slab = kmem_cache_create("cifs_dfs_cache",
 384				       sizeof(struct cache_entry), 0,
 385				       SLAB_HWCACHE_ALIGN, NULL);
 386	if (!cache_slab) {
 387		rc = -ENOMEM;
 388		goto out_destroy_wq;
 389	}
 390
 391	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
 392		INIT_HLIST_HEAD(&cache_htable[i]);
 393
 394	atomic_set(&cache_count, 0);
 395	cache_cp = load_nls("utf8");
 396	if (!cache_cp)
 397		cache_cp = load_nls_default();
 398
 399	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
 400	return 0;
 401
 402out_destroy_wq:
 403	destroy_workqueue(dfscache_wq);
 404	return rc;
 405}
 406
 407static int cache_entry_hash(const void *data, int size, unsigned int *hash)
 408{
 409	int i, clen;
 410	const unsigned char *s = data;
 411	wchar_t c;
 412	unsigned int h = 0;
 413
 414	for (i = 0; i < size; i += clen) {
 415		clen = cache_cp->char2uni(&s[i], size - i, &c);
 416		if (unlikely(clen < 0)) {
 417			cifs_dbg(VFS, "%s: can't convert char\n", __func__);
 418			return clen;
 419		}
 420		c = cifs_toupper(c);
 421		h = jhash(&c, sizeof(c), h);
 422	}
 423	*hash = h % CACHE_HTABLE_SIZE;
 424	return 0;
 425}
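/*
 * Annotation (not part of the original file): the hash chains jhash() over
 * each uppercased code point decoded with @cache_cp, so bucket selection is
 * case-insensitive: e.g. "\\SRV\Share" and "\\srv\share" land in the same
 * one of the CACHE_HTABLE_SIZE (32) buckets.
 */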
 426
 427/* Return target hint of a DFS cache entry */
 428static inline char *get_tgt_name(const struct cache_entry *ce)
 429{
 430	struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);
 431
 432	return t ? t->name : ERR_PTR(-ENOENT);
 433}
 434
 435/* Return expire time out of a new entry's TTL */
 436static inline struct timespec64 get_expire_time(int ttl)
 437{
 438	struct timespec64 ts = {
 439		.tv_sec = ttl,
 440		.tv_nsec = 0,
 441	};
 442	struct timespec64 now;
 443
 444	ktime_get_coarse_real_ts64(&now);
 445	return timespec64_add(now, ts);
 446}
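/*
 * Worked example (annotation, not part of the original file): a referral
 * with ttl = 300 expires at now + 300s.  copy_ref_data() below clamps small
 * TTLs, so a server-provided ttl = 10 still yields
 * max_t(int, 10, CACHE_MIN_TTL) = 120 seconds of cache lifetime.
 */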
 447
 448/* Allocate a new DFS target */
 449static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
 450{
 451	struct cache_dfs_tgt *t;
 452
 453	t = kmalloc(sizeof(*t), GFP_ATOMIC);
 454	if (!t)
 455		return ERR_PTR(-ENOMEM);
 456	t->name = kstrdup(name, GFP_ATOMIC);
 457	if (!t->name) {
 458		kfree(t);
 459		return ERR_PTR(-ENOMEM);
 460	}
 461	t->path_consumed = path_consumed;
 462	INIT_LIST_HEAD(&t->list);
 463	return t;
 464}
 465
 466/*
 467 * Copy DFS referral information to a cache entry and conditionally update
 468 * target hint.
 469 */
 470static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
 471			 struct cache_entry *ce, const char *tgthint)
 472{
 473	struct cache_dfs_tgt *target;
 474	int i;
 475
 476	ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
 477	ce->etime = get_expire_time(ce->ttl);
 478	ce->srvtype = refs[0].server_type;
 479	ce->hdr_flags = refs[0].flags;
 480	ce->ref_flags = refs[0].ref_flag;
 481	ce->path_consumed = refs[0].path_consumed;
 482
 483	for (i = 0; i < numrefs; i++) {
 484		struct cache_dfs_tgt *t;
 485
 486		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
 487		if (IS_ERR(t)) {
 488			free_tgts(ce);
 489			return PTR_ERR(t);
 490		}
 491		if (tgthint && !strcasecmp(t->name, tgthint)) {
 492			list_add(&t->list, &ce->tlist);
 493			tgthint = NULL;
 494		} else {
 495			list_add_tail(&t->list, &ce->tlist);
 496		}
 497		ce->numtgts++;
 498	}
 499
 500	target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
 501					  list);
 502	WRITE_ONCE(ce->tgthint, target);
 503
 504	return 0;
 505}
 506
 507/* Allocate a new cache entry */
 508static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
 509{
 510	struct cache_entry *ce;
 511	int rc;
 512
 513	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
 514	if (!ce)
 515		return ERR_PTR(-ENOMEM);
 516
 517	ce->path = refs[0].path_name;
 518	refs[0].path_name = NULL;
 519
 520	INIT_HLIST_NODE(&ce->hlist);
 521	INIT_LIST_HEAD(&ce->tlist);
 522
 523	rc = copy_ref_data(refs, numrefs, ce, NULL);
 524	if (rc) {
 525		kfree(ce->path);
 526		kmem_cache_free(cache_slab, ce);
 527		ce = ERR_PTR(rc);
 528	}
 529	return ce;
 530}
 531
 532static void remove_oldest_entry_locked(void)
 533{
 534	int i;
 535	struct cache_entry *ce;
 536	struct cache_entry *to_del = NULL;
 537
 538	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
 539
 540	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
 541		struct hlist_head *l = &cache_htable[i];
 542
 543		hlist_for_each_entry(ce, l, hlist) {
 544			if (hlist_unhashed(&ce->hlist))
 545				continue;
 546			if (!to_del || timespec64_compare(&ce->etime,
 547							  &to_del->etime) < 0)
 548				to_del = ce;
 549		}
 550	}
 551
 552	if (!to_del) {
 553		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
 554		return;
 555	}
 556
 557	cifs_dbg(FYI, "%s: removing entry\n", __func__);
 558	dump_ce(to_del);
 559	flush_cache_ent(to_del);
 560}
 561
 562/* Add a new DFS cache entry */
 563static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
 564						  int numrefs)
 565{
 566	int rc;
 567	struct cache_entry *ce;
 568	unsigned int hash;
 569
 570	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
 571
 572	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
 573		cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
 574		remove_oldest_entry_locked();
 575	}
 576
 577	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
 578	if (rc)
 579		return ERR_PTR(rc);
 580
 581	ce = alloc_cache_entry(refs, numrefs);
 582	if (IS_ERR(ce))
 583		return ce;
 584
 585	spin_lock(&cache_ttl_lock);
 586	if (!cache_ttl) {
 587		cache_ttl = ce->ttl;
 588		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
 589	} else {
 590		cache_ttl = min_t(int, cache_ttl, ce->ttl);
 591		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
 592	}
 593	spin_unlock(&cache_ttl_lock);
 594
 595	hlist_add_head(&ce->hlist, &cache_htable[hash]);
 596	dump_ce(ce);
 597
 598	atomic_inc(&cache_count);
 599
 600	return ce;
 601}
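/*
 * Annotation (not part of the original file): cache_ttl tracks the minimum
 * TTL across cached entries and paces refresh_task accordingly; e.g. after
 * caching entries with TTLs of 300 and 120, the refresh worker is
 * (re)scheduled to run every 120 seconds.
 */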
 602
 603/* Check if two DFS paths are equal.  @s1 and @s2 are expected to be in @cache_cp's charset */
 604static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
 605{
 606	int i, l1, l2;
 607	wchar_t c1, c2;
 608
 609	if (len1 != len2)
 610		return false;
 611
 612	for (i = 0; i < len1; i += l1) {
 613		l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
 614		l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
 615		if (unlikely(l1 < 0 && l2 < 0)) {
 616			if (s1[i] != s2[i])
 617				return false;
 618			l1 = 1;
 619			continue;
 620		}
 621		if (l1 != l2)
 622			return false;
 623		if (cifs_toupper(c1) != cifs_toupper(c2))
 624			return false;
 625	}
 626	return true;
 627}
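/*
 * Annotation (not part of the original file): the comparison is
 * case-insensitive per code point in @cache_cp's charset, so "\\SRV\Share"
 * and "\\srv\share" compare equal; it falls back to a raw byte comparison
 * only when a sequence cannot be decoded by char2uni().
 */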
 628
 629static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
 630{
 631	struct cache_entry *ce;
 632
 633	hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
 634		if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
 635			dump_ce(ce);
 636			return ce;
 637		}
 638	}
 639	return ERR_PTR(-ENOENT);
 640}
 641
 642/*
 643 * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path.
 644 *
 645 * Use whole path components in the match.  Must be called with htable_rw_lock held.
 646 *
 647 * Return cached entry if successful.
 648 * Return ERR_PTR(-ENOENT) if the entry is not found.
 649 * Return error ptr otherwise.
 650 */
 651static struct cache_entry *lookup_cache_entry(const char *path)
 652{
 653	struct cache_entry *ce;
 654	int cnt = 0;
 655	const char *s = path, *e;
 656	char sep = *s;
 657	unsigned int hash;
 658	int rc;
 659
 660	while ((s = strchr(s, sep)) && ++cnt < 3)
 661		s++;
 662
 663	if (cnt < 3) {
 664		rc = cache_entry_hash(path, strlen(path), &hash);
 665		if (rc)
 666			return ERR_PTR(rc);
 667		return __lookup_cache_entry(path, hash, strlen(path));
 668	}
 669	/*
 670	 * Handle paths that have more than two path components and are a complete prefix of the DFS
 671	 * referral request path (@path).
 672	 *
 673	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
 674	 */
 675	e = path + strlen(path) - 1;
 676	while (e > s) {
 677		int len;
 678
 679		/* skip separators */
 680		while (e > s && *e == sep)
 681			e--;
 682		if (e == s)
 683			break;
 684
 685		len = e + 1 - path;
 686		rc = cache_entry_hash(path, len, &hash);
 687		if (rc)
 688			return ERR_PTR(rc);
 689		ce = __lookup_cache_entry(path, hash, len);
 690		if (!IS_ERR(ce))
 691			return ce;
 692
 693		/* backward until separator */
 694		while (e > s && *e != sep)
 695			e--;
 696	}
 697	return ERR_PTR(-ENOENT);
 698}
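/*
 * Illustrative walk (annotation, not part of the original file; names made
 * up): for @path = "\\srv\dfs\dir1\dir2" the lookup tries the longest
 * whole-component prefixes first and stops at the <server, share> prefix:
 *
 *   "\\srv\dfs\dir1\dir2"  ->  "\\srv\dfs\dir1"  ->  "\\srv\dfs"
 *
 * returning the first prefix found in the cache.
 */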
 699
 700/**
 701 * dfs_cache_destroy - destroy DFS referral cache
 702 */
 703void dfs_cache_destroy(void)
 704{
 705	cancel_delayed_work_sync(&refresh_task);
 706	unload_nls(cache_cp);
 707	free_mount_group_list();
 708	flush_cache_ents();
 709	kmem_cache_destroy(cache_slab);
 710	destroy_workqueue(dfscache_wq);
 711
 712	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
 713}
 714
 715/* Update a cache entry with the new referral in @refs */
 716static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
 717				     int numrefs)
 718{
 719	struct cache_dfs_tgt *target;
 720	char *th = NULL;
 721	int rc;
 722
 723	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
 724
 725	target = READ_ONCE(ce->tgthint);
 726	if (target) {
 727		th = kstrdup(target->name, GFP_ATOMIC);
 728		if (!th)
 729			return -ENOMEM;
 730	}
 731
 732	free_tgts(ce);
 733	ce->numtgts = 0;
 734
 735	rc = copy_ref_data(refs, numrefs, ce, th);
 736
 737	kfree(th);
 738
 739	return rc;
 740}
 741
 742static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
 743			    struct dfs_info3_param **refs, int *numrefs)
 744{
 745	int rc;
 746	int i;
 747
 748	*refs = NULL;
 749	*numrefs = 0;
 750
 751	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
 752		return -EOPNOTSUPP;
 753	if (unlikely(!cache_cp))
 754		return -EINVAL;
 755
 756	cifs_dbg(FYI, "%s: ipc=%s referral=%s\n", __func__, ses->tcon_ipc->tree_name, path);
  757	rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
 758					      NO_MAP_UNI_RSVD);
 759	if (!rc) {
 760		struct dfs_info3_param *ref = *refs;
 761
 762		for (i = 0; i < *numrefs; i++)
 763			convert_delimiter(ref[i].path_name, '\\');
 764	}
 765	return rc;
 766}
 767
 768/*
 769 * Find, create or update a DFS cache entry.
 770 *
 771 * If the entry wasn't found, it will create a new one. Or if it was found but
 772 * expired, then it will update the entry accordingly.
 773 *
 774 * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
 775 * handle them properly.
 776 *
 777 * On success, return entry with acquired lock for reading, otherwise error ptr.
 778 */
 779static struct cache_entry *cache_refresh_path(const unsigned int xid,
 780					      struct cifs_ses *ses,
 781					      const char *path,
 782					      bool force_refresh)
 783{
 784	struct dfs_info3_param *refs = NULL;
 785	struct cache_entry *ce;
 786	int numrefs = 0;
 787	int rc;
 788
 789	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
 790
 791	down_read(&htable_rw_lock);
 792
 793	ce = lookup_cache_entry(path);
 794	if (!IS_ERR(ce)) {
 795		if (!force_refresh && !cache_entry_expired(ce))
 796			return ce;
 797	} else if (PTR_ERR(ce) != -ENOENT) {
 798		up_read(&htable_rw_lock);
 799		return ce;
 800	}
 801
 802	/*
 803	 * Unlock shared access as we don't want to hold any locks while getting
 804	 * a new referral.  The @ses used for performing the I/O could be
 805	 * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
 806	 * in order to failover -- if necessary.
 807	 */
 808	up_read(&htable_rw_lock);
 809
 810	/*
 811	 * Either the entry was not found, or it is expired, or it is a forced
 812	 * refresh.
 813	 * Request a new DFS referral in order to create or update a cache entry.
 814	 */
 815	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
 816	if (rc) {
 817		ce = ERR_PTR(rc);
 818		goto out;
 819	}
 820
 821	dump_refs(refs, numrefs);
 822
 823	down_write(&htable_rw_lock);
 824	/* Re-check as another task might have it added or refreshed already */
 825	ce = lookup_cache_entry(path);
 826	if (!IS_ERR(ce)) {
 827		if (force_refresh || cache_entry_expired(ce)) {
 828			rc = update_cache_entry_locked(ce, refs, numrefs);
 829			if (rc)
 830				ce = ERR_PTR(rc);
 831		}
 832	} else if (PTR_ERR(ce) == -ENOENT) {
 833		ce = add_cache_entry_locked(refs, numrefs);
 834	}
 835
 836	if (IS_ERR(ce)) {
 837		up_write(&htable_rw_lock);
 838		goto out;
 839	}
 840
 841	downgrade_write(&htable_rw_lock);
 842out:
 843	free_dfs_info_array(refs, numrefs);
 844	return ce;
 845}
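/*
 * Illustrative caller pattern (annotation, not part of the original file):
 * on success, cache_refresh_path() returns with htable_rw_lock held for
 * reading, so the caller must release it; on error no lock is held:
 *
 *	ce = cache_refresh_path(xid, ses, npath, false);
 *	if (!IS_ERR(ce)) {
 *		... use @ce ...
 *		up_read(&htable_rw_lock);
 *	}
 */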
 846
 847/*
 848 * Set up a DFS referral from a given cache entry.
 849 *
 850 * Must be called with htable_rw_lock held.
 851 */
 852static int setup_referral(const char *path, struct cache_entry *ce,
 853			  struct dfs_info3_param *ref, const char *target)
 854{
 855	int rc;
 856
 857	cifs_dbg(FYI, "%s: set up new ref\n", __func__);
 858
 859	memset(ref, 0, sizeof(*ref));
 860
 861	ref->path_name = kstrdup(path, GFP_ATOMIC);
 862	if (!ref->path_name)
 863		return -ENOMEM;
 864
 865	ref->node_name = kstrdup(target, GFP_ATOMIC);
 866	if (!ref->node_name) {
 867		rc = -ENOMEM;
 868		goto err_free_path;
 869	}
 870
 871	ref->path_consumed = ce->path_consumed;
 872	ref->ttl = ce->ttl;
 873	ref->server_type = ce->srvtype;
 874	ref->ref_flag = ce->ref_flags;
 875	ref->flags = ce->hdr_flags;
 876
 877	return 0;
 878
 879err_free_path:
 880	kfree(ref->path_name);
 881	ref->path_name = NULL;
 882	return rc;
 883}
 884
 885/* Return target list of a DFS cache entry */
 886static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
 887{
 888	int rc;
 889	struct list_head *head = &tl->tl_list;
 890	struct cache_dfs_tgt *t;
 891	struct dfs_cache_tgt_iterator *it, *nit;
 892
 893	memset(tl, 0, sizeof(*tl));
 894	INIT_LIST_HEAD(head);
 895
 896	list_for_each_entry(t, &ce->tlist, list) {
 897		it = kzalloc(sizeof(*it), GFP_ATOMIC);
 898		if (!it) {
 899			rc = -ENOMEM;
 900			goto err_free_it;
 901		}
 902
 903		it->it_name = kstrdup(t->name, GFP_ATOMIC);
 904		if (!it->it_name) {
 905			kfree(it);
 906			rc = -ENOMEM;
 907			goto err_free_it;
 908		}
 909		it->it_path_consumed = t->path_consumed;
 910
 911		if (READ_ONCE(ce->tgthint) == t)
 912			list_add(&it->it_list, head);
 913		else
 914			list_add_tail(&it->it_list, head);
 915	}
 916
 917	tl->tl_numtgts = ce->numtgts;
 918
 919	return 0;
 920
 921err_free_it:
 922	list_for_each_entry_safe(it, nit, head, it_list) {
 923		list_del(&it->it_list);
 924		kfree(it->it_name);
 925		kfree(it);
 926	}
 927	return rc;
 928}
 929
 930/**
 931 * dfs_cache_find - find a DFS cache entry
 932 *
 933 * If it doesn't find the cache entry, then it will get a DFS referral
 934 * for @path and create a new entry.
 935 *
 936 * In case the cache entry exists but expired, it will get a DFS referral
 937 * for @path and then update the respective cache entry.
 938 *
 939 * These parameters are passed down to the get_dfs_refer() call if it
 940 * needs to be issued:
 941 * @xid: syscall xid
 942 * @ses: smb session to issue the request on
 943 * @cp: codepage
 944 * @remap: path character remapping type
 945 * @path: path to lookup in DFS referral cache.
 946 *
 947 * @ref: when non-NULL, store single DFS referral result in it.
 948 * @tgt_list: when non-NULL, store complete DFS target list in it.
 949 *
 950 * Return zero if the target was found, otherwise non-zero.
 951 */
 952int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
 953		   int remap, const char *path, struct dfs_info3_param *ref,
 954		   struct dfs_cache_tgt_list *tgt_list)
 955{
 956	int rc;
 957	const char *npath;
 958	struct cache_entry *ce;
 959
 960	npath = dfs_cache_canonical_path(path, cp, remap);
 961	if (IS_ERR(npath))
 962		return PTR_ERR(npath);
 963
 964	ce = cache_refresh_path(xid, ses, npath, false);
 965	if (IS_ERR(ce)) {
 966		rc = PTR_ERR(ce);
 967		goto out_free_path;
 968	}
 969
 970	if (ref)
 971		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
 972	else
 973		rc = 0;
 974	if (!rc && tgt_list)
 975		rc = get_targets(ce, tgt_list);
 976
 977	up_read(&htable_rw_lock);
 978
 979out_free_path:
 980	kfree(npath);
 981	return rc;
 982}
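/*
 * Illustrative use of dfs_cache_find() (annotation, not part of the original
 * file; error handling trimmed, @xid/@ses/@path assumed to be set up by the
 * caller):
 *
 *	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
 *	struct dfs_cache_tgt_iterator *it;
 *
 *	if (!dfs_cache_find(xid, ses, cifs_sb->local_nls,
 *			    cifs_remap(cifs_sb), path, NULL, &tl)) {
 *		for (it = dfs_cache_get_tgt_iterator(&tl); it;
 *		     it = dfs_cache_get_next_tgt(&tl, it))
 *			cifs_dbg(FYI, "tgt: %s\n", dfs_cache_get_tgt_name(it));
 *		dfs_cache_free_tgts(&tl);
 *	}
 */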
 983
 984/**
 985 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 986 * the currently connected server.
 987 *
 988 * NOTE: This function will neither update a cache entry in case it was
 989 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 990 * relies on an existing cache entry.
 991 *
 992 * @path: canonical DFS path to lookup in the DFS referral cache.
 993 * @ref: when non-NULL, store single DFS referral result in it.
 994 * @tgt_list: when non-NULL, store complete DFS target list in it.
 995 *
 996 * Return 0 if successful.
 997 * Return -ENOENT if the entry was not found.
 998 * Return non-zero for other errors.
 999 */
1000int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
1001			 struct dfs_cache_tgt_list *tgt_list)
1002{
1003	int rc;
1004	struct cache_entry *ce;
1005
1006	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
1007
1008	down_read(&htable_rw_lock);
1009
1010	ce = lookup_cache_entry(path);
1011	if (IS_ERR(ce)) {
1012		rc = PTR_ERR(ce);
1013		goto out_unlock;
1014	}
1015
1016	if (ref)
1017		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
1018	else
1019		rc = 0;
1020	if (!rc && tgt_list)
1021		rc = get_targets(ce, tgt_list);
1022
1023out_unlock:
1024	up_read(&htable_rw_lock);
1025	return rc;
1026}
1027
1028/**
1029 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
1030 * without sending any requests to the currently connected server.
1031 *
1032 * NOTE: This function will neither update a cache entry in case it was
1033 * expired, nor create a new cache entry if @path hasn't been found. It heavily
1034 * relies on an existing cache entry.
1035 *
1036 * @path: canonical DFS path to lookup in DFS referral cache.
1037 * @it: target iterator which contains the target hint to update the cache
1038 * entry with.
1039 *
 1040 * This function returns nothing; the hint is left unchanged unless @path is found in the cache.
1041 */
1042void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
1043{
1044	struct cache_dfs_tgt *t;
1045	struct cache_entry *ce;
1046
1047	if (!path || !it)
1048		return;
1049
1050	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
1051
1052	down_read(&htable_rw_lock);
1053
1054	ce = lookup_cache_entry(path);
1055	if (IS_ERR(ce))
1056		goto out_unlock;
1057
1058	t = READ_ONCE(ce->tgthint);
1059
1060	if (unlikely(!strcasecmp(it->it_name, t->name)))
1061		goto out_unlock;
1062
1063	list_for_each_entry(t, &ce->tlist, list) {
1064		if (!strcasecmp(t->name, it->it_name)) {
1065			WRITE_ONCE(ce->tgthint, t);
1066			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
1067				 it->it_name);
1068			break;
1069		}
1070	}
1071
1072out_unlock:
1073	up_read(&htable_rw_lock);
1074}
1075
1076/**
1077 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
1078 * target iterator (@it).
1079 *
1080 * @path: canonical DFS path to lookup in DFS referral cache.
1081 * @it: DFS target iterator.
1082 * @ref: DFS referral pointer to set up the gathered information.
1083 *
1084 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
1085 */
1086int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
1087			       struct dfs_info3_param *ref)
1088{
1089	int rc;
1090	struct cache_entry *ce;
1091
1092	if (!it || !ref)
1093		return -EINVAL;
1094
1095	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
1096
1097	down_read(&htable_rw_lock);
1098
1099	ce = lookup_cache_entry(path);
1100	if (IS_ERR(ce)) {
1101		rc = PTR_ERR(ce);
1102		goto out_unlock;
1103	}
1104
1105	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
1106
1107	rc = setup_referral(path, ce, ref, it->it_name);
1108
1109out_unlock:
1110	up_read(&htable_rw_lock);
1111	return rc;
1112}
1113
1114/**
1115 * dfs_cache_add_refsrv_session - add SMB session of referral server
1116 *
1117 * @mount_id: mount group uuid to lookup.
1118 * @ses: reference counted SMB session of referral server.
1119 */
1120void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses)
1121{
1122	struct mount_group *mg;
1123
1124	if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses))
1125		return;
1126
1127	mg = get_mount_group(mount_id);
1128	if (WARN_ON_ONCE(IS_ERR(mg)))
1129		return;
1130
1131	spin_lock(&mg->lock);
1132	if (mg->num_sessions < ARRAY_SIZE(mg->sessions))
1133		mg->sessions[mg->num_sessions++] = ses;
1134	spin_unlock(&mg->lock);
1135	kref_put(&mg->refcount, mount_group_release);
1136}
1137
1138/**
1139 * dfs_cache_put_refsrv_sessions - put all referral server sessions
1140 *
1141 * Put all SMB sessions from the given mount group id.
1142 *
1143 * @mount_id: mount group uuid to lookup.
1144 */
1145void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
1146{
1147	struct mount_group *mg;
1148
1149	if (!mount_id || uuid_is_null(mount_id))
1150		return;
1151
1152	mutex_lock(&mount_group_list_lock);
1153	mg = find_mount_group_locked(mount_id);
1154	if (IS_ERR(mg)) {
1155		mutex_unlock(&mount_group_list_lock);
1156		return;
1157	}
1158	mutex_unlock(&mount_group_list_lock);
1159	kref_put(&mg->refcount, mount_group_release);
1160}
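/*
 * Annotation (not part of the original file): typical mount-group lifecycle
 * as driven by the mount code elsewhere in cifs:
 * dfs_cache_add_refsrv_session() is called once per referral-server session
 * while walking a DFS tree, and dfs_cache_put_refsrv_sessions() drops the
 * whole group (and its session references) at umount or on mount failure.
 */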
1161
1162/* Extract share from DFS target and return a pointer to prefix path or NULL */
1163static const char *parse_target_share(const char *target, char **share)
1164{
1165	const char *s, *seps = "/\\";
1166	size_t len;
1167
1168	s = strpbrk(target + 1, seps);
1169	if (!s)
1170		return ERR_PTR(-EINVAL);
1171
1172	len = strcspn(s + 1, seps);
1173	if (!len)
1174		return ERR_PTR(-EINVAL);
1175	s += len;
1176
1177	len = s - target + 1;
1178	*share = kstrndup(target, len, GFP_KERNEL);
1179	if (!*share)
1180		return ERR_PTR(-ENOMEM);
1181
1182	s = target + len;
1183	return s + strspn(s, seps);
1184}
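/*
 * Worked example (annotation, not part of the original file; names made up):
 * for target "\srv\share\a\b", parse_target_share() stores "\srv\share" in
 * *share and returns a pointer to the prefix path "a\b".  Targets without a
 * share component yield ERR_PTR(-EINVAL).
 */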
1185
1186/**
1187 * dfs_cache_get_tgt_share - parse a DFS target
1188 *
1189 * @path: DFS full path
1190 * @it: DFS target iterator.
1191 * @share: tree name.
1192 * @prefix: prefix path.
1193 *
1194 * Return zero if target was parsed correctly, otherwise non-zero.
1195 */
1196int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
1197			    char **prefix)
1198{
1199	char sep;
1200	char *target_share;
1201	char *ppath = NULL;
1202	const char *target_ppath, *dfsref_ppath;
1203	size_t target_pplen, dfsref_pplen;
1204	size_t len, c;
1205
1206	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
1207		return -EINVAL;
1208
1209	sep = it->it_name[0];
1210	if (sep != '\\' && sep != '/')
1211		return -EINVAL;
1212
1213	target_ppath = parse_target_share(it->it_name, &target_share);
1214	if (IS_ERR(target_ppath))
1215		return PTR_ERR(target_ppath);
1216
1217	/* point to prefix in DFS referral path */
1218	dfsref_ppath = path + it->it_path_consumed;
1219	dfsref_ppath += strspn(dfsref_ppath, "/\\");
1220
1221	target_pplen = strlen(target_ppath);
1222	dfsref_pplen = strlen(dfsref_ppath);
1223
1224	/* merge prefix paths from DFS referral path and target node */
1225	if (target_pplen || dfsref_pplen) {
1226		len = target_pplen + dfsref_pplen + 2;
1227		ppath = kzalloc(len, GFP_KERNEL);
1228		if (!ppath) {
1229			kfree(target_share);
1230			return -ENOMEM;
1231		}
1232		c = strscpy(ppath, target_ppath, len);
1233		if (c && dfsref_pplen)
1234			ppath[c] = sep;
1235		strlcat(ppath, dfsref_ppath, len);
1236	}
1237	*share = target_share;
1238	*prefix = ppath;
1239	return 0;
1240}
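/*
 * Worked example (annotation, not part of the original file; names made up):
 * given @path = "\srv\dfs\x\y", it->it_name = "\tgt\share\pre" and
 * it->it_path_consumed = 8 (i.e. "\srv\dfs" was consumed), the target's
 * prefix "pre" is merged with the referral remainder "x\y", yielding
 * *share = "\tgt\share" and *prefix = "pre\x\y".
 */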
1241
1242static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
1243{
1244	char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
1245	const char *host;
1246	size_t hostlen;
1247	struct sockaddr_storage ss;
1248	bool match;
1249	int rc;
1250
1251	if (strcasecmp(s1, s2))
1252		return false;
1253
1254	/*
1255	 * Resolve share's hostname and check if server address matches.  Otherwise just ignore it
1256	 * as we could not have upcall to resolve hostname or failed to convert ip address.
1257	 */
1258	extract_unc_hostname(s1, &host, &hostlen);
1259	scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
1260
1261	rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
1262	if (rc < 0) {
1263		cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
1264			 __func__, (int)hostlen, host);
1265		return true;
1266	}
1267
1268	cifs_server_lock(server);
1269	match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
1270	cifs_server_unlock(server);
1271
1272	return match;
1273}
1274
1275/*
1276 * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
1277 * target shares in @refs.
1278 */
1279static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
1280					 struct dfs_cache_tgt_list *old_tl,
1281					 struct dfs_cache_tgt_list *new_tl)
1282{
1283	struct dfs_cache_tgt_iterator *oit, *nit;
1284
1285	for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
1286	     oit = dfs_cache_get_next_tgt(old_tl, oit)) {
1287		for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
1288		     nit = dfs_cache_get_next_tgt(new_tl, nit)) {
1289			if (target_share_equal(server,
1290					       dfs_cache_get_tgt_name(oit),
1291					       dfs_cache_get_tgt_name(nit)))
1292				return;
1293		}
1294	}
1295
1296	cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
1297	cifs_signal_cifsd_for_reconnect(server, true);
1298}
1299
1300/* Refresh dfs referral of tcon and mark it for reconnect if needed */
1301static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
1302{
1303	struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
1304	struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
1305	struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
1306	struct cifs_tcon *ipc = ses->tcon_ipc;
1307	bool needs_refresh = false;
1308	struct cache_entry *ce;
1309	unsigned int xid;
1310	int rc = 0;
1311
1312	xid = get_xid();
1313
1314	down_read(&htable_rw_lock);
1315	ce = lookup_cache_entry(path);
1316	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
1317	if (!IS_ERR(ce)) {
1318		rc = get_targets(ce, &old_tl);
1319		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
1320	}
1321	up_read(&htable_rw_lock);
1322
1323	if (!needs_refresh) {
1324		rc = 0;
1325		goto out;
1326	}
1327
1328	spin_lock(&ipc->tc_lock);
1329	if (ses->ses_status != SES_GOOD || ipc->status != TID_GOOD) {
1330		spin_unlock(&ipc->tc_lock);
1331		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
1332		goto out;
1333	}
1334	spin_unlock(&ipc->tc_lock);
1335
1336	ce = cache_refresh_path(xid, ses, path, true);
1337	if (!IS_ERR(ce)) {
1338		rc = get_targets(ce, &new_tl);
1339		up_read(&htable_rw_lock);
1340		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
1341		mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
1342	}
1343
1344out:
1345	free_xid(xid);
1346	dfs_cache_free_tgts(&old_tl);
1347	dfs_cache_free_tgts(&new_tl);
1348	return rc;
1349}
1350
1351static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
1352{
1353	struct TCP_Server_Info *server = tcon->ses->server;
1354
1355	mutex_lock(&server->refpath_lock);
1356	if (server->leaf_fullpath)
1357		__refresh_tcon(server->leaf_fullpath + 1, tcon, force_refresh);
1358	mutex_unlock(&server->refpath_lock);
1359	return 0;
1360}
1361
1362/**
1363 * dfs_cache_remount_fs - remount a DFS share
1364 *
1365 * Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
1366 * match any of the new targets, mark it for reconnect.
1367 *
1368 * @cifs_sb: cifs superblock.
1369 *
1370 * Return zero if remounted, otherwise non-zero.
1371 */
1372int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
1373{
1374	struct cifs_tcon *tcon;
1375	struct TCP_Server_Info *server;
1376
1377	if (!cifs_sb || !cifs_sb->master_tlink)
1378		return -EINVAL;
1379
1380	tcon = cifs_sb_master_tcon(cifs_sb);
1381	server = tcon->ses->server;
1382
1383	if (!server->origin_fullpath) {
1384		cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
1385		return 0;
1386	}
1387
1388	if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
1389		cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__);
1390		return -EINVAL;
1391	}
1392	/*
1393	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
1394	 * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE).
1395	 */
1396	cifs_autodisable_serverino(cifs_sb);
1397	/*
1398	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
1399	 * that have different prefix paths.
1400	 */
1401	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
1402
1403	return refresh_tcon(tcon, true);
1404}
1405
1406/*
1407 * Worker that will refresh DFS cache from all active mounts based on lowest TTL value
1408 * from a DFS referral.
1409 */
1410static void refresh_cache_worker(struct work_struct *work)
1411{
1412	struct TCP_Server_Info *server;
1413	struct cifs_tcon *tcon, *ntcon;
1414	struct list_head tcons;
1415	struct cifs_ses *ses;
1416
1417	INIT_LIST_HEAD(&tcons);
1418
1419	spin_lock(&cifs_tcp_ses_lock);
1420	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
1421		if (!server->leaf_fullpath)
1422			continue;
1423
1424		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1425			if (ses->tcon_ipc) {
1426				ses->ses_count++;
1427				list_add_tail(&ses->tcon_ipc->ulist, &tcons);
1428			}
1429			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
1430				if (!tcon->ipc) {
1431					tcon->tc_count++;
1432					list_add_tail(&tcon->ulist, &tcons);
1433				}
1434			}
1435		}
1436	}
1437	spin_unlock(&cifs_tcp_ses_lock);
1438
1439	list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
1440		struct TCP_Server_Info *server = tcon->ses->server;
1441
1442		list_del_init(&tcon->ulist);
1443
1444		mutex_lock(&server->refpath_lock);
1445		if (server->leaf_fullpath)
1446			__refresh_tcon(server->leaf_fullpath + 1, tcon, false);
1447		mutex_unlock(&server->refpath_lock);
1448
1449		if (tcon->ipc)
1450			cifs_put_smb_ses(tcon->ses);
1451		else
1452			cifs_put_tcon(tcon);
1453	}
1454
1455	spin_lock(&cache_ttl_lock);
1456	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
1457	spin_unlock(&cache_ttl_lock);
1458}
v5.4 (fs/cifs/dfs_cache.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * DFS referral cache routines
   4 *
   5 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
   6 */
   7
   8#include <linux/rcupdate.h>
   9#include <linux/rculist.h>
  10#include <linux/jhash.h>
  11#include <linux/ktime.h>
  12#include <linux/slab.h>
  13#include <linux/nls.h>
  14#include <linux/workqueue.h>
  15#include "cifsglob.h"
  16#include "smb2pdu.h"
  17#include "smb2proto.h"
  18#include "cifsproto.h"
  19#include "cifs_debug.h"
  20#include "cifs_unicode.h"
  21#include "smb2glob.h"
  22
  23#include "dfs_cache.h"
  24
  25#define DFS_CACHE_HTABLE_SIZE 32
  26#define DFS_CACHE_MAX_ENTRIES 64
  27
  28#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
  29				    DFSREF_STORAGE_SERVER))
  30
  31struct dfs_cache_tgt {
  32	char *t_name;
  33	struct list_head t_list;
  34};
  35
  36struct dfs_cache_entry {
  37	struct hlist_node ce_hlist;
  38	const char *ce_path;
  39	int ce_ttl;
  40	int ce_srvtype;
  41	int ce_flags;
  42	struct timespec64 ce_etime;
  43	int ce_path_consumed;
  44	int ce_numtgts;
  45	struct list_head ce_tlist;
  46	struct dfs_cache_tgt *ce_tgthint;
  47	struct rcu_head ce_rcu;
  48};
  49
  50static struct kmem_cache *dfs_cache_slab __read_mostly;
  51
  52struct dfs_cache_vol_info {
  53	char *vi_fullpath;
  54	struct smb_vol vi_vol;
  55	char *vi_mntdata;
  56	struct list_head vi_list;
  57};
  58
  59struct dfs_cache {
  60	struct mutex dc_lock;
  61	struct nls_table *dc_nlsc;
  62	struct list_head dc_vol_list;
  63	int dc_ttl;
  64	struct delayed_work dc_refresh;
  65};
  66
  67static struct dfs_cache dfs_cache;
  68
  69/*
  70 * Number of entries in the cache
  71 */
  72static size_t dfs_cache_count;
  73
  74static DEFINE_MUTEX(dfs_cache_list_lock);
  75static struct hlist_head dfs_cache_htable[DFS_CACHE_HTABLE_SIZE];
  76
  77static void refresh_cache_worker(struct work_struct *work);
  78
  79static inline bool is_path_valid(const char *path)
  80{
  81	return path && (strchr(path + 1, '\\') || strchr(path + 1, '/'));
  82}
  83
  84static inline int get_normalized_path(const char *path, char **npath)
  85{
  86	if (*path == '\\') {
  87		*npath = (char *)path;
  88	} else {
  89		*npath = kstrndup(path, strlen(path), GFP_KERNEL);
  90		if (!*npath)
  91			return -ENOMEM;
  92		convert_delimiter(*npath, '\\');
  93	}
  94	return 0;
  95}
  96
  97static inline void free_normalized_path(const char *path, char *npath)
  98{
  99	if (path != npath)
 100		kfree(npath);
 101}
 102
 103static inline bool cache_entry_expired(const struct dfs_cache_entry *ce)
 104{
 105	struct timespec64 ts;
 106
 107	ktime_get_coarse_real_ts64(&ts);
 108	return timespec64_compare(&ts, &ce->ce_etime) >= 0;
 109}
 110
 111static inline void free_tgts(struct dfs_cache_entry *ce)
 112{
 113	struct dfs_cache_tgt *t, *n;
 114
 115	list_for_each_entry_safe(t, n, &ce->ce_tlist, t_list) {
 116		list_del(&t->t_list);
 117		kfree(t->t_name);
 118		kfree(t);
 119	}
 120}
 121
 122static void free_cache_entry(struct rcu_head *rcu)
 123{
 124	struct dfs_cache_entry *ce = container_of(rcu, struct dfs_cache_entry,
 125						  ce_rcu);
 126	kmem_cache_free(dfs_cache_slab, ce);
 127}
 128
 129static inline void flush_cache_ent(struct dfs_cache_entry *ce)
 130{
 131	if (hlist_unhashed(&ce->ce_hlist))
 132		return;
 133
 134	hlist_del_init_rcu(&ce->ce_hlist);
 135	kfree_const(ce->ce_path);
 136	free_tgts(ce);
 137	dfs_cache_count--;
 138	call_rcu(&ce->ce_rcu, free_cache_entry);
 139}
 140
 141static void flush_cache_ents(void)
 142{
 143	int i;
 144
 145	rcu_read_lock();
 146	for (i = 0; i < DFS_CACHE_HTABLE_SIZE; i++) {
 147		struct hlist_head *l = &dfs_cache_htable[i];
 148		struct dfs_cache_entry *ce;
 149
 150		hlist_for_each_entry_rcu(ce, l, ce_hlist)
 151			flush_cache_ent(ce);
 152	}
 153	rcu_read_unlock();
 154}
 155
 156/*
 157 * dfs cache /proc file
 158 */
 159static int dfscache_proc_show(struct seq_file *m, void *v)
 160{
 161	int bucket;
 162	struct dfs_cache_entry *ce;
 163	struct dfs_cache_tgt *t;
 164
 165	seq_puts(m, "DFS cache\n---------\n");
 166
 167	mutex_lock(&dfs_cache_list_lock);
 168
 169	rcu_read_lock();
 170	hash_for_each_rcu(dfs_cache_htable, bucket, ce, ce_hlist) {
 171		seq_printf(m,
 172			   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
 173			   "interlink=%s,path_consumed=%d,expired=%s\n",
 174			   ce->ce_path,
 175			   ce->ce_srvtype == DFS_TYPE_ROOT ? "root" : "link",
 176			   ce->ce_ttl, ce->ce_etime.tv_nsec,
 177			   IS_INTERLINK_SET(ce->ce_flags) ? "yes" : "no",
 178			   ce->ce_path_consumed,
 179			   cache_entry_expired(ce) ? "yes" : "no");
 180
 181		list_for_each_entry(t, &ce->ce_tlist, t_list) {
 182			seq_printf(m, "  %s%s\n",
 183				   t->t_name,
 184				   ce->ce_tgthint == t ? " (target hint)" : "");
 185		}
 186
 187	}
 188	rcu_read_unlock();
 189
 190	mutex_unlock(&dfs_cache_list_lock);
 191	return 0;
 192}
 193
 194static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
 195				   size_t count, loff_t *ppos)
 196{
 197	char c;
 198	int rc;
 199
 200	rc = get_user(c, buffer);
 201	if (rc)
 202		return rc;
 203
 204	if (c != '0')
 205		return -EINVAL;
 206
 207	cifs_dbg(FYI, "clearing dfs cache");
 208	mutex_lock(&dfs_cache_list_lock);
 209	flush_cache_ents();
 210	mutex_unlock(&dfs_cache_list_lock);
 211
 212	return count;
 213}
 214
 215static int dfscache_proc_open(struct inode *inode, struct file *file)
 216{
 217	return single_open(file, dfscache_proc_show, NULL);
 218}
 219
 220const struct file_operations dfscache_proc_fops = {
 221	.open		= dfscache_proc_open,
 222	.read		= seq_read,
 223	.llseek		= seq_lseek,
 224	.release	= single_release,
 225	.write		= dfscache_proc_write,
 226};
 227
 228#ifdef CONFIG_CIFS_DEBUG2
 229static inline void dump_tgts(const struct dfs_cache_entry *ce)
 230{
 231	struct dfs_cache_tgt *t;
 232
 233	cifs_dbg(FYI, "target list:\n");
 234	list_for_each_entry(t, &ce->ce_tlist, t_list) {
 235		cifs_dbg(FYI, "  %s%s\n", t->t_name,
 236			 ce->ce_tgthint == t ? " (target hint)" : "");
 237	}
 238}
 239
 240static inline void dump_ce(const struct dfs_cache_entry *ce)
 241{
 242	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
 243		 "interlink=%s,path_consumed=%d,expired=%s\n", ce->ce_path,
 244		 ce->ce_srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ce_ttl,
 245		 ce->ce_etime.tv_nsec,
 246		 IS_INTERLINK_SET(ce->ce_flags) ? "yes" : "no",
 247		 ce->ce_path_consumed,
 248		 cache_entry_expired(ce) ? "yes" : "no");
 249	dump_tgts(ce);
 250}
 251
 252static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
 253{
 254	int i;
 255
 256	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
 257	for (i = 0; i < numrefs; i++) {
 258		const struct dfs_info3_param *ref = &refs[i];
 259
 260		cifs_dbg(FYI,
 261			 "\n"
 262			 "flags:         0x%x\n"
 263			 "path_consumed: %d\n"
 264			 "server_type:   0x%x\n"
 265			 "ref_flag:      0x%x\n"
 266			 "path_name:     %s\n"
 267			 "node_name:     %s\n"
 268			 "ttl:           %d (%dm)\n",
 269			 ref->flags, ref->path_consumed, ref->server_type,
 270			 ref->ref_flag, ref->path_name, ref->node_name,
 271			 ref->ttl, ref->ttl / 60);
 272	}
 273}
 274#else
 275#define dump_tgts(e)
 276#define dump_ce(e)
 277#define dump_refs(r, n)
 278#endif
 279
 280/**
 281 * dfs_cache_init - Initialize DFS referral cache.
 282 *
 283 * Return zero if initialized successfully, otherwise non-zero.
 284 */
 285int dfs_cache_init(void)
 286{
 287	int i;
 288
 289	dfs_cache_slab = kmem_cache_create("cifs_dfs_cache",
 290					   sizeof(struct dfs_cache_entry), 0,
 291					   SLAB_HWCACHE_ALIGN, NULL);
 292	if (!dfs_cache_slab)
 293		return -ENOMEM;
 294
 295	for (i = 0; i < DFS_CACHE_HTABLE_SIZE; i++)
 296		INIT_HLIST_HEAD(&dfs_cache_htable[i]);
 297
 298	INIT_LIST_HEAD(&dfs_cache.dc_vol_list);
 299	mutex_init(&dfs_cache.dc_lock);
 300	INIT_DELAYED_WORK(&dfs_cache.dc_refresh, refresh_cache_worker);
 301	dfs_cache.dc_ttl = -1;
 302	dfs_cache.dc_nlsc = load_nls_default();
 303
 304	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
 305	return 0;
 306}
 307
 308static inline unsigned int cache_entry_hash(const void *data, int size)
 309{
 310	unsigned int h;
 311
 312	h = jhash(data, size, 0);
 313	return h & (DFS_CACHE_HTABLE_SIZE - 1);
 314}
 315
 316/* Check whether second path component of @path is SYSVOL or NETLOGON */
 317static inline bool is_sysvol_or_netlogon(const char *path)
 318{
 319	const char *s;
 320	char sep = path[0];
 321
 322	s = strchr(path + 1, sep) + 1;
 323	return !strncasecmp(s, "sysvol", strlen("sysvol")) ||
 324		!strncasecmp(s, "netlogon", strlen("netlogon"));
 325}
 326
 327/* Return target hint of a DFS cache entry */
 328static inline char *get_tgt_name(const struct dfs_cache_entry *ce)
 329{
 330	struct dfs_cache_tgt *t = ce->ce_tgthint;
 331
 332	return t ? t->t_name : ERR_PTR(-ENOENT);
 333}
 334
 335/* Return expire time out of a new entry's TTL */
 336static inline struct timespec64 get_expire_time(int ttl)
 337{
 338	struct timespec64 ts = {
 339		.tv_sec = ttl,
 340		.tv_nsec = 0,
 341	};
 342	struct timespec64 now;
 343
 344	ktime_get_coarse_real_ts64(&now);
 345	return timespec64_add(now, ts);
 346}
 347
 348/* Allocate a new DFS target */
 349static inline struct dfs_cache_tgt *alloc_tgt(const char *name)
 350{
 351	struct dfs_cache_tgt *t;
 352
 353	t = kmalloc(sizeof(*t), GFP_KERNEL);
 354	if (!t)
 355		return ERR_PTR(-ENOMEM);
 356	t->t_name = kstrndup(name, strlen(name), GFP_KERNEL);
 357	if (!t->t_name) {
 358		kfree(t);
 359		return ERR_PTR(-ENOMEM);
 360	}
 361	INIT_LIST_HEAD(&t->t_list);
 362	return t;
 363}
 364
 365/*
 366 * Copy DFS referral information to a cache entry and conditionally update
 367 * target hint.
 368 */
 369static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
 370			 struct dfs_cache_entry *ce, const char *tgthint)
 371{
 372	int i;
 373
 374	ce->ce_ttl = refs[0].ttl;
 375	ce->ce_etime = get_expire_time(ce->ce_ttl);
 376	ce->ce_srvtype = refs[0].server_type;
 377	ce->ce_flags = refs[0].ref_flag;
 378	ce->ce_path_consumed = refs[0].path_consumed;
 379
 380	for (i = 0; i < numrefs; i++) {
 381		struct dfs_cache_tgt *t;
 382
 383		t = alloc_tgt(refs[i].node_name);
 384		if (IS_ERR(t)) {
 385			free_tgts(ce);
 386			return PTR_ERR(t);
 387		}
 388		if (tgthint && !strcasecmp(t->t_name, tgthint)) {
 389			list_add(&t->t_list, &ce->ce_tlist);
 390			tgthint = NULL;
 391		} else {
 392			list_add_tail(&t->t_list, &ce->ce_tlist);
 393		}
 394		ce->ce_numtgts++;
 395	}
 396
 397	ce->ce_tgthint = list_first_entry_or_null(&ce->ce_tlist,
 398						  struct dfs_cache_tgt, t_list);
 399
 400	return 0;
 401}
 402
 403/* Allocate a new cache entry */
 404static struct dfs_cache_entry *
 405alloc_cache_entry(const char *path, const struct dfs_info3_param *refs,
 406		  int numrefs)
 407{
 408	struct dfs_cache_entry *ce;
 409	int rc;
 410
 411	ce = kmem_cache_zalloc(dfs_cache_slab, GFP_KERNEL);
 412	if (!ce)
 413		return ERR_PTR(-ENOMEM);
 414
 415	ce->ce_path = kstrdup_const(path, GFP_KERNEL);
 416	if (!ce->ce_path) {
 417		kmem_cache_free(dfs_cache_slab, ce);
 418		return ERR_PTR(-ENOMEM);
 419	}
 420	INIT_HLIST_NODE(&ce->ce_hlist);
 421	INIT_LIST_HEAD(&ce->ce_tlist);
 422
 423	rc = copy_ref_data(refs, numrefs, ce, NULL);
 424	if (rc) {
 425		kfree_const(ce->ce_path);
 426		kmem_cache_free(dfs_cache_slab, ce);
 427		ce = ERR_PTR(rc);
 428	}
 429	return ce;
 430}
 431
 432static void remove_oldest_entry(void)
 433{
 434	int bucket;
 435	struct dfs_cache_entry *ce;
 436	struct dfs_cache_entry *to_del = NULL;
 437
 438	rcu_read_lock();
 439	hash_for_each_rcu(dfs_cache_htable, bucket, ce, ce_hlist) {
 440		if (!to_del || timespec64_compare(&ce->ce_etime,
 441						  &to_del->ce_etime) < 0)
 442			to_del = ce;
 443	}
 444	if (!to_del) {
 445		cifs_dbg(FYI, "%s: no entry to remove", __func__);
 446		goto out;
 447	}
 448	cifs_dbg(FYI, "%s: removing entry", __func__);
 449	dump_ce(to_del);
 450	flush_cache_ent(to_del);
 451out:
 452	rcu_read_unlock();
 453}
 454
 455/* Add a new DFS cache entry */
 456static inline struct dfs_cache_entry *
 457add_cache_entry(unsigned int hash, const char *path,
 458		const struct dfs_info3_param *refs, int numrefs)
 459{
 460	struct dfs_cache_entry *ce;
 461
 462	ce = alloc_cache_entry(path, refs, numrefs);
 463	if (IS_ERR(ce))
 464		return ce;
 465
 466	hlist_add_head_rcu(&ce->ce_hlist, &dfs_cache_htable[hash]);
 467
 468	mutex_lock(&dfs_cache.dc_lock);
 469	if (dfs_cache.dc_ttl < 0) {
 470		dfs_cache.dc_ttl = ce->ce_ttl;
 471		queue_delayed_work(cifsiod_wq, &dfs_cache.dc_refresh,
 472				   dfs_cache.dc_ttl * HZ);
 473	} else {
 474		dfs_cache.dc_ttl = min_t(int, dfs_cache.dc_ttl, ce->ce_ttl);
 475		mod_delayed_work(cifsiod_wq, &dfs_cache.dc_refresh,
 476				 dfs_cache.dc_ttl * HZ);
 477	}
 478	mutex_unlock(&dfs_cache.dc_lock);
 479
 480	return ce;
 481}
 482
 483static struct dfs_cache_entry *__find_cache_entry(unsigned int hash,
 484						  const char *path)
 485{
 486	struct dfs_cache_entry *ce;
 487	bool found = false;
 488
 489	rcu_read_lock();
 490	hlist_for_each_entry_rcu(ce, &dfs_cache_htable[hash], ce_hlist) {
 491		if (!strcasecmp(path, ce->ce_path)) {
 492#ifdef CONFIG_CIFS_DEBUG2
 493			char *name = get_tgt_name(ce);
 494
 495			if (IS_ERR(name)) {
 496				rcu_read_unlock();
 497				return ERR_CAST(name);
 498			}
 499			cifs_dbg(FYI, "%s: cache hit\n", __func__);
 500			cifs_dbg(FYI, "%s: target hint: %s\n", __func__, name);
 501#endif
 502			found = true;
 503			break;
 504		}
 505	}
 506	rcu_read_unlock();
 507	return found ? ce : ERR_PTR(-ENOENT);
 508}
 509
 510/*
 511 * Find a DFS cache entry in hash table and optionally check prefix path against
 512 * @path.
 513 * Use whole path components in the match.
 514 * Return ERR_PTR(-ENOENT) if the entry is not found.
 515 */
 516static inline struct dfs_cache_entry *find_cache_entry(const char *path,
 517						       unsigned int *hash)
 518{
 519	*hash = cache_entry_hash(path, strlen(path));
 520	return __find_cache_entry(*hash, path);
 521}
 522
 523static inline void destroy_slab_cache(void)
 524{
 525	rcu_barrier();
 526	kmem_cache_destroy(dfs_cache_slab);
 527}
 528
 529static inline void free_vol(struct dfs_cache_vol_info *vi)
 530{
 531	list_del(&vi->vi_list);
 532	kfree(vi->vi_fullpath);
 533	kfree(vi->vi_mntdata);
 534	cifs_cleanup_volume_info_contents(&vi->vi_vol);
 535	kfree(vi);
 536}
 537
 538static inline void free_vol_list(void)
 539{
 540	struct dfs_cache_vol_info *vi, *nvi;
 541
 542	list_for_each_entry_safe(vi, nvi, &dfs_cache.dc_vol_list, vi_list)
 543		free_vol(vi);
 544}
 545
 546/**
 547 * dfs_cache_destroy - destroy DFS referral cache
 548 */
 549void dfs_cache_destroy(void)
 550{
 551	cancel_delayed_work_sync(&dfs_cache.dc_refresh);
 552	unload_nls(dfs_cache.dc_nlsc);
 553	free_vol_list();
 554	mutex_destroy(&dfs_cache.dc_lock);
 555
 556	flush_cache_ents();
 557	destroy_slab_cache();
 558	mutex_destroy(&dfs_cache_list_lock);
 559
 560	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
 561}
 562
 563static inline struct dfs_cache_entry *
 564__update_cache_entry(const char *path, const struct dfs_info3_param *refs,
 565		     int numrefs)
 566{
 567	int rc;
 568	unsigned int h;
 569	struct dfs_cache_entry *ce;
 570	char *s, *th = NULL;
 571
 572	ce = find_cache_entry(path, &h);
 573	if (IS_ERR(ce))
 574		return ce;
 575
 576	if (ce->ce_tgthint) {
 577		s = ce->ce_tgthint->t_name;
 578		th = kstrndup(s, strlen(s), GFP_KERNEL);
 579		if (!th)
 580			return ERR_PTR(-ENOMEM);
 581	}
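	/*
	 * Editor's note: @th snapshots the current target hint name so that
	 * copy_ref_data() can re-select the same target after the old target
	 * list is torn down below.
	 */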
 582
 583	free_tgts(ce);
 584	ce->ce_numtgts = 0;
 585
 586	rc = copy_ref_data(refs, numrefs, ce, th);
 587	kfree(th);
 588
 589	if (rc)
 590		ce = ERR_PTR(rc);
 591
 592	return ce;
 593}
 594
 595/* Update an expired cache entry by getting a new DFS referral from server */
 596static struct dfs_cache_entry *
 597update_cache_entry(const unsigned int xid, struct cifs_ses *ses,
 598		   const struct nls_table *nls_codepage, int remap,
 599		   const char *path, struct dfs_cache_entry *ce)
 600{
 601	int rc;
 602	struct dfs_info3_param *refs = NULL;
 603	int numrefs = 0;
 604
 605	cifs_dbg(FYI, "%s: update expired cache entry\n", __func__);
 606	/*
 607	 * Check if caller provided enough parameters to update an expired
 608	 * entry.
 609	 */
 610	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
 611		return ERR_PTR(-ETIME);
 612	if (unlikely(!nls_codepage))
 613		return ERR_PTR(-ETIME);
 614
 615	cifs_dbg(FYI, "%s: DFS referral request for %s\n", __func__, path);
 616
 617	rc = ses->server->ops->get_dfs_refer(xid, ses, path, &refs, &numrefs,
 618					     nls_codepage, remap);
 619	if (rc)
 620		ce = ERR_PTR(rc);
 621	else
 622		ce = __update_cache_entry(path, refs, numrefs);
 623
 624	dump_refs(refs, numrefs);
 625	free_dfs_info_array(refs, numrefs);
 626
 627	return ce;
 628}
 629
 630/*
 631 * Find, create or update a DFS cache entry.
 632 *
 633 * If the entry wasn't found, a new one is created. If it was found but has
 634 * expired, it is updated with a fresh referral.
 635 *
 636 * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
 637 * handle them properly.
 638 */
 639static struct dfs_cache_entry *
 640do_dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
 641		  const struct nls_table *nls_codepage, int remap,
 642		  const char *path, bool noreq)
 643{
 644	int rc;
 645	unsigned int h;
 646	struct dfs_cache_entry *ce;
 647	struct dfs_info3_param *nrefs;
 648	int numnrefs;
 649
 650	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
 651
 652	ce = find_cache_entry(path, &h);
 653	if (IS_ERR(ce)) {
 654		cifs_dbg(FYI, "%s: cache miss\n", __func__);
 655		/*
 656		 * If @noreq is set, no requests will be sent to the server for
 657		 * either updating or getting a new DFS referral.
 658		 */
 659		if (noreq)
 660			return ce;
 661		/*
 662		 * No cache entry was found, so check for valid parameters that
 663		 * will be required to get a new DFS referral and then create a
 664		 * new cache entry.
 665		 */
 666		if (!ses || !ses->server || !ses->server->ops->get_dfs_refer) {
 667			ce = ERR_PTR(-EOPNOTSUPP);
 668			return ce;
 669		}
 670		if (unlikely(!nls_codepage)) {
 671			ce = ERR_PTR(-EINVAL);
 672			return ce;
 673		}
 674
 675		nrefs = NULL;
 676		numnrefs = 0;
 677
 678		cifs_dbg(FYI, "%s: DFS referral request for %s\n", __func__,
 679			 path);
 680
 681		rc = ses->server->ops->get_dfs_refer(xid, ses, path, &nrefs,
 682						     &numnrefs, nls_codepage,
 683						     remap);
 684		if (rc) {
 685			ce = ERR_PTR(rc);
 686			return ce;
 687		}
 688
 689		dump_refs(nrefs, numnrefs);
 690
 691		cifs_dbg(FYI, "%s: new cache entry\n", __func__);
 692
 693		if (dfs_cache_count >= DFS_CACHE_MAX_ENTRIES) {
 694		cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
 695			 __func__, DFS_CACHE_MAX_ENTRIES);
 696			remove_oldest_entry();
 697		}
 698		ce = add_cache_entry(h, path, nrefs, numnrefs);
 699		free_dfs_info_array(nrefs, numnrefs);
 700
 701		if (IS_ERR(ce))
 702			return ce;
 703
 704		dfs_cache_count++;
 705	}
 706
 707	dump_ce(ce);
 708
 709	/* Just return the found cache entry in case @noreq is set */
 710	if (noreq)
 711		return ce;
 712
 713	if (cache_entry_expired(ce)) {
 714		cifs_dbg(FYI, "%s: expired cache entry\n", __func__);
 715		ce = update_cache_entry(xid, ses, nls_codepage, remap, path,
 716					ce);
 717		if (IS_ERR(ce)) {
 718			cifs_dbg(FYI, "%s: failed to update expired entry\n",
 719				 __func__);
 720		}
 721	}
 722	return ce;
 723}
 724
 725/* Set up a new DFS referral from a given cache entry */
 726static int setup_ref(const char *path, const struct dfs_cache_entry *ce,
 727		     struct dfs_info3_param *ref, const char *tgt)
 728{
 729	int rc;
 730
 731	cifs_dbg(FYI, "%s: set up new ref\n", __func__);
 732
 733	memset(ref, 0, sizeof(*ref));
 734
 735	ref->path_name = kstrndup(path, strlen(path), GFP_KERNEL);
 736	if (!ref->path_name)
 737		return -ENOMEM;
 738
 739	ref->path_consumed = ce->ce_path_consumed;
 740
 741	ref->node_name = kstrndup(tgt, strlen(tgt), GFP_KERNEL);
 742	if (!ref->node_name) {
 743		rc = -ENOMEM;
 744		goto err_free_path;
 745	}
 746
 747	ref->ttl = ce->ce_ttl;
 748	ref->server_type = ce->ce_srvtype;
 749	ref->ref_flag = ce->ce_flags;
 750
 751	return 0;
 752
 753err_free_path:
 754	kfree(ref->path_name);
 755	ref->path_name = NULL;
 756	return rc;
 757}
 758
 759/* Return target list of a DFS cache entry */
 760static int get_tgt_list(const struct dfs_cache_entry *ce,
 761			struct dfs_cache_tgt_list *tl)
 762{
 763	int rc;
 764	struct list_head *head = &tl->tl_list;
 765	struct dfs_cache_tgt *t;
 766	struct dfs_cache_tgt_iterator *it, *nit;
 767
 768	memset(tl, 0, sizeof(*tl));
 769	INIT_LIST_HEAD(head);
 770
 771	list_for_each_entry(t, &ce->ce_tlist, t_list) {
 772		it = kzalloc(sizeof(*it), GFP_KERNEL);
 773		if (!it) {
 774			rc = -ENOMEM;
 775			goto err_free_it;
 776		}
 777
 778		it->it_name = kstrndup(t->t_name, strlen(t->t_name),
 779				       GFP_KERNEL);
 780		if (!it->it_name) {
 781			kfree(it);
 782			rc = -ENOMEM;
 783			goto err_free_it;
 784		}
 785
 786		if (ce->ce_tgthint == t)
 787			list_add(&it->it_list, head);
 788		else
 789			list_add_tail(&it->it_list, head);
 790	}
 791	tl->tl_numtgts = ce->ce_numtgts;
 792
 793	return 0;
 794
 795err_free_it:
 796	list_for_each_entry_safe(it, nit, head, it_list) {
 797		kfree(it->it_name);
 798		kfree(it);
 799	}
 800	return rc;
 801}
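/*
 * Usage sketch (editor's addition, not part of the original file): walking
 * and releasing a target list filled in by get_tgt_list().  Because the
 * target hint is inserted at the head above, a plain in-order walk tries
 * the hinted target first.  The name example_walk_tgts() is hypothetical.
 */
static void example_walk_tgts(struct dfs_cache_tgt_list *tl)
{
	struct dfs_cache_tgt_iterator *it, *nit;

	list_for_each_entry(it, &tl->tl_list, it_list)
		cifs_dbg(FYI, "%s: target: %s\n", __func__, it->it_name);

	/* The caller owns the duplicated names and the iterators */
	list_for_each_entry_safe(it, nit, &tl->tl_list, it_list) {
		list_del(&it->it_list);
		kfree(it->it_name);
		kfree(it);
	}
}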
 802
 803/**
 804 * dfs_cache_find - find a DFS cache entry
 805 *
 806 * If it doesn't find the cache entry, then it will get a DFS referral
 807 * for @path and create a new entry.
 808 *
 809 * In case the cache entry exists but expired, it will get a DFS referral
 810 * for @path and then update the respective cache entry.
 811 *
 812 * These parameters are passed down to the get_dfs_refer() call if it
 813 * needs to be issued:
 814 * @xid: syscall xid
 815 * @ses: smb session to issue the request on
 816 * @nls_codepage: charset conversion
 817 * @remap: path character remapping type
 818 * @path: path to lookup in DFS referral cache.
 819 *
 820 * @ref: when non-NULL, store single DFS referral result in it.
 821 * @tgt_list: when non-NULL, store complete DFS target list in it.
 822 *
 823 * Return zero if the target was found, otherwise non-zero.
 824 */
 825int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
 826		   const struct nls_table *nls_codepage, int remap,
 827		   const char *path, struct dfs_info3_param *ref,
 828		   struct dfs_cache_tgt_list *tgt_list)
 829{
 830	int rc;
 831	char *npath;
 832	struct dfs_cache_entry *ce;
 833
 834	if (unlikely(!is_path_valid(path)))
 835		return -EINVAL;
 836
 837	rc = get_normalized_path(path, &npath);
 838	if (rc)
 839		return rc;
 840
 841	mutex_lock(&dfs_cache_list_lock);
 842	ce = do_dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
 843	if (!IS_ERR(ce)) {
 844		if (ref)
 845			rc = setup_ref(path, ce, ref, get_tgt_name(ce));
 846		else
 847			rc = 0;
 848		if (!rc && tgt_list)
 849			rc = get_tgt_list(ce, tgt_list);
 850	} else {
 851		rc = PTR_ERR(ce);
 852	}
 853	mutex_unlock(&dfs_cache_list_lock);
 854	free_normalized_path(path, npath);
 855	return rc;
 856}
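/*
 * Usage sketch (editor's addition): how a mount-time caller might resolve a
 * path.  The wrapper name example_mount_get_ref() is hypothetical;
 * cifs_sb->local_nls and cifs_remap() are the usual sources of the
 * codepage/remap arguments.
 */
static int example_mount_get_ref(const unsigned int xid, struct cifs_ses *ses,
				 struct cifs_sb_info *cifs_sb,
				 const char *full_path,
				 struct dfs_info3_param *ref)
{
	/*
	 * Fills @ref with the current target hint; a referral request is
	 * only sent on a cache miss or an expired entry.
	 */
	return dfs_cache_find(xid, ses, cifs_sb->local_nls,
			      cifs_remap(cifs_sb), full_path, ref, NULL);
}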
 857
 858/**
 859 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 860 * the currently connected server.
 861 *
 862 * NOTE: This function will neither update a cache entry that has expired nor
 863 * create a new one when @path is not found. It relies entirely on an already
 864 * existing cache entry.
 865 *
 866 * @path: path to lookup in the DFS referral cache.
 867 * @ref: when non-NULL, store single DFS referral result in it.
 868 * @tgt_list: when non-NULL, store complete DFS target list in it.
 869 *
 870 * Return 0 if successful.
 871 * Return -ENOENT if the entry was not found.
 872 * Return non-zero for other errors.
 873 */
 874int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
 875			 struct dfs_cache_tgt_list *tgt_list)
 876{
 877	int rc;
 878	char *npath;
 879	struct dfs_cache_entry *ce;
 880
 881	if (unlikely(!is_path_valid(path)))
 882		return -EINVAL;
 883
 884	rc = get_normalized_path(path, &npath);
 885	if (rc)
 886		return rc;
 887
 888	mutex_lock(&dfs_cache_list_lock);
 889	ce = do_dfs_cache_find(0, NULL, NULL, 0, npath, true);
 890	if (IS_ERR(ce)) {
 891		rc = PTR_ERR(ce);
 892		goto out;
 893	}
 894
 895	if (ref)
 896		rc = setup_ref(path, ce, ref, get_tgt_name(ce));
 897	else
 898		rc = 0;
 899	if (!rc && tgt_list)
 900		rc = get_tgt_list(ce, tgt_list);
 901out:
 902	mutex_unlock(&dfs_cache_list_lock);
 903	free_normalized_path(path, npath);
 904	return rc;
 905}
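/*
 * Usage sketch (editor's addition): a cheap cache-only probe, e.g. from a
 * reconnect path that must not trigger new referral requests.  The name
 * example_is_cached() is hypothetical.
 */
static bool example_is_cached(const char *path)
{
	/* Returns 0 whenever an entry exists, even if it has expired */
	return dfs_cache_noreq_find(path, NULL, NULL) == 0;
}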
 906
 907/**
 908 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
 909 *
 910 * If it doesn't find the cache entry, then it will get a DFS referral for @path
 911 * and create a new entry.
 912 *
 913 * In case the cache entry exists but expired, it will get a DFS referral
 914 * for @path and then update the respective cache entry.
 915 *
 916 * @xid: syscall xid
 917 * @ses: smb session
 918 * @nls_codepage: charset conversion
 919 * @remap: type of character remapping for paths
 920 * @path: path to lookup in DFS referral cache.
 921 * @it: DFS target iterator
 922 *
 923 * Return zero if the target hint was updated successfully, otherwise non-zero.
 924 */
 925int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
 926			     const struct nls_table *nls_codepage, int remap,
 927			     const char *path,
 928			     const struct dfs_cache_tgt_iterator *it)
 929{
 930	int rc;
 931	char *npath;
 932	struct dfs_cache_entry *ce;
 933	struct dfs_cache_tgt *t;
 934
 935	if (unlikely(!is_path_valid(path)))
 936		return -EINVAL;
 937
 938	rc = get_normalized_path(path, &npath);
 939	if (rc)
 940		return rc;
 941
 942	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
 943
 944	mutex_lock(&dfs_cache_list_lock);
 945	ce = do_dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
 946	if (IS_ERR(ce)) {
 947		rc = PTR_ERR(ce);
 948		goto out;
 949	}
 950
 951	rc = 0;
 952
 953	t = ce->ce_tgthint;
 954
 955	if (likely(!strcasecmp(it->it_name, t->t_name)))
 956		goto out;
 957
 958	list_for_each_entry(t, &ce->ce_tlist, t_list) {
 959		if (!strcasecmp(t->t_name, it->it_name)) {
 960			ce->ce_tgthint = t;
 961			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
 962				 it->it_name);
 963			break;
 964		}
 965	}
 966
 967out:
 968	mutex_unlock(&dfs_cache_list_lock);
 969	free_normalized_path(path, npath);
 970	return rc;
 971}
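/*
 * Usage sketch (editor's addition): after reconnecting to a target chosen
 * from a dfs_cache_tgt_list, record it as the new hint so that future
 * lookups hand it out first.  example_note_new_target() is hypothetical.
 */
static void example_note_new_target(const unsigned int xid,
				    struct cifs_ses *ses,
				    const struct nls_table *nlsc, int remap,
				    const char *path,
				    const struct dfs_cache_tgt_iterator *it)
{
	if (dfs_cache_update_tgthint(xid, ses, nlsc, remap, path, it))
		cifs_dbg(FYI, "%s: failed to update target hint\n", __func__);
}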
 972
 973/**
 974 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 975 * without sending any requests to the currently connected server.
 976 *
 977 * NOTE: This function will neither update a cache entry that has expired nor
 978 * create a new one when @path is not found. It relies entirely on an already
 979 * existing cache entry.
 980 *
 981 * @path: path to lookup in DFS referral cache.
 982 * @it: target iterator which contains the target hint to update the cache
 983 * entry with.
 984 *
 985 * Return zero if the target hint was updated successfully, otherwise non-zero.
 986 */
 987int dfs_cache_noreq_update_tgthint(const char *path,
 988				   const struct dfs_cache_tgt_iterator *it)
 989{
 990	int rc;
 991	char *npath;
 992	struct dfs_cache_entry *ce;
 993	struct dfs_cache_tgt *t;
 994
 995	if (unlikely(!is_path_valid(path)) || !it)
 996		return -EINVAL;
 997
 998	rc = get_normalized_path(path, &npath);
 999	if (rc)
1000		return rc;
1001
1002	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
1003
1004	mutex_lock(&dfs_cache_list_lock);
1005
1006	ce = do_dfs_cache_find(0, NULL, NULL, 0, npath, true);
1007	if (IS_ERR(ce)) {
1008		rc = PTR_ERR(ce);
1009		goto out;
1010	}
1011
1012	rc = 0;
1013
1014	t = ce->ce_tgthint;
1015
1016	if (unlikely(!strcasecmp(it->it_name, t->t_name)))
1017		goto out;
1018
1019	list_for_each_entry(t, &ce->ce_tlist, t_list) {
1020		if (!strcasecmp(t->t_name, it->it_name)) {
1021			ce->ce_tgthint = t;
1022			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
1023				 it->it_name);
1024			break;
1025		}
1026	}
1027
1028out:
1029	mutex_unlock(&dfs_cache_list_lock);
1030	free_normalized_path(path, npath);
1031	return rc;
1032}
1033
1034/**
1035 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
1036 * target iterator (@it).
1037 *
1038 * @path: path to lookup in DFS referral cache.
1039 * @it: DFS target iterator.
1040 * @ref: DFS referral pointer to set up the gathered information.
1041 *
1042 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
1043 */
1044int dfs_cache_get_tgt_referral(const char *path,
1045			       const struct dfs_cache_tgt_iterator *it,
1046			       struct dfs_info3_param *ref)
1047{
1048	int rc;
1049	char *npath;
1050	struct dfs_cache_entry *ce;
1051	unsigned int h;
1052
1053	if (!it || !ref)
1054		return -EINVAL;
1055	if (unlikely(!is_path_valid(path)))
1056		return -EINVAL;
1057
1058	rc = get_normalized_path(path, &npath);
1059	if (rc)
1060		return rc;
1061
1062	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
1063
1064	mutex_lock(&dfs_cache_list_lock);
1065
1066	ce = find_cache_entry(npath, &h);
1067	if (IS_ERR(ce)) {
1068		rc = PTR_ERR(ce);
1069		goto out;
1070	}
1071
1072	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
1073
1074	rc = setup_ref(path, ce, ref, it->it_name);
1075
1076out:
1077	mutex_unlock(&dfs_cache_list_lock);
1078	free_normalized_path(path, npath);
1079	return rc;
1080}
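/*
 * Usage sketch (editor's addition): materializing a referral for one chosen
 * target and releasing it with free_dfs_info_param(), as the refresh code
 * below does.  example_ref_from_tgt() is hypothetical.
 */
static int example_ref_from_tgt(const char *path,
				const struct dfs_cache_tgt_iterator *it)
{
	struct dfs_info3_param ref = {0};
	int rc;

	rc = dfs_cache_get_tgt_referral(path, it, &ref);
	if (!rc) {
		cifs_dbg(FYI, "%s: node: %s\n", __func__, ref.node_name);
		free_dfs_info_param(&ref);
	}
	return rc;
}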
1081
1082static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
1083{
1084	memcpy(new, vol, sizeof(*new));
1085
1086	if (vol->username) {
1087		new->username = kstrndup(vol->username, strlen(vol->username),
1088					GFP_KERNEL);
1089		if (!new->username)
1090			return -ENOMEM;
1091	}
1092	if (vol->password) {
1093		new->password = kstrndup(vol->password, strlen(vol->password),
1094					 GFP_KERNEL);
1095		if (!new->password)
1096			goto err_free_username;
1097	}
1098	if (vol->UNC) {
1099		cifs_dbg(FYI, "%s: vol->UNC: %s\n", __func__, vol->UNC);
1100		new->UNC = kstrndup(vol->UNC, strlen(vol->UNC), GFP_KERNEL);
1101		if (!new->UNC)
1102			goto err_free_password;
1103	}
1104	if (vol->domainname) {
1105		new->domainname = kstrndup(vol->domainname,
1106					  strlen(vol->domainname), GFP_KERNEL);
1107		if (!new->domainname)
1108			goto err_free_unc;
1109	}
1110	if (vol->iocharset) {
1111		new->iocharset = kstrndup(vol->iocharset,
1112					  strlen(vol->iocharset), GFP_KERNEL);
1113		if (!new->iocharset)
1114			goto err_free_domainname;
1115	}
1116	if (vol->prepath) {
1117		cifs_dbg(FYI, "%s: vol->prepath: %s\n", __func__, vol->prepath);
1118		new->prepath = kstrndup(vol->prepath, strlen(vol->prepath),
1119					GFP_KERNEL);
1120		if (!new->prepath)
1121			goto err_free_iocharset;
1122	}
1123
1124	return 0;
1125
1126err_free_iocharset:
1127	kfree(new->iocharset);
1128err_free_domainname:
1129	kfree(new->domainname);
1130err_free_unc:
1131	kfree(new->UNC);
1132err_free_password:
1133	kzfree(new->password);
1134err_free_username:
1135	kfree(new->username);
1136	/* @new is embedded in the caller's dfs_cache_vol_info; do not free it */
1137	return -ENOMEM;
1138}
1139
1140/**
1141 * dfs_cache_add_vol - add a cifs volume during mount() that will be handled by
1142 * DFS cache refresh worker.
1143 *
1144 * @mntdata: mount data.
1145 * @vol: cifs volume.
1146 * @fullpath: origin full path.
1147 *
1148 * Return zero if volume was set up correctly, otherwise non-zero.
1149 */
1150int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
1151{
1152	int rc;
1153	struct dfs_cache_vol_info *vi;
1154
1155	if (!vol || !fullpath || !mntdata)
1156		return -EINVAL;
1157
1158	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1159
1160	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
1161	if (!vi)
1162		return -ENOMEM;
1163
1164	vi->vi_fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
1165	if (!vi->vi_fullpath) {
1166		rc = -ENOMEM;
1167		goto err_free_vi;
1168	}
1169
1170	rc = dup_vol(vol, &vi->vi_vol);
1171	if (rc)
1172		goto err_free_fullpath;
1173
1174	vi->vi_mntdata = mntdata;
1175
1176	mutex_lock(&dfs_cache.dc_lock);
1177	list_add_tail(&vi->vi_list, &dfs_cache.dc_vol_list);
1178	mutex_unlock(&dfs_cache.dc_lock);
1179	return 0;
1180
1181err_free_fullpath:
1182	kfree(vi->vi_fullpath);
1183err_free_vi:
1184	kfree(vi);
1185	return rc;
1186}
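/*
 * Usage sketch (editor's addition): registering a freshly mounted DFS volume
 * with the refresh worker; dfs_cache_del_vol() below is the umount-time
 * counterpart.  example_register_mount() is hypothetical; note that
 * ownership of @mntdata passes to the cache on success.
 */
static int example_register_mount(char *mntdata, struct smb_vol *vol,
				  const char *fullpath)
{
	int rc = dfs_cache_add_vol(mntdata, vol, fullpath);

	if (rc)
		cifs_dbg(FYI, "%s: will not refresh %s: %d\n", __func__,
			 fullpath, rc);
	return rc;
}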
1187
1188static inline struct dfs_cache_vol_info *find_vol(const char *fullpath)
1189{
1190	struct dfs_cache_vol_info *vi;
1191
1192	list_for_each_entry(vi, &dfs_cache.dc_vol_list, vi_list) {
1193		cifs_dbg(FYI, "%s: vi->vi_fullpath: %s\n", __func__,
1194			 vi->vi_fullpath);
1195		if (!strcasecmp(vi->vi_fullpath, fullpath))
1196			return vi;
1197	}
1198	return ERR_PTR(-ENOENT);
1199}
1200
1201/**
1202 * dfs_cache_update_vol - update vol info in DFS cache after failover
1203 *
1204 * @fullpath: fullpath to look up in volume list.
 1205 * @server: TCP server session providing the new destination address.
1206 *
1207 * Return zero if volume was updated, otherwise non-zero.
1208 */
1209int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
1210{
1211	int rc;
1212	struct dfs_cache_vol_info *vi;
1213
1214	if (!fullpath || !server)
1215		return -EINVAL;
1216
1217	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1218
1219	mutex_lock(&dfs_cache.dc_lock);
1220
1221	vi = find_vol(fullpath);
1222	if (IS_ERR(vi)) {
1223		rc = PTR_ERR(vi);
1224		goto out;
1225	}
1226
1227	cifs_dbg(FYI, "%s: updating volume info\n", __func__);
1228	memcpy(&vi->vi_vol.dstaddr, &server->dstaddr,
1229	       sizeof(vi->vi_vol.dstaddr));
1230	rc = 0;
1231
1232out:
1233	mutex_unlock(&dfs_cache.dc_lock);
1234	return rc;
1235}
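/*
 * Usage sketch (editor's addition): after a failover reconnect, propagate
 * the new server address, keyed by the origin full path saved at mount
 * time.  example_after_failover() is hypothetical.
 */
static void example_after_failover(const char *fullpath,
				   struct TCP_Server_Info *server)
{
	if (dfs_cache_update_vol(fullpath, server))
		cifs_dbg(FYI, "%s: no vol info for %s\n", __func__, fullpath);
}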
1236
1237/**
1238 * dfs_cache_del_vol - remove volume info in DFS cache during umount()
1239 *
1240 * @fullpath: fullpath to look up in volume list.
1241 */
1242void dfs_cache_del_vol(const char *fullpath)
1243{
1244	struct dfs_cache_vol_info *vi;
1245
1246	if (!fullpath || !*fullpath)
1247		return;
1248
1249	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1250
1251	mutex_lock(&dfs_cache.dc_lock);
1252	vi = find_vol(fullpath);
1253	if (!IS_ERR(vi))
1254		free_vol(vi);
1255	mutex_unlock(&dfs_cache.dc_lock);
1256}
1257
1258/* Get all tcons that are within a DFS namespace and can be refreshed */
1259static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
1260{
1261	struct cifs_ses *ses;
1262	struct cifs_tcon *tcon;
1263
1264	INIT_LIST_HEAD(head);
1265
1266	spin_lock(&cifs_tcp_ses_lock);
1267	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1268		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
1269			if (!tcon->need_reconnect && !tcon->need_reopen_files &&
1270			    tcon->dfs_path) {
1271				tcon->tc_count++;
1272				list_add_tail(&tcon->ulist, head);
1273			}
1274		}
1275		if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
1276		    ses->tcon_ipc->dfs_path) {
1277			list_add_tail(&ses->tcon_ipc->ulist, head);
1278		}
1279	}
1280	spin_unlock(&cifs_tcp_ses_lock);
1281}
1282
1283static inline bool is_dfs_link(const char *path)
1284{
1285	char *s;
1286
1287	s = strchr(path + 1, '\\');
1288	if (!s)
1289		return false;
1290	return !!strchr(s + 1, '\\');
1291}
1292
1293static inline char *get_dfs_root(const char *path)
1294{
1295	char *s, *npath;
1296
1297	s = strchr(path + 1, '\\');
1298	if (!s)
1299		return ERR_PTR(-EINVAL);
1300
1301	s = strchr(s + 1, '\\');
1302	if (!s)
1303		return ERR_PTR(-EINVAL);
1304
1305	npath = kstrndup(path, s - path, GFP_KERNEL);
1306	if (!npath)
1307		return ERR_PTR(-ENOMEM);
1308
1309	return npath;
1310}
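/*
 * Editor's note, worked example: for a path like \srv\share, is_dfs_link()
 * returns false (nothing follows the share component), while for
 * \srv\share\dir it returns true and get_dfs_root() yields \srv\share.
 */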
1311
 1312/* Find the root SMB session for a given DFS link path */
1313static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
1314				      struct cifs_tcon *tcon, const char *path)
1315{
1316	char *rpath;
1317	int rc;
1318	struct dfs_info3_param ref = {0};
1319	char *mdata = NULL, *devname = NULL;
1320	bool is_smb3 = tcon->ses->server->vals->header_preamble_size == 0;
1321	struct TCP_Server_Info *server;
1322	struct cifs_ses *ses;
1323	struct smb_vol vol;
1324
1325	rpath = get_dfs_root(path);
1326	if (IS_ERR(rpath))
1327		return ERR_CAST(rpath);
1328
1329	memset(&vol, 0, sizeof(vol));
1330
1331	rc = dfs_cache_noreq_find(rpath, &ref, NULL);
1332	if (rc) {
1333		ses = ERR_PTR(rc);
1334		goto out;
1335	}
1336
1337	mdata = cifs_compose_mount_options(vi->vi_mntdata, rpath, &ref,
1338					   &devname);
1339	free_dfs_info_param(&ref);
1340
1341	if (IS_ERR(mdata)) {
1342		ses = ERR_CAST(mdata);
1343		mdata = NULL;
1344		goto out;
1345	}
1346
1347	rc = cifs_setup_volume_info(&vol, mdata, devname, is_smb3);
1348	kfree(devname);
1349
1350	if (rc) {
1351		ses = ERR_PTR(rc);
1352		goto out;
1353	}
1354
1355	server = cifs_find_tcp_session(&vol);
1356	if (IS_ERR_OR_NULL(server)) {
1357		ses = ERR_PTR(-EHOSTDOWN);
1358		goto out;
1359	}
1360	if (server->tcpStatus != CifsGood) {
1361		cifs_put_tcp_session(server, 0);
1362		ses = ERR_PTR(-EHOSTDOWN);
1363		goto out;
1364	}
1365
1366	ses = cifs_get_smb_ses(server, &vol);
1367
1368out:
1369	cifs_cleanup_volume_info_contents(&vol);
1370	kfree(mdata);
1371	kfree(rpath);
1372
1373	return ses;
1374}
1375
1376/* Refresh DFS cache entry from a given tcon */
1377static void do_refresh_tcon(struct dfs_cache *dc, struct dfs_cache_vol_info *vi,
1378			    struct cifs_tcon *tcon)
1379{
1380	int rc = 0;
1381	unsigned int xid;
1382	char *path, *npath;
1383	unsigned int h;
1384	struct dfs_cache_entry *ce;
1385	struct dfs_info3_param *refs = NULL;
1386	int numrefs = 0;
1387	struct cifs_ses *root_ses = NULL, *ses;
1388
1389	xid = get_xid();
1390
1391	path = tcon->dfs_path + 1;
1392
1393	rc = get_normalized_path(path, &npath);
1394	if (rc)
1395		goto out;
1396
1397	mutex_lock(&dfs_cache_list_lock);
1398	ce = find_cache_entry(npath, &h);
1399	mutex_unlock(&dfs_cache_list_lock);
1400
1401	if (IS_ERR(ce)) {
1402		rc = PTR_ERR(ce);
1403		goto out;
1404	}
1405
1406	if (!cache_entry_expired(ce))
1407		goto out;
1408
1409	/* If it's a DFS Link, then use root SMB session for refreshing it */
1410	if (is_dfs_link(npath)) {
1411		ses = root_ses = find_root_ses(vi, tcon, npath);
1412		if (IS_ERR(ses)) {
1413			rc = PTR_ERR(ses);
1414			root_ses = NULL;
1415			goto out;
1416		}
1417	} else {
1418		ses = tcon->ses;
1419	}
1420
1421	if (unlikely(!ses->server->ops->get_dfs_refer)) {
1422		rc = -EOPNOTSUPP;
1423	} else {
1424		rc = ses->server->ops->get_dfs_refer(xid, ses, path, &refs,
1425						     &numrefs, dc->dc_nlsc,
1426						     tcon->remap);
1427		if (!rc) {
1428			mutex_lock(&dfs_cache_list_lock);
1429			ce = __update_cache_entry(npath, refs, numrefs);
1430			mutex_unlock(&dfs_cache_list_lock);
1431			dump_refs(refs, numrefs);
1432			free_dfs_info_array(refs, numrefs);
1433			if (IS_ERR(ce))
1434				rc = PTR_ERR(ce);
1435		}
1436	}
1437
1438out:
1439	if (root_ses)
1440		cifs_put_smb_ses(root_ses);
1441
1442	free_xid(xid);
1443	free_normalized_path(path, npath);
1444}
1445
1446/*
1447 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
1448 * referral.
1449 */
1450static void refresh_cache_worker(struct work_struct *work)
1451{
1452	struct dfs_cache *dc = container_of(work, struct dfs_cache,
1453					    dc_refresh.work);
1454	struct dfs_cache_vol_info *vi;
1455	struct TCP_Server_Info *server;
1456	LIST_HEAD(list);
1457	struct cifs_tcon *tcon, *ntcon;
1458
1459	mutex_lock(&dc->dc_lock);
1460
1461	list_for_each_entry(vi, &dc->dc_vol_list, vi_list) {
1462		server = cifs_find_tcp_session(&vi->vi_vol);
1463		if (IS_ERR_OR_NULL(server))
1464			continue;
1465		if (server->tcpStatus != CifsGood)
1466			goto next;
1467		get_tcons(server, &list);
1468		list_for_each_entry_safe(tcon, ntcon, &list, ulist) {
1469			do_refresh_tcon(dc, vi, tcon);
1470			list_del_init(&tcon->ulist);
1471			cifs_put_tcon(tcon);
1472		}
1473next:
1474		cifs_put_tcp_session(server, 0);
1475	}
1476	queue_delayed_work(cifsiod_wq, &dc->dc_refresh, dc->dc_ttl * HZ);
1477	mutex_unlock(&dc->dc_lock);
1478}