1// SPDX-License-Identifier: GPL-2.0
2/*
3 * DFS referral cache routines
4 *
5 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
6 */
7
8#include <linux/jhash.h>
9#include <linux/ktime.h>
10#include <linux/slab.h>
11#include <linux/proc_fs.h>
12#include <linux/nls.h>
13#include <linux/workqueue.h>
14#include <linux/uuid.h>
15#include "cifsglob.h"
16#include "smb2pdu.h"
17#include "smb2proto.h"
18#include "cifsproto.h"
19#include "cifs_debug.h"
20#include "cifs_unicode.h"
21#include "smb2glob.h"
22#include "dns_resolve.h"
23
24#include "dfs_cache.h"
25
26#define CACHE_HTABLE_SIZE 32
27#define CACHE_MAX_ENTRIES 64
28#define CACHE_MIN_TTL 120 /* 2 minutes */
29
30#define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
31
32struct cache_dfs_tgt {
33 char *name;
34 int path_consumed;
35 struct list_head list;
36};
37
38struct cache_entry {
39 struct hlist_node hlist;
40 const char *path;
41 int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
42 int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
43 int srvtype; /* DFS_REREFERRAL_V3.ServerType */
44 int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
45 struct timespec64 etime;
46 int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
47 int numtgts;
48 struct list_head tlist;
49 struct cache_dfs_tgt *tgthint;
50};
51
52/* List of referral server sessions per dfs mount */
53struct mount_group {
54 struct list_head list;
55 uuid_t id;
56 struct cifs_ses *sessions[CACHE_MAX_ENTRIES];
57 int num_sessions;
58 spinlock_t lock;
59 struct list_head refresh_list;
60 struct kref refcount;
61};
62
63static struct kmem_cache *cache_slab __read_mostly;
64static struct workqueue_struct *dfscache_wq __read_mostly;
65
66static int cache_ttl;
67static DEFINE_SPINLOCK(cache_ttl_lock);
68
69static struct nls_table *cache_cp;
70
71/*
72 * Number of entries in the cache
73 */
74static atomic_t cache_count;
75
76static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
77static DECLARE_RWSEM(htable_rw_lock);
78
79static LIST_HEAD(mount_group_list);
80static DEFINE_MUTEX(mount_group_list_lock);
81
82static void refresh_cache_worker(struct work_struct *work);
83
84static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
85
86static void __mount_group_release(struct mount_group *mg)
87{
88 int i;
89
90 for (i = 0; i < mg->num_sessions; i++)
91 cifs_put_smb_ses(mg->sessions[i]);
92 kfree(mg);
93}
94
95static void mount_group_release(struct kref *kref)
96{
97 struct mount_group *mg = container_of(kref, struct mount_group, refcount);
98
99 mutex_lock(&mount_group_list_lock);
100 list_del(&mg->list);
101 mutex_unlock(&mount_group_list_lock);
102 __mount_group_release(mg);
103}
104
105static struct mount_group *find_mount_group_locked(const uuid_t *id)
106{
107 struct mount_group *mg;
108
109 list_for_each_entry(mg, &mount_group_list, list) {
110 if (uuid_equal(&mg->id, id))
111 return mg;
112 }
113 return ERR_PTR(-ENOENT);
114}
115
116static struct mount_group *__get_mount_group_locked(const uuid_t *id)
117{
118 struct mount_group *mg;
119
120 mg = find_mount_group_locked(id);
121 if (!IS_ERR(mg))
122 return mg;
123
124 mg = kmalloc(sizeof(*mg), GFP_KERNEL);
125 if (!mg)
126 return ERR_PTR(-ENOMEM);
127 kref_init(&mg->refcount);
128 uuid_copy(&mg->id, id);
129 mg->num_sessions = 0;
130 spin_lock_init(&mg->lock);
131 list_add(&mg->list, &mount_group_list);
132 return mg;
133}
134
135static struct mount_group *get_mount_group(const uuid_t *id)
136{
137 struct mount_group *mg;
138
139 mutex_lock(&mount_group_list_lock);
140 mg = __get_mount_group_locked(id);
141 if (!IS_ERR(mg))
142 kref_get(&mg->refcount);
143 mutex_unlock(&mount_group_list_lock);
144
145 return mg;
146}
147
148static void free_mount_group_list(void)
149{
150 struct mount_group *mg, *tmp_mg;
151
152 list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) {
153 list_del_init(&mg->list);
154 __mount_group_release(mg);
155 }
156}
157
158/**
159 * dfs_cache_canonical_path - get a canonical DFS path
160 *
161 * @path: DFS path
162 * @cp: codepage
163 * @remap: mapping type
164 *
165 * Return canonical path on success, otherwise an ERR_PTR.
166 */
167char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
168{
169 char *tmp;
170 int plen = 0;
171 char *npath;
172
173 if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
174 return ERR_PTR(-EINVAL);
175
176 if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
177 tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
178 if (!tmp) {
179 cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
180 return ERR_PTR(-EINVAL);
181 }
182
183 npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
184 kfree(tmp);
185
186 if (!npath) {
187 cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
188 return ERR_PTR(-EINVAL);
189 }
190 } else {
191 npath = kstrdup(path, GFP_KERNEL);
192 if (!npath)
193 return ERR_PTR(-ENOMEM);
194 }
195 convert_delimiter(npath, '\\');
196 return npath;
197}
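
/*
 * Illustrative usage sketch (editorial example, not part of the build). The
 * @cifs_sb-derived arguments below are assumptions for the example only:
 *
 *	char *npath;
 *
 *	npath = dfs_cache_canonical_path("//srv/dfsroot/link", cifs_sb->local_nls,
 *					 cifs_remap(cifs_sb));
 *	if (IS_ERR(npath))
 *		return PTR_ERR(npath);
 *	// on success, npath holds the same path with '\\' delimiters, in
 *	// @cache_cp's charset
 *	kfree(npath);
 */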
198
199static inline bool cache_entry_expired(const struct cache_entry *ce)
200{
201 struct timespec64 ts;
202
203 ktime_get_coarse_real_ts64(&ts);
204 return timespec64_compare(&ts, &ce->etime) >= 0;
205}
206
207static inline void free_tgts(struct cache_entry *ce)
208{
209 struct cache_dfs_tgt *t, *n;
210
211 list_for_each_entry_safe(t, n, &ce->tlist, list) {
212 list_del(&t->list);
213 kfree(t->name);
214 kfree(t);
215 }
216}
217
218static inline void flush_cache_ent(struct cache_entry *ce)
219{
220 hlist_del_init(&ce->hlist);
221 kfree(ce->path);
222 free_tgts(ce);
223 atomic_dec(&cache_count);
224 kmem_cache_free(cache_slab, ce);
225}
226
227static void flush_cache_ents(void)
228{
229 int i;
230
231 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
232 struct hlist_head *l = &cache_htable[i];
233 struct hlist_node *n;
234 struct cache_entry *ce;
235
236 hlist_for_each_entry_safe(ce, n, l, hlist) {
237 if (!hlist_unhashed(&ce->hlist))
238 flush_cache_ent(ce);
239 }
240 }
241}
242
243/*
244 * dfs cache /proc file
245 */
246static int dfscache_proc_show(struct seq_file *m, void *v)
247{
248 int i;
249 struct cache_entry *ce;
250 struct cache_dfs_tgt *t;
251
252 seq_puts(m, "DFS cache\n---------\n");
253
254 down_read(&htable_rw_lock);
255 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
256 struct hlist_head *l = &cache_htable[i];
257
258 hlist_for_each_entry(ce, l, hlist) {
259 if (hlist_unhashed(&ce->hlist))
260 continue;
261
262 seq_printf(m,
263 "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
264 ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
265 ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
266 IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
267 ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
268
269 list_for_each_entry(t, &ce->tlist, list) {
270 seq_printf(m, " %s%s\n",
271 t->name,
272 READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
273 }
274 }
275 }
276 up_read(&htable_rw_lock);
277
278 return 0;
279}
280
281static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
282 size_t count, loff_t *ppos)
283{
284 char c;
285 int rc;
286
287 rc = get_user(c, buffer);
288 if (rc)
289 return rc;
290
291 if (c != '0')
292 return -EINVAL;
293
294 cifs_dbg(FYI, "clearing dfs cache\n");
295
296 down_write(&htable_rw_lock);
297 flush_cache_ents();
298 up_write(&htable_rw_lock);
299
300 return count;
301}
302
303static int dfscache_proc_open(struct inode *inode, struct file *file)
304{
305 return single_open(file, dfscache_proc_show, NULL);
306}
307
308const struct proc_ops dfscache_proc_ops = {
309 .proc_open = dfscache_proc_open,
310 .proc_read = seq_read,
311 .proc_lseek = seq_lseek,
312 .proc_release = single_release,
313 .proc_write = dfscache_proc_write,
314};
315
316#ifdef CONFIG_CIFS_DEBUG2
317static inline void dump_tgts(const struct cache_entry *ce)
318{
319 struct cache_dfs_tgt *t;
320
321 cifs_dbg(FYI, "target list:\n");
322 list_for_each_entry(t, &ce->tlist, list) {
323 cifs_dbg(FYI, " %s%s\n", t->name,
324 READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
325 }
326}
327
328static inline void dump_ce(const struct cache_entry *ce)
329{
330 cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
331 ce->path,
332 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
333 ce->etime.tv_nsec,
334 ce->hdr_flags, ce->ref_flags,
335 IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
336 ce->path_consumed,
337 cache_entry_expired(ce) ? "yes" : "no");
338 dump_tgts(ce);
339}
340
341static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
342{
343 int i;
344
345 cifs_dbg(FYI, "DFS referrals returned by the server:\n");
346 for (i = 0; i < numrefs; i++) {
347 const struct dfs_info3_param *ref = &refs[i];
348
349 cifs_dbg(FYI,
350 "\n"
351 "flags: 0x%x\n"
352 "path_consumed: %d\n"
353 "server_type: 0x%x\n"
354 "ref_flag: 0x%x\n"
355 "path_name: %s\n"
356 "node_name: %s\n"
357 "ttl: %d (%dm)\n",
358 ref->flags, ref->path_consumed, ref->server_type,
359 ref->ref_flag, ref->path_name, ref->node_name,
360 ref->ttl, ref->ttl / 60);
361 }
362}
363#else
364#define dump_tgts(e)
365#define dump_ce(e)
366#define dump_refs(r, n)
367#endif
368
369/**
370 * dfs_cache_init - Initialize DFS referral cache.
371 *
372 * Return zero if initialized successfully, otherwise non-zero.
373 */
374int dfs_cache_init(void)
375{
376 int rc;
377 int i;
378
379 dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
380 if (!dfscache_wq)
381 return -ENOMEM;
382
383 cache_slab = kmem_cache_create("cifs_dfs_cache",
384 sizeof(struct cache_entry), 0,
385 SLAB_HWCACHE_ALIGN, NULL);
386 if (!cache_slab) {
387 rc = -ENOMEM;
388 goto out_destroy_wq;
389 }
390
391 for (i = 0; i < CACHE_HTABLE_SIZE; i++)
392 INIT_HLIST_HEAD(&cache_htable[i]);
393
394 atomic_set(&cache_count, 0);
395 cache_cp = load_nls("utf8");
396 if (!cache_cp)
397 cache_cp = load_nls_default();
398
399 cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
400 return 0;
401
402out_destroy_wq:
403 destroy_workqueue(dfscache_wq);
404 return rc;
405}
406
407static int cache_entry_hash(const void *data, int size, unsigned int *hash)
408{
409 int i, clen;
410 const unsigned char *s = data;
411 wchar_t c;
412 unsigned int h = 0;
413
414 for (i = 0; i < size; i += clen) {
415 clen = cache_cp->char2uni(&s[i], size - i, &c);
416 if (unlikely(clen < 0)) {
417 cifs_dbg(VFS, "%s: can't convert char\n", __func__);
418 return clen;
419 }
420 c = cifs_toupper(c);
421 h = jhash(&c, sizeof(c), h);
422 }
423 *hash = h % CACHE_HTABLE_SIZE;
424 return 0;
425}
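
/*
 * Worked example (editorial, illustrative only; @path and @upath are
 * hypothetical): every character is upcased before being fed to jhash(), so
 * a path and its differently-cased twin land in the same bucket:
 *
 *	unsigned int h1, h2;
 *
 *	cache_entry_hash(path, strlen(path), &h1);	// e.g. \\srv\dfsroot
 *	cache_entry_hash(upath, strlen(upath), &h2);	// same path upcased
 *	// on success, h1 == h2 and both are < CACHE_HTABLE_SIZE
 */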
426
427/* Return target hint of a DFS cache entry */
428static inline char *get_tgt_name(const struct cache_entry *ce)
429{
430 struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);
431
432 return t ? t->name : ERR_PTR(-ENOENT);
433}
434
435/* Return the expiration time computed from a new entry's TTL */
436static inline struct timespec64 get_expire_time(int ttl)
437{
438 struct timespec64 ts = {
439 .tv_sec = ttl,
440 .tv_nsec = 0,
441 };
442 struct timespec64 now;
443
444 ktime_get_coarse_real_ts64(&now);
445 return timespec64_add(now, ts);
446}
447
448/* Allocate a new DFS target */
449static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
450{
451 struct cache_dfs_tgt *t;
452
453 t = kmalloc(sizeof(*t), GFP_ATOMIC);
454 if (!t)
455 return ERR_PTR(-ENOMEM);
456 t->name = kstrdup(name, GFP_ATOMIC);
457 if (!t->name) {
458 kfree(t);
459 return ERR_PTR(-ENOMEM);
460 }
461 t->path_consumed = path_consumed;
462 INIT_LIST_HEAD(&t->list);
463 return t;
464}
465
466/*
467 * Copy DFS referral information to a cache entry and conditionally update
468 * target hint.
469 */
470static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
471 struct cache_entry *ce, const char *tgthint)
472{
473 struct cache_dfs_tgt *target;
474 int i;
475
476 ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
477 ce->etime = get_expire_time(ce->ttl);
478 ce->srvtype = refs[0].server_type;
479 ce->hdr_flags = refs[0].flags;
480 ce->ref_flags = refs[0].ref_flag;
481 ce->path_consumed = refs[0].path_consumed;
482
483 for (i = 0; i < numrefs; i++) {
484 struct cache_dfs_tgt *t;
485
486 t = alloc_target(refs[i].node_name, refs[i].path_consumed);
487 if (IS_ERR(t)) {
488 free_tgts(ce);
489 return PTR_ERR(t);
490 }
491 if (tgthint && !strcasecmp(t->name, tgthint)) {
492 list_add(&t->list, &ce->tlist);
493 tgthint = NULL;
494 } else {
495 list_add_tail(&t->list, &ce->tlist);
496 }
497 ce->numtgts++;
498 }
499
500 target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
501 list);
502 WRITE_ONCE(ce->tgthint, target);
503
504 return 0;
505}
506
507/* Allocate a new cache entry */
508static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
509{
510 struct cache_entry *ce;
511 int rc;
512
513 ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
514 if (!ce)
515 return ERR_PTR(-ENOMEM);
516
517 ce->path = refs[0].path_name;
518 refs[0].path_name = NULL;
519
520 INIT_HLIST_NODE(&ce->hlist);
521 INIT_LIST_HEAD(&ce->tlist);
522
523 rc = copy_ref_data(refs, numrefs, ce, NULL);
524 if (rc) {
525 kfree(ce->path);
526 kmem_cache_free(cache_slab, ce);
527 ce = ERR_PTR(rc);
528 }
529 return ce;
530}
531
532static void remove_oldest_entry_locked(void)
533{
534 int i;
535 struct cache_entry *ce;
536 struct cache_entry *to_del = NULL;
537
538 WARN_ON(!rwsem_is_locked(&htable_rw_lock));
539
540 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
541 struct hlist_head *l = &cache_htable[i];
542
543 hlist_for_each_entry(ce, l, hlist) {
544 if (hlist_unhashed(&ce->hlist))
545 continue;
546 if (!to_del || timespec64_compare(&ce->etime,
547 &to_del->etime) < 0)
548 to_del = ce;
549 }
550 }
551
552 if (!to_del) {
553 cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
554 return;
555 }
556
557 cifs_dbg(FYI, "%s: removing entry\n", __func__);
558 dump_ce(to_del);
559 flush_cache_ent(to_del);
560}
561
562/* Add a new DFS cache entry */
563static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
564 int numrefs)
565{
566 int rc;
567 struct cache_entry *ce;
568 unsigned int hash;
569
570 WARN_ON(!rwsem_is_locked(&htable_rw_lock));
571
572 if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
573 cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
574 remove_oldest_entry_locked();
575 }
576
577 rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
578 if (rc)
579 return ERR_PTR(rc);
580
581 ce = alloc_cache_entry(refs, numrefs);
582 if (IS_ERR(ce))
583 return ce;
584
585 spin_lock(&cache_ttl_lock);
586 if (!cache_ttl) {
587 cache_ttl = ce->ttl;
588 queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
589 } else {
590 cache_ttl = min_t(int, cache_ttl, ce->ttl);
591 mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
592 }
593 spin_unlock(&cache_ttl_lock);
594
595 hlist_add_head(&ce->hlist, &cache_htable[hash]);
596 dump_ce(ce);
597
598 atomic_inc(&cache_count);
599
600 return ce;
601}
602
603/* Check if two DFS paths are equal. @s1 and @s2 are expected to be in @cache_cp's charset */
604static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
605{
606 int i, l1, l2;
607 wchar_t c1, c2;
608
609 if (len1 != len2)
610 return false;
611
612 for (i = 0; i < len1; i += l1) {
613 l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
614 l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
615 if (unlikely(l1 < 0 && l2 < 0)) {
616 if (s1[i] != s2[i])
617 return false;
618 l1 = 1;
619 continue;
620 }
621 if (l1 != l2)
622 return false;
623 if (cifs_toupper(c1) != cifs_toupper(c2))
624 return false;
625 }
626 return true;
627}
628
629static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
630{
631 struct cache_entry *ce;
632
633 hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
634 if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
635 dump_ce(ce);
636 return ce;
637 }
638 }
639 return ERR_PTR(-ENOENT);
640}
641
642/*
643 * Find a DFS cache entry in the hash table by walking prefixes of the normalized @path, longest first.
644 *
645 * Use whole path components in the match. Must be called with htable_rw_lock held.
646 *
647 * Return cached entry if successful.
648 * Return ERR_PTR(-ENOENT) if the entry is not found.
649 * Return error ptr otherwise.
650 */
651static struct cache_entry *lookup_cache_entry(const char *path)
652{
653 struct cache_entry *ce;
654 int cnt = 0;
655 const char *s = path, *e;
656 char sep = *s;
657 unsigned int hash;
658 int rc;
659
660 while ((s = strchr(s, sep)) && ++cnt < 3)
661 s++;
662
663 if (cnt < 3) {
664 rc = cache_entry_hash(path, strlen(path), &hash);
665 if (rc)
666 return ERR_PTR(rc);
667 return __lookup_cache_entry(path, hash, strlen(path));
668 }
669 /*
670 * Handle paths that have more than two path components and are a complete prefix of the DFS
671 * referral request path (@path).
672 *
673 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
674 */
675 e = path + strlen(path) - 1;
676 while (e > s) {
677 int len;
678
679 /* skip separators */
680 while (e > s && *e == sep)
681 e--;
682 if (e == s)
683 break;
684
685 len = e + 1 - path;
686 rc = cache_entry_hash(path, len, &hash);
687 if (rc)
688 return ERR_PTR(rc);
689 ce = __lookup_cache_entry(path, hash, len);
690 if (!IS_ERR(ce))
691 return ce;
692
693 /* backward until separator */
694 while (e > s && *e != sep)
695 e--;
696 }
697 return ERR_PTR(-ENOENT);
698}
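
/*
 * Illustrative example (editorial; the paths are hypothetical): with an
 * entry cached for \\dom\dfsroot\link, looking up a longer path tries the
 * full path first and then walks back one component at a time:
 *
 *	down_read(&htable_rw_lock);
 *	ce = lookup_cache_entry(npath);	// \\dom\dfsroot\link\dir1\dir2
 *	// tries ...\link\dir1\dir2, then ...\link\dir1, then ...\link,
 *	// which matches the cached entry
 *	up_read(&htable_rw_lock);
 */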
699
700/**
701 * dfs_cache_destroy - destroy DFS referral cache
702 */
703void dfs_cache_destroy(void)
704{
705 cancel_delayed_work_sync(&refresh_task);
706 unload_nls(cache_cp);
707 free_mount_group_list();
708 flush_cache_ents();
709 kmem_cache_destroy(cache_slab);
710 destroy_workqueue(dfscache_wq);
711
712 cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
713}
714
715/* Update a cache entry with the new referral in @refs */
716static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
717 int numrefs)
718{
719 struct cache_dfs_tgt *target;
720 char *th = NULL;
721 int rc;
722
723 WARN_ON(!rwsem_is_locked(&htable_rw_lock));
724
725 target = READ_ONCE(ce->tgthint);
726 if (target) {
727 th = kstrdup(target->name, GFP_ATOMIC);
728 if (!th)
729 return -ENOMEM;
730 }
731
732 free_tgts(ce);
733 ce->numtgts = 0;
734
735 rc = copy_ref_data(refs, numrefs, ce, th);
736
737 kfree(th);
738
739 return rc;
740}
741
742static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
743 struct dfs_info3_param **refs, int *numrefs)
744{
745 int rc;
746 int i;
747
748 *refs = NULL;
749 *numrefs = 0;
750
751 if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
752 return -EOPNOTSUPP;
753 if (unlikely(!cache_cp))
754 return -EINVAL;
755
756 cifs_dbg(FYI, "%s: ipc=%s referral=%s\n", __func__, ses->tcon_ipc->tree_name, path);
757 rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
758 NO_MAP_UNI_RSVD);
759 if (!rc) {
760 struct dfs_info3_param *ref = *refs;
761
762 for (i = 0; i < *numrefs; i++)
763 convert_delimiter(ref[i].path_name, '\\');
764 }
765 return rc;
766}
767
768/*
769 * Find, create or update a DFS cache entry.
770 *
771 * If the entry wasn't found, a new one is created. If it was found but has
772 * expired, the entry is updated with the new referral.
773 *
774 * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
775 * handle them properly.
776 *
777 * On success, return the entry with @htable_rw_lock held for reading; otherwise an error pointer.
778 */
779static struct cache_entry *cache_refresh_path(const unsigned int xid,
780 struct cifs_ses *ses,
781 const char *path,
782 bool force_refresh)
783{
784 struct dfs_info3_param *refs = NULL;
785 struct cache_entry *ce;
786 int numrefs = 0;
787 int rc;
788
789 cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
790
791 down_read(&htable_rw_lock);
792
793 ce = lookup_cache_entry(path);
794 if (!IS_ERR(ce)) {
795 if (!force_refresh && !cache_entry_expired(ce))
796 return ce;
797 } else if (PTR_ERR(ce) != -ENOENT) {
798 up_read(&htable_rw_lock);
799 return ce;
800 }
801
802 /*
803 * Unlock shared access as we don't want to hold any locks while getting
804 * a new referral. The @ses used for performing the I/O could be
805 * reconnecting, and it acquires @htable_rw_lock to look up the dfs cache
806 * in order to fail over, if necessary.
807 */
808 up_read(&htable_rw_lock);
809
810 /*
811 * Either the entry was not found, or it is expired, or it is a forced
812 * refresh.
813 * Request a new DFS referral in order to create or update a cache entry.
814 */
815 rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
816 if (rc) {
817 ce = ERR_PTR(rc);
818 goto out;
819 }
820
821 dump_refs(refs, numrefs);
822
823 down_write(&htable_rw_lock);
824 /* Re-check as another task might have added or refreshed it already */
825 ce = lookup_cache_entry(path);
826 if (!IS_ERR(ce)) {
827 if (force_refresh || cache_entry_expired(ce)) {
828 rc = update_cache_entry_locked(ce, refs, numrefs);
829 if (rc)
830 ce = ERR_PTR(rc);
831 }
832 } else if (PTR_ERR(ce) == -ENOENT) {
833 ce = add_cache_entry_locked(refs, numrefs);
834 }
835
836 if (IS_ERR(ce)) {
837 up_write(&htable_rw_lock);
838 goto out;
839 }
840
841 downgrade_write(&htable_rw_lock);
842out:
843 free_dfs_info_array(refs, numrefs);
844 return ce;
845}
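
/*
 * Editorial sketch of the expected calling pattern (mirrors the callers
 * below): on success the entry is returned with @htable_rw_lock held for
 * reading, so the caller must drop the lock once done with @ce:
 *
 *	ce = cache_refresh_path(xid, ses, npath, false);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *	rc = get_targets(ce, &tl);
 *	up_read(&htable_rw_lock);
 */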
846
847/*
848 * Set up a DFS referral from a given cache entry.
849 *
850 * Must be called with htable_rw_lock held.
851 */
852static int setup_referral(const char *path, struct cache_entry *ce,
853 struct dfs_info3_param *ref, const char *target)
854{
855 int rc;
856
857 cifs_dbg(FYI, "%s: set up new ref\n", __func__);
858
859 memset(ref, 0, sizeof(*ref));
860
861 ref->path_name = kstrdup(path, GFP_ATOMIC);
862 if (!ref->path_name)
863 return -ENOMEM;
864
865 ref->node_name = kstrdup(target, GFP_ATOMIC);
866 if (!ref->node_name) {
867 rc = -ENOMEM;
868 goto err_free_path;
869 }
870
871 ref->path_consumed = ce->path_consumed;
872 ref->ttl = ce->ttl;
873 ref->server_type = ce->srvtype;
874 ref->ref_flag = ce->ref_flags;
875 ref->flags = ce->hdr_flags;
876
877 return 0;
878
879err_free_path:
880 kfree(ref->path_name);
881 ref->path_name = NULL;
882 return rc;
883}
884
885/* Return target list of a DFS cache entry */
886static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
887{
888 int rc;
889 struct list_head *head = &tl->tl_list;
890 struct cache_dfs_tgt *t;
891 struct dfs_cache_tgt_iterator *it, *nit;
892
893 memset(tl, 0, sizeof(*tl));
894 INIT_LIST_HEAD(head);
895
896 list_for_each_entry(t, &ce->tlist, list) {
897 it = kzalloc(sizeof(*it), GFP_ATOMIC);
898 if (!it) {
899 rc = -ENOMEM;
900 goto err_free_it;
901 }
902
903 it->it_name = kstrdup(t->name, GFP_ATOMIC);
904 if (!it->it_name) {
905 kfree(it);
906 rc = -ENOMEM;
907 goto err_free_it;
908 }
909 it->it_path_consumed = t->path_consumed;
910
911 if (READ_ONCE(ce->tgthint) == t)
912 list_add(&it->it_list, head);
913 else
914 list_add_tail(&it->it_list, head);
915 }
916
917 tl->tl_numtgts = ce->numtgts;
918
919 return 0;
920
921err_free_it:
922 list_for_each_entry_safe(it, nit, head, it_list) {
923 list_del(&it->it_list);
924 kfree(it->it_name);
925 kfree(it);
926 }
927 return rc;
928}
929
930/**
931 * dfs_cache_find - find a DFS cache entry
932 *
933 * If it doesn't find the cache entry, then it will get a DFS referral
934 * for @path and create a new entry.
935 *
936 * In case the cache entry exists but expired, it will get a DFS referral
937 * for @path and then update the respective cache entry.
938 *
939 * These parameters are passed down to the get_dfs_refer() call if it
940 * needs to be issued:
941 * @xid: syscall xid
942 * @ses: smb session to issue the request on
943 * @cp: codepage
944 * @remap: path character remapping type
945 * @path: path to lookup in DFS referral cache.
946 *
947 * @ref: when non-NULL, store single DFS referral result in it.
948 * @tgt_list: when non-NULL, store complete DFS target list in it.
949 *
950 * Return zero if the target was found, otherwise non-zero.
951 */
952int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
953 int remap, const char *path, struct dfs_info3_param *ref,
954 struct dfs_cache_tgt_list *tgt_list)
955{
956 int rc;
957 const char *npath;
958 struct cache_entry *ce;
959
960 npath = dfs_cache_canonical_path(path, cp, remap);
961 if (IS_ERR(npath))
962 return PTR_ERR(npath);
963
964 ce = cache_refresh_path(xid, ses, npath, false);
965 if (IS_ERR(ce)) {
966 rc = PTR_ERR(ce);
967 goto out_free_path;
968 }
969
970 if (ref)
971 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
972 else
973 rc = 0;
974 if (!rc && tgt_list)
975 rc = get_targets(ce, tgt_list);
976
977 up_read(&htable_rw_lock);
978
979out_free_path:
980 kfree(npath);
981 return rc;
982}
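
/*
 * Illustrative caller sketch (editorial; @cifs_sb and the path are example
 * assumptions): resolve a DFS path and walk the returned target list with
 * the iterator helpers from dfs_cache.h:
 *
 *	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
 *	struct dfs_cache_tgt_iterator *it;
 *	int rc;
 *
 *	rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
 *			    "//dom/dfsroot/link", NULL, &tl);
 *	if (!rc) {
 *		for (it = dfs_cache_get_tgt_iterator(&tl); it;
 *		     it = dfs_cache_get_next_tgt(&tl, it))
 *			cifs_dbg(FYI, "target: %s\n", dfs_cache_get_tgt_name(it));
 *		dfs_cache_free_tgts(&tl);
 *	}
 */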
983
984/**
985 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
986 * the currently connected server.
987 *
988 * NOTE: This function will neither update a cache entry in case it was
989 * expired, nor create a new cache entry if @path hasn't been found. It heavily
990 * relies on an existing cache entry.
991 *
992 * @path: canonical DFS path to lookup in the DFS referral cache.
993 * @ref: when non-NULL, store single DFS referral result in it.
994 * @tgt_list: when non-NULL, store complete DFS target list in it.
995 *
996 * Return 0 if successful.
997 * Return -ENOENT if the entry was not found.
998 * Return non-zero for other errors.
999 */
1000int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
1001 struct dfs_cache_tgt_list *tgt_list)
1002{
1003 int rc;
1004 struct cache_entry *ce;
1005
1006 cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
1007
1008 down_read(&htable_rw_lock);
1009
1010 ce = lookup_cache_entry(path);
1011 if (IS_ERR(ce)) {
1012 rc = PTR_ERR(ce);
1013 goto out_unlock;
1014 }
1015
1016 if (ref)
1017 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
1018 else
1019 rc = 0;
1020 if (!rc && tgt_list)
1021 rc = get_targets(ce, tgt_list);
1022
1023out_unlock:
1024 up_read(&htable_rw_lock);
1025 return rc;
1026}
1027
1028/**
1029 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
1030 * without sending any requests to the currently connected server.
1031 *
1032 * NOTE: This function will neither update a cache entry in case it was
1033 * expired, nor create a new cache entry if @path hasn't been found. It heavily
1034 * relies on an existing cache entry.
1035 *
1036 * @path: canonical DFS path to lookup in DFS referral cache.
1037 * @it: target iterator which contains the target hint to update the cache
1038 * entry with.
1039 *
1040 * The hint is only updated if both @path and the target named by @it are found in the cache; otherwise nothing happens.
1041 */
1042void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
1043{
1044 struct cache_dfs_tgt *t;
1045 struct cache_entry *ce;
1046
1047 if (!path || !it)
1048 return;
1049
1050 cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
1051
1052 down_read(&htable_rw_lock);
1053
1054 ce = lookup_cache_entry(path);
1055 if (IS_ERR(ce))
1056 goto out_unlock;
1057
1058 t = READ_ONCE(ce->tgthint);
1059
1060 if (unlikely(!strcasecmp(it->it_name, t->name)))
1061 goto out_unlock;
1062
1063 list_for_each_entry(t, &ce->tlist, list) {
1064 if (!strcasecmp(t->name, it->it_name)) {
1065 WRITE_ONCE(ce->tgthint, t);
1066 cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
1067 it->it_name);
1068 break;
1069 }
1070 }
1071
1072out_unlock:
1073 up_read(&htable_rw_lock);
1074}
1075
1076/**
1077 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
1078 * target iterator (@it).
1079 *
1080 * @path: canonical DFS path to lookup in DFS referral cache.
1081 * @it: DFS target iterator.
1082 * @ref: DFS referral pointer to set up the gathered information.
1083 *
1084 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
1085 */
1086int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
1087 struct dfs_info3_param *ref)
1088{
1089 int rc;
1090 struct cache_entry *ce;
1091
1092 if (!it || !ref)
1093 return -EINVAL;
1094
1095 cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
1096
1097 down_read(&htable_rw_lock);
1098
1099 ce = lookup_cache_entry(path);
1100 if (IS_ERR(ce)) {
1101 rc = PTR_ERR(ce);
1102 goto out_unlock;
1103 }
1104
1105 cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
1106
1107 rc = setup_referral(path, ce, ref, it->it_name);
1108
1109out_unlock:
1110 up_read(&htable_rw_lock);
1111 return rc;
1112}
1113
1114/**
1115 * dfs_cache_add_refsrv_session - add SMB session of referral server
1116 *
1117 * @mount_id: mount group uuid to lookup.
1118 * @ses: reference counted SMB session of referral server.
1119 */
1120void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses)
1121{
1122 struct mount_group *mg;
1123
1124 if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses))
1125 return;
1126
1127 mg = get_mount_group(mount_id);
1128 if (WARN_ON_ONCE(IS_ERR(mg)))
1129 return;
1130
1131 spin_lock(&mg->lock);
1132 if (mg->num_sessions < ARRAY_SIZE(mg->sessions))
1133 mg->sessions[mg->num_sessions++] = ses;
1134 spin_unlock(&mg->lock);
1135 kref_put(&mg->refcount, mount_group_release);
1136}
1137
1138/**
1139 * dfs_cache_put_refsrv_sessions - put all referral server sessions
1140 *
1141 * Put all SMB sessions from the given mount group id.
1142 *
1143 * @mount_id: mount group uuid to lookup.
1144 */
1145void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
1146{
1147 struct mount_group *mg;
1148
1149 if (!mount_id || uuid_is_null(mount_id))
1150 return;
1151
1152 mutex_lock(&mount_group_list_lock);
1153 mg = find_mount_group_locked(mount_id);
1154 if (IS_ERR(mg)) {
1155 mutex_unlock(&mount_group_list_lock);
1156 return;
1157 }
1158 mutex_unlock(&mount_group_list_lock);
1159 kref_put(&mg->refcount, mount_group_release);
1160}
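
/*
 * Editorial lifecycle sketch (the cifs_sb fields are only examples): a mount
 * generates a group id, registers each referral-server session under it, and
 * drops the whole group on umount or mount failure:
 *
 *	uuid_gen(&cifs_sb->dfs_mount_id);
 *	dfs_cache_add_refsrv_session(&cifs_sb->dfs_mount_id, ses);
 *	...
 *	dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id);
 */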
1161
1162/* Extract the share from a DFS target and return a pointer to the prefix path, or an ERR_PTR on error */
1163static const char *parse_target_share(const char *target, char **share)
1164{
1165 const char *s, *seps = "/\\";
1166 size_t len;
1167
1168 s = strpbrk(target + 1, seps);
1169 if (!s)
1170 return ERR_PTR(-EINVAL);
1171
1172 len = strcspn(s + 1, seps);
1173 if (!len)
1174 return ERR_PTR(-EINVAL);
1175 s += len;
1176
1177 len = s - target + 1;
1178 *share = kstrndup(target, len, GFP_KERNEL);
1179 if (!*share)
1180 return ERR_PTR(-ENOMEM);
1181
1182 s = target + len;
1183 return s + strspn(s, seps);
1184}
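
/*
 * Worked example (editorial): for a target node of the form \srv\share\dir1,
 *
 *	char *share;
 *	const char *prefix = parse_target_share("\\srv\\share\\dir1", &share);
 *
 * duplicates "\srv\share" into *share and returns a pointer to "dir1" inside
 * @target; a target with no extra components yields an empty prefix.
 */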
1185
1186/**
1187 * dfs_cache_get_tgt_share - parse a DFS target
1188 *
1189 * @path: DFS full path
1190 * @it: DFS target iterator.
1191 * @share: tree name.
1192 * @prefix: prefix path.
1193 *
1194 * Return zero if target was parsed correctly, otherwise non-zero.
1195 */
1196int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
1197 char **prefix)
1198{
1199 char sep;
1200 char *target_share;
1201 char *ppath = NULL;
1202 const char *target_ppath, *dfsref_ppath;
1203 size_t target_pplen, dfsref_pplen;
1204 size_t len, c;
1205
1206 if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
1207 return -EINVAL;
1208
1209 sep = it->it_name[0];
1210 if (sep != '\\' && sep != '/')
1211 return -EINVAL;
1212
1213 target_ppath = parse_target_share(it->it_name, &target_share);
1214 if (IS_ERR(target_ppath))
1215 return PTR_ERR(target_ppath);
1216
1217 /* point to prefix in DFS referral path */
1218 dfsref_ppath = path + it->it_path_consumed;
1219 dfsref_ppath += strspn(dfsref_ppath, "/\\");
1220
1221 target_pplen = strlen(target_ppath);
1222 dfsref_pplen = strlen(dfsref_ppath);
1223
1224 /* merge prefix paths from DFS referral path and target node */
1225 if (target_pplen || dfsref_pplen) {
1226 len = target_pplen + dfsref_pplen + 2;
1227 ppath = kzalloc(len, GFP_KERNEL);
1228 if (!ppath) {
1229 kfree(target_share);
1230 return -ENOMEM;
1231 }
1232 c = strscpy(ppath, target_ppath, len);
1233 if (c && dfsref_pplen)
1234 ppath[c] = sep;
1235 strlcat(ppath, dfsref_ppath, len);
1236 }
1237 *share = target_share;
1238 *prefix = ppath;
1239 return 0;
1240}
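
/*
 * Illustrative example (editorial; all names hypothetical): for a full path
 * \dom\dfsroot\link\dir1 whose referral consumed \dom\dfsroot\link and whose
 * chosen target is \srv\share\tdir,
 *
 *	rc = dfs_cache_get_tgt_share(path, it, &share, &prefix);
 *
 * returns share == "\srv\share" and prefix == "tdir\dir1", i.e. the target's
 * own prefix followed by the unconsumed part of @path.
 */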
1241
1242static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
1243{
1244 char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
1245 const char *host;
1246 size_t hostlen;
1247 struct sockaddr_storage ss;
1248 bool match;
1249 int rc;
1250
1251 if (strcasecmp(s1, s2))
1252 return false;
1253
1254 /*
1255 * Resolve the share's hostname and check whether the server address matches. If the hostname
1256 * cannot be resolved or the address cannot be converted, just assume it matches.
1257 */
1258 extract_unc_hostname(s1, &host, &hostlen);
1259 scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
1260
1261 rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
1262 if (rc < 0) {
1263 cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
1264 __func__, (int)hostlen, host);
1265 return true;
1266 }
1267
1268 cifs_server_lock(server);
1269 match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
1270 cifs_server_unlock(server);
1271
1272 return match;
1273}
1274
1275/*
1276 * Mark the dfs tcon for reconnecting when the currently connected tcon does not match any of the new
1277 * target shares in @new_tl.
1278 */
1279static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
1280 struct dfs_cache_tgt_list *old_tl,
1281 struct dfs_cache_tgt_list *new_tl)
1282{
1283 struct dfs_cache_tgt_iterator *oit, *nit;
1284
1285 for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
1286 oit = dfs_cache_get_next_tgt(old_tl, oit)) {
1287 for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
1288 nit = dfs_cache_get_next_tgt(new_tl, nit)) {
1289 if (target_share_equal(server,
1290 dfs_cache_get_tgt_name(oit),
1291 dfs_cache_get_tgt_name(nit)))
1292 return;
1293 }
1294 }
1295
1296 cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
1297 cifs_signal_cifsd_for_reconnect(server, true);
1298}
1299
1300/* Refresh dfs referral of tcon and mark it for reconnect if needed */
1301static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
1302{
1303 struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
1304 struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
1305 struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
1306 struct cifs_tcon *ipc = ses->tcon_ipc;
1307 bool needs_refresh = false;
1308 struct cache_entry *ce;
1309 unsigned int xid;
1310 int rc = 0;
1311
1312 xid = get_xid();
1313
1314 down_read(&htable_rw_lock);
1315 ce = lookup_cache_entry(path);
1316 needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
1317 if (!IS_ERR(ce)) {
1318 rc = get_targets(ce, &old_tl);
1319 cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
1320 }
1321 up_read(&htable_rw_lock);
1322
1323 if (!needs_refresh) {
1324 rc = 0;
1325 goto out;
1326 }
1327
1328 spin_lock(&ipc->tc_lock);
1329 if (ses->ses_status != SES_GOOD || ipc->status != TID_GOOD) {
1330 spin_unlock(&ipc->tc_lock);
1331 cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
1332 goto out;
1333 }
1334 spin_unlock(&ipc->tc_lock);
1335
1336 ce = cache_refresh_path(xid, ses, path, true);
1337 if (!IS_ERR(ce)) {
1338 rc = get_targets(ce, &new_tl);
1339 up_read(&htable_rw_lock);
1340 cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
1341 mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
1342 }
1343
1344out:
1345 free_xid(xid);
1346 dfs_cache_free_tgts(&old_tl);
1347 dfs_cache_free_tgts(&new_tl);
1348 return rc;
1349}
1350
1351static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
1352{
1353 struct TCP_Server_Info *server = tcon->ses->server;
1354
1355 mutex_lock(&server->refpath_lock);
1356 if (server->leaf_fullpath)
1357 __refresh_tcon(server->leaf_fullpath + 1, tcon, force_refresh);
1358 mutex_unlock(&server->refpath_lock);
1359 return 0;
1360}
1361
1362/**
1363 * dfs_cache_remount_fs - remount a DFS share
1364 *
1365 * Reconfigure the dfs mount by forcing a new DFS referral; if the currently cached targets do not
1366 * match any of the new targets, mark the share for reconnect.
1367 *
1368 * @cifs_sb: cifs superblock.
1369 *
1370 * Return zero if remounted, otherwise non-zero.
1371 */
1372int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
1373{
1374 struct cifs_tcon *tcon;
1375 struct TCP_Server_Info *server;
1376
1377 if (!cifs_sb || !cifs_sb->master_tlink)
1378 return -EINVAL;
1379
1380 tcon = cifs_sb_master_tcon(cifs_sb);
1381 server = tcon->ses->server;
1382
1383 if (!server->origin_fullpath) {
1384 cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
1385 return 0;
1386 }
1387
1388 if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
1389 cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__);
1390 return -EINVAL;
1391 }
1392 /*
1393 * After reconnecting to a different server, unique ids won't match anymore, so we disable
1394 * serverino. This prevents dentry revalidation from wrongly treating dentries as stale (ESTALE).
1395 */
1396 cifs_autodisable_serverino(cifs_sb);
1397 /*
1398 * Force the use of prefix path to support failover on DFS paths that resolve to targets
1399 * that have different prefix paths.
1400 */
1401 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
1402
1403 return refresh_tcon(tcon, true);
1404}
1405
1406/*
1407 * Worker that refreshes the DFS cache for all active mounts, rescheduled at the lowest TTL
1408 * value seen in a DFS referral.
1409 */
1410static void refresh_cache_worker(struct work_struct *work)
1411{
1412 struct TCP_Server_Info *server;
1413 struct cifs_tcon *tcon, *ntcon;
1414 struct list_head tcons;
1415 struct cifs_ses *ses;
1416
1417 INIT_LIST_HEAD(&tcons);
1418
1419 spin_lock(&cifs_tcp_ses_lock);
1420 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
1421 if (!server->leaf_fullpath)
1422 continue;
1423
1424 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1425 if (ses->tcon_ipc) {
1426 ses->ses_count++;
1427 list_add_tail(&ses->tcon_ipc->ulist, &tcons);
1428 }
1429 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
1430 if (!tcon->ipc) {
1431 tcon->tc_count++;
1432 list_add_tail(&tcon->ulist, &tcons);
1433 }
1434 }
1435 }
1436 }
1437 spin_unlock(&cifs_tcp_ses_lock);
1438
1439 list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
1440 struct TCP_Server_Info *server = tcon->ses->server;
1441
1442 list_del_init(&tcon->ulist);
1443
1444 mutex_lock(&server->refpath_lock);
1445 if (server->leaf_fullpath)
1446 __refresh_tcon(server->leaf_fullpath + 1, tcon, false);
1447 mutex_unlock(&server->refpath_lock);
1448
1449 if (tcon->ipc)
1450 cifs_put_smb_ses(tcon->ses);
1451 else
1452 cifs_put_tcon(tcon);
1453 }
1454
1455 spin_lock(&cache_ttl_lock);
1456 queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
1457 spin_unlock(&cache_ttl_lock);
1458}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * DFS referral cache routines
4 *
5 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
6 */
7
8#include <linux/jhash.h>
9#include <linux/ktime.h>
10#include <linux/slab.h>
11#include <linux/proc_fs.h>
12#include <linux/nls.h>
13#include <linux/workqueue.h>
14#include "cifsglob.h"
15#include "smb2pdu.h"
16#include "smb2proto.h"
17#include "cifsproto.h"
18#include "cifs_debug.h"
19#include "cifs_unicode.h"
20#include "smb2glob.h"
21
22#include "dfs_cache.h"
23
24#define CACHE_HTABLE_SIZE 32
25#define CACHE_MAX_ENTRIES 64
26
27#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
28 DFSREF_STORAGE_SERVER))
29
30struct cache_dfs_tgt {
31 char *name;
32 int path_consumed;
33 struct list_head list;
34};
35
36struct cache_entry {
37 struct hlist_node hlist;
38 const char *path;
39 int ttl;
40 int srvtype;
41 int flags;
42 struct timespec64 etime;
43 int path_consumed;
44 int numtgts;
45 struct list_head tlist;
46 struct cache_dfs_tgt *tgthint;
47};
48
49struct vol_info {
50 char *fullpath;
51 spinlock_t smb_vol_lock;
52 struct smb_vol smb_vol;
53 char *mntdata;
54 struct list_head list;
55 struct list_head rlist;
56 struct kref refcnt;
57};
58
59static struct kmem_cache *cache_slab __read_mostly;
60static struct workqueue_struct *dfscache_wq __read_mostly;
61
62static int cache_ttl;
63static DEFINE_SPINLOCK(cache_ttl_lock);
64
65static struct nls_table *cache_nlsc;
66
67/*
68 * Number of entries in the cache
69 */
70static atomic_t cache_count;
71
72static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
73static DECLARE_RWSEM(htable_rw_lock);
74
75static LIST_HEAD(vol_list);
76static DEFINE_SPINLOCK(vol_list_lock);
77
78static void refresh_cache_worker(struct work_struct *work);
79
80static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
81
82static int get_normalized_path(const char *path, char **npath)
83{
84 if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
85 return -EINVAL;
86
87 if (*path == '\\') {
88 *npath = (char *)path;
89 } else {
90 *npath = kstrndup(path, strlen(path), GFP_KERNEL);
91 if (!*npath)
92 return -ENOMEM;
93 convert_delimiter(*npath, '\\');
94 }
95 return 0;
96}
97
98static inline void free_normalized_path(const char *path, char *npath)
99{
100 if (path != npath)
101 kfree(npath);
102}
103
104static inline bool cache_entry_expired(const struct cache_entry *ce)
105{
106 struct timespec64 ts;
107
108 ktime_get_coarse_real_ts64(&ts);
109 return timespec64_compare(&ts, &ce->etime) >= 0;
110}
111
112static inline void free_tgts(struct cache_entry *ce)
113{
114 struct cache_dfs_tgt *t, *n;
115
116 list_for_each_entry_safe(t, n, &ce->tlist, list) {
117 list_del(&t->list);
118 kfree(t->name);
119 kfree(t);
120 }
121}
122
123static inline void flush_cache_ent(struct cache_entry *ce)
124{
125 hlist_del_init(&ce->hlist);
126 kfree(ce->path);
127 free_tgts(ce);
128 atomic_dec(&cache_count);
129 kmem_cache_free(cache_slab, ce);
130}
131
132static void flush_cache_ents(void)
133{
134 int i;
135
136 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
137 struct hlist_head *l = &cache_htable[i];
138 struct hlist_node *n;
139 struct cache_entry *ce;
140
141 hlist_for_each_entry_safe(ce, n, l, hlist) {
142 if (!hlist_unhashed(&ce->hlist))
143 flush_cache_ent(ce);
144 }
145 }
146}
147
148/*
149 * dfs cache /proc file
150 */
151static int dfscache_proc_show(struct seq_file *m, void *v)
152{
153 int i;
154 struct cache_entry *ce;
155 struct cache_dfs_tgt *t;
156
157 seq_puts(m, "DFS cache\n---------\n");
158
159 down_read(&htable_rw_lock);
160 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
161 struct hlist_head *l = &cache_htable[i];
162
163 hlist_for_each_entry(ce, l, hlist) {
164 if (hlist_unhashed(&ce->hlist))
165 continue;
166
167 seq_printf(m,
168 "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
169 "interlink=%s,path_consumed=%d,expired=%s\n",
170 ce->path,
171 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
172 ce->ttl, ce->etime.tv_nsec,
173 IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
174 ce->path_consumed,
175 cache_entry_expired(ce) ? "yes" : "no");
176
177 list_for_each_entry(t, &ce->tlist, list) {
178 seq_printf(m, " %s%s\n",
179 t->name,
180 ce->tgthint == t ? " (target hint)" : "");
181 }
182 }
183 }
184 up_read(&htable_rw_lock);
185
186 return 0;
187}
188
189static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
190 size_t count, loff_t *ppos)
191{
192 char c;
193 int rc;
194
195 rc = get_user(c, buffer);
196 if (rc)
197 return rc;
198
199 if (c != '0')
200 return -EINVAL;
201
202 cifs_dbg(FYI, "clearing dfs cache\n");
203
204 down_write(&htable_rw_lock);
205 flush_cache_ents();
206 up_write(&htable_rw_lock);
207
208 return count;
209}
210
211static int dfscache_proc_open(struct inode *inode, struct file *file)
212{
213 return single_open(file, dfscache_proc_show, NULL);
214}
215
216const struct proc_ops dfscache_proc_ops = {
217 .proc_open = dfscache_proc_open,
218 .proc_read = seq_read,
219 .proc_lseek = seq_lseek,
220 .proc_release = single_release,
221 .proc_write = dfscache_proc_write,
222};
223
224#ifdef CONFIG_CIFS_DEBUG2
225static inline void dump_tgts(const struct cache_entry *ce)
226{
227 struct cache_dfs_tgt *t;
228
229 cifs_dbg(FYI, "target list:\n");
230 list_for_each_entry(t, &ce->tlist, list) {
231 cifs_dbg(FYI, " %s%s\n", t->name,
232 ce->tgthint == t ? " (target hint)" : "");
233 }
234}
235
236static inline void dump_ce(const struct cache_entry *ce)
237{
238 cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,interlink=%s,path_consumed=%d,expired=%s\n",
239 ce->path,
240 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
241 ce->etime.tv_nsec,
242 IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
243 ce->path_consumed,
244 cache_entry_expired(ce) ? "yes" : "no");
245 dump_tgts(ce);
246}
247
248static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
249{
250 int i;
251
252 cifs_dbg(FYI, "DFS referrals returned by the server:\n");
253 for (i = 0; i < numrefs; i++) {
254 const struct dfs_info3_param *ref = &refs[i];
255
256 cifs_dbg(FYI,
257 "\n"
258 "flags: 0x%x\n"
259 "path_consumed: %d\n"
260 "server_type: 0x%x\n"
261 "ref_flag: 0x%x\n"
262 "path_name: %s\n"
263 "node_name: %s\n"
264 "ttl: %d (%dm)\n",
265 ref->flags, ref->path_consumed, ref->server_type,
266 ref->ref_flag, ref->path_name, ref->node_name,
267 ref->ttl, ref->ttl / 60);
268 }
269}
270#else
271#define dump_tgts(e)
272#define dump_ce(e)
273#define dump_refs(r, n)
274#endif
275
276/**
277 * dfs_cache_init - Initialize DFS referral cache.
278 *
279 * Return zero if initialized successfully, otherwise non-zero.
280 */
281int dfs_cache_init(void)
282{
283 int rc;
284 int i;
285
286 dfscache_wq = alloc_workqueue("cifs-dfscache",
287 WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
288 if (!dfscache_wq)
289 return -ENOMEM;
290
291 cache_slab = kmem_cache_create("cifs_dfs_cache",
292 sizeof(struct cache_entry), 0,
293 SLAB_HWCACHE_ALIGN, NULL);
294 if (!cache_slab) {
295 rc = -ENOMEM;
296 goto out_destroy_wq;
297 }
298
299 for (i = 0; i < CACHE_HTABLE_SIZE; i++)
300 INIT_HLIST_HEAD(&cache_htable[i]);
301
302 atomic_set(&cache_count, 0);
303 cache_nlsc = load_nls_default();
304
305 cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
306 return 0;
307
308out_destroy_wq:
309 destroy_workqueue(dfscache_wq);
310 return rc;
311}
312
313static inline unsigned int cache_entry_hash(const void *data, int size)
314{
315 unsigned int h;
316
317 h = jhash(data, size, 0);
318 return h & (CACHE_HTABLE_SIZE - 1);
319}
320
321/* Check whether second path component of @path is SYSVOL or NETLOGON */
322static inline bool is_sysvol_or_netlogon(const char *path)
323{
324 const char *s;
325 char sep = path[0];
326
327 s = strchr(path + 1, sep) + 1;
328 return !strncasecmp(s, "sysvol", strlen("sysvol")) ||
329 !strncasecmp(s, "netlogon", strlen("netlogon"));
330}
331
332/* Return target hint of a DFS cache entry */
333static inline char *get_tgt_name(const struct cache_entry *ce)
334{
335 struct cache_dfs_tgt *t = ce->tgthint;
336
337 return t ? t->name : ERR_PTR(-ENOENT);
338}
339
340/* Return expire time out of a new entry's TTL */
341static inline struct timespec64 get_expire_time(int ttl)
342{
343 struct timespec64 ts = {
344 .tv_sec = ttl,
345 .tv_nsec = 0,
346 };
347 struct timespec64 now;
348
349 ktime_get_coarse_real_ts64(&now);
350 return timespec64_add(now, ts);
351}
352
353/* Allocate a new DFS target */
354static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
355{
356 struct cache_dfs_tgt *t;
357
358 t = kmalloc(sizeof(*t), GFP_ATOMIC);
359 if (!t)
360 return ERR_PTR(-ENOMEM);
361 t->name = kstrndup(name, strlen(name), GFP_ATOMIC);
362 if (!t->name) {
363 kfree(t);
364 return ERR_PTR(-ENOMEM);
365 }
366 t->path_consumed = path_consumed;
367 INIT_LIST_HEAD(&t->list);
368 return t;
369}
370
371/*
372 * Copy DFS referral information to a cache entry and conditionally update
373 * target hint.
374 */
375static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
376 struct cache_entry *ce, const char *tgthint)
377{
378 int i;
379
380 ce->ttl = refs[0].ttl;
381 ce->etime = get_expire_time(ce->ttl);
382 ce->srvtype = refs[0].server_type;
383 ce->flags = refs[0].ref_flag;
384 ce->path_consumed = refs[0].path_consumed;
385
386 for (i = 0; i < numrefs; i++) {
387 struct cache_dfs_tgt *t;
388
389 t = alloc_target(refs[i].node_name, refs[i].path_consumed);
390 if (IS_ERR(t)) {
391 free_tgts(ce);
392 return PTR_ERR(t);
393 }
394 if (tgthint && !strcasecmp(t->name, tgthint)) {
395 list_add(&t->list, &ce->tlist);
396 tgthint = NULL;
397 } else {
398 list_add_tail(&t->list, &ce->tlist);
399 }
400 ce->numtgts++;
401 }
402
403 ce->tgthint = list_first_entry_or_null(&ce->tlist,
404 struct cache_dfs_tgt, list);
405
406 return 0;
407}
408
409/* Allocate a new cache entry */
410static struct cache_entry *alloc_cache_entry(const char *path,
411 const struct dfs_info3_param *refs,
412 int numrefs)
413{
414 struct cache_entry *ce;
415 int rc;
416
417 ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
418 if (!ce)
419 return ERR_PTR(-ENOMEM);
420
421 ce->path = kstrndup(path, strlen(path), GFP_KERNEL);
422 if (!ce->path) {
423 kmem_cache_free(cache_slab, ce);
424 return ERR_PTR(-ENOMEM);
425 }
426 INIT_HLIST_NODE(&ce->hlist);
427 INIT_LIST_HEAD(&ce->tlist);
428
429 rc = copy_ref_data(refs, numrefs, ce, NULL);
430 if (rc) {
431 kfree(ce->path);
432 kmem_cache_free(cache_slab, ce);
433 ce = ERR_PTR(rc);
434 }
435 return ce;
436}
437
438/* Must be called with htable_rw_lock held */
439static void remove_oldest_entry(void)
440{
441 int i;
442 struct cache_entry *ce;
443 struct cache_entry *to_del = NULL;
444
445 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
446 struct hlist_head *l = &cache_htable[i];
447
448 hlist_for_each_entry(ce, l, hlist) {
449 if (hlist_unhashed(&ce->hlist))
450 continue;
451 if (!to_del || timespec64_compare(&ce->etime,
452 &to_del->etime) < 0)
453 to_del = ce;
454 }
455 }
456
457 if (!to_del) {
458 cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
459 return;
460 }
461
462 cifs_dbg(FYI, "%s: removing entry\n", __func__);
463 dump_ce(to_del);
464 flush_cache_ent(to_del);
465}
466
467/* Add a new DFS cache entry */
468static int add_cache_entry(const char *path, unsigned int hash,
469 struct dfs_info3_param *refs, int numrefs)
470{
471 struct cache_entry *ce;
472
473 ce = alloc_cache_entry(path, refs, numrefs);
474 if (IS_ERR(ce))
475 return PTR_ERR(ce);
476
477 spin_lock(&cache_ttl_lock);
478 if (!cache_ttl) {
479 cache_ttl = ce->ttl;
480 queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
481 } else {
482 cache_ttl = min_t(int, cache_ttl, ce->ttl);
483 mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
484 }
485 spin_unlock(&cache_ttl_lock);
486
487 down_write(&htable_rw_lock);
488 hlist_add_head(&ce->hlist, &cache_htable[hash]);
489 dump_ce(ce);
490 up_write(&htable_rw_lock);
491
492 return 0;
493}
494
495static struct cache_entry *__lookup_cache_entry(const char *path)
496{
497 struct cache_entry *ce;
498 unsigned int h;
499 bool found = false;
500
501 h = cache_entry_hash(path, strlen(path));
502
503 hlist_for_each_entry(ce, &cache_htable[h], hlist) {
504 if (!strcasecmp(path, ce->path)) {
505 found = true;
506 dump_ce(ce);
507 break;
508 }
509 }
510
511 if (!found)
512 ce = ERR_PTR(-ENOENT);
513 return ce;
514}
515
516/*
517 * Find a DFS cache entry in hash table and optionally check prefix path against
518 * @path.
519 * Use whole path components in the match.
520 * Must be called with htable_rw_lock held.
521 *
522 * Return ERR_PTR(-ENOENT) if the entry is not found.
523 */
524static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *hash)
525{
526 struct cache_entry *ce = ERR_PTR(-ENOENT);
527 unsigned int h;
528 int cnt = 0;
529 char *npath;
530 char *s, *e;
531 char sep;
532
533 npath = kstrndup(path, strlen(path), GFP_KERNEL);
534 if (!npath)
535 return ERR_PTR(-ENOMEM);
536
537 s = npath;
538 sep = *npath;
539 while ((s = strchr(s, sep)) && ++cnt < 3)
540 s++;
541
542 if (cnt < 3) {
543 h = cache_entry_hash(path, strlen(path));
544 ce = __lookup_cache_entry(path);
545 goto out;
546 }
547 /*
548 * Handle paths that have more than two path components and are a complete prefix of the DFS
549 * referral request path (@path).
550 *
551 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
552 */
553 h = cache_entry_hash(npath, strlen(npath));
554 e = npath + strlen(npath) - 1;
555 while (e > s) {
556 char tmp;
557
558 /* skip separators */
559 while (e > s && *e == sep)
560 e--;
561 if (e == s)
562 goto out;
563
564 tmp = *(e+1);
565 *(e+1) = 0;
566
567 ce = __lookup_cache_entry(npath);
568 if (!IS_ERR(ce)) {
569 h = cache_entry_hash(npath, strlen(npath));
570 break;
571 }
572
573 *(e+1) = tmp;
574 /* backward until separator */
575 while (e > s && *e != sep)
576 e--;
577 }
578out:
579 if (hash)
580 *hash = h;
581 kfree(npath);
582 return ce;
583}
584
585static void __vol_release(struct vol_info *vi)
586{
587 kfree(vi->fullpath);
588 kfree(vi->mntdata);
589 cifs_cleanup_volume_info_contents(&vi->smb_vol);
590 kfree(vi);
591}
592
593static void vol_release(struct kref *kref)
594{
595 struct vol_info *vi = container_of(kref, struct vol_info, refcnt);
596
597 spin_lock(&vol_list_lock);
598 list_del(&vi->list);
599 spin_unlock(&vol_list_lock);
600 __vol_release(vi);
601}
602
603static inline void free_vol_list(void)
604{
605 struct vol_info *vi, *nvi;
606
607 list_for_each_entry_safe(vi, nvi, &vol_list, list) {
608 list_del_init(&vi->list);
609 __vol_release(vi);
610 }
611}
612
613/**
614 * dfs_cache_destroy - destroy DFS referral cache
615 */
616void dfs_cache_destroy(void)
617{
618 cancel_delayed_work_sync(&refresh_task);
619 unload_nls(cache_nlsc);
620 free_vol_list();
621 flush_cache_ents();
622 kmem_cache_destroy(cache_slab);
623 destroy_workqueue(dfscache_wq);
624
625 cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
626}
627
628/* Must be called with htable_rw_lock held */
629static int __update_cache_entry(const char *path,
630 const struct dfs_info3_param *refs,
631 int numrefs)
632{
633 int rc;
634 struct cache_entry *ce;
635 char *s, *th = NULL;
636
637 ce = lookup_cache_entry(path, NULL);
638 if (IS_ERR(ce))
639 return PTR_ERR(ce);
640
641 if (ce->tgthint) {
642 s = ce->tgthint->name;
643 th = kstrndup(s, strlen(s), GFP_ATOMIC);
644 if (!th)
645 return -ENOMEM;
646 }
647
648 free_tgts(ce);
649 ce->numtgts = 0;
650
651 rc = copy_ref_data(refs, numrefs, ce, th);
652
653 kfree(th);
654
655 return rc;
656}
657
658static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
659 const struct nls_table *nls_codepage, int remap,
660 const char *path, struct dfs_info3_param **refs,
661 int *numrefs)
662{
663 cifs_dbg(FYI, "%s: get an DFS referral for %s\n", __func__, path);
664
665 if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
666 return -EOPNOTSUPP;
667 if (unlikely(!nls_codepage))
668 return -EINVAL;
669
670 *refs = NULL;
671 *numrefs = 0;
672
673 return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
674 nls_codepage, remap);
675}
676
677/* Update an expired cache entry by getting a new DFS referral from server */
678static int update_cache_entry(const char *path,
679 const struct dfs_info3_param *refs,
680 int numrefs)
681{
682
683 int rc;
684
685 down_write(&htable_rw_lock);
686 rc = __update_cache_entry(path, refs, numrefs);
687 up_write(&htable_rw_lock);
688
689 return rc;
690}
691
692/*
693 * Find, create or update a DFS cache entry.
694 *
695 * If the entry wasn't found, it will create a new one. Or if it was found but
696 * expired, then it will update the entry accordingly.
697 *
698 * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
699 * handle them properly.
700 */
701static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
702 const struct nls_table *nls_codepage, int remap,
703 const char *path, bool noreq)
704{
705 int rc;
706 unsigned int hash;
707 struct cache_entry *ce;
708 struct dfs_info3_param *refs = NULL;
709 int numrefs = 0;
710 bool newent = false;
711
712 cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
713
714 down_read(&htable_rw_lock);
715
716 ce = lookup_cache_entry(path, &hash);
717
718 /*
719 * If @noreq is set, no requests will be sent to the server. Just return
720 * the cache entry.
721 */
722 if (noreq) {
723 up_read(&htable_rw_lock);
724 return PTR_ERR_OR_ZERO(ce);
725 }
726
727 if (!IS_ERR(ce)) {
728 if (!cache_entry_expired(ce)) {
729 dump_ce(ce);
730 up_read(&htable_rw_lock);
731 return 0;
732 }
733 } else {
734 newent = true;
735 }
736
737 up_read(&htable_rw_lock);
738
739 /*
740 * Either no entry was found, or the existing one has expired.
741 *
742 * Request a new DFS referral in order to create a new cache entry or
743 * update the existing one.
744 */
745 rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
746 &refs, &numrefs);
747 if (rc)
748 return rc;
749
750 dump_refs(refs, numrefs);
751
752 if (!newent) {
753 rc = update_cache_entry(path, refs, numrefs);
754 goto out_free_refs;
755 }
756
757 if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
758 cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
759 __func__, CACHE_MAX_ENTRIES);
760 down_write(&htable_rw_lock);
761 remove_oldest_entry();
762 up_write(&htable_rw_lock);
763 }
764
765 rc = add_cache_entry(path, hash, refs, numrefs);
766 if (!rc)
767 atomic_inc(&cache_count);
768
769out_free_refs:
770 free_dfs_info_array(refs, numrefs);
771 return rc;
772}
773
774/*
775 * Set up a DFS referral from a given cache entry.
776 *
777 * Must be called with htable_rw_lock held.
778 */
779static int setup_referral(const char *path, struct cache_entry *ce,
780 struct dfs_info3_param *ref, const char *target)
781{
782 int rc;
783
784 cifs_dbg(FYI, "%s: set up new ref\n", __func__);
785
786 memset(ref, 0, sizeof(*ref));
787
788 ref->path_name = kstrndup(path, strlen(path), GFP_ATOMIC);
789 if (!ref->path_name)
790 return -ENOMEM;
791
792 ref->node_name = kstrndup(target, strlen(target), GFP_ATOMIC);
793 if (!ref->node_name) {
794 rc = -ENOMEM;
795 goto err_free_path;
796 }
797
798 ref->path_consumed = ce->path_consumed;
799 ref->ttl = ce->ttl;
800 ref->server_type = ce->srvtype;
801 ref->ref_flag = ce->ref_flags;
802
803 return 0;
804
805err_free_path:
806 kfree(ref->path_name);
807 ref->path_name = NULL;
808 return rc;
809}
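
/*
 * Illustrative usage (a sketch, not a call site in this file): the strings
 * duplicated into @ref are owned by the caller and must be released with
 * free_dfs_info_param() once the referral is no longer needed:
 *
 *	struct dfs_info3_param ref = {0};
 *
 *	rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
 *	if (!rc) {
 *		... use ref.node_name, ref.path_consumed, ...
 *		free_dfs_info_param(&ref);
 *	}
 */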
810
811/* Return target list of a DFS cache entry */
812static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
813{
814 int rc;
815 struct list_head *head = &tl->tl_list;
816 struct cache_dfs_tgt *t;
817 struct dfs_cache_tgt_iterator *it, *nit;
818
819 memset(tl, 0, sizeof(*tl));
820 INIT_LIST_HEAD(head);
821
822 list_for_each_entry(t, &ce->tlist, list) {
823 it = kzalloc(sizeof(*it), GFP_ATOMIC);
824 if (!it) {
825 rc = -ENOMEM;
826 goto err_free_it;
827 }
828
829 it->it_name = kstrndup(t->name, strlen(t->name), GFP_ATOMIC);
830 if (!it->it_name) {
831 kfree(it);
832 rc = -ENOMEM;
833 goto err_free_it;
834 }
835 it->it_path_consumed = t->path_consumed;
836
837 if (ce->tgthint == t)
838 list_add(&it->it_list, head);
839 else
840 list_add_tail(&it->it_list, head);
841 }
842
843 tl->tl_numtgts = ce->numtgts;
844
845 return 0;
846
847err_free_it:
848 list_for_each_entry_safe(it, nit, head, it_list) {
849 kfree(it->it_name);
850 kfree(it);
851 }
852 return rc;
853}
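
/*
 * Note on ordering: get_targets() places the entry's current target hint at
 * the head of @tl, so a caller that walks the list in order (e.g. with the
 * dfs_cache_get_tgt_iterator()/dfs_cache_get_next_tgt() helpers declared in
 * dfs_cache.h) will try the last known good target first.
 */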
854
855/**
856 * dfs_cache_find - find a DFS cache entry
857 *
858 * If it doesn't find the cache entry, then it will get a DFS referral
859 * for @path and create a new entry.
860 *
861 * In case the cache entry exists but expired, it will get a DFS referral
862 * for @path and then update the respective cache entry.
863 *
864 * These parameters are passed down to the get_dfs_refer() call if it
865 * needs to be issued:
866 * @xid: syscall xid
867 * @ses: smb session to issue the request on
868 * @nls_codepage: charset conversion
869 * @remap: path character remapping type
870 * @path: path to lookup in DFS referral cache.
871 *
872 * @ref: when non-NULL, store single DFS referral result in it.
873 * @tgt_list: when non-NULL, store complete DFS target list in it.
874 *
875 * Return zero on success (entry found, created or updated), otherwise non-zero.
876 */
877int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
878 const struct nls_table *nls_codepage, int remap,
879 const char *path, struct dfs_info3_param *ref,
880 struct dfs_cache_tgt_list *tgt_list)
881{
882 int rc;
883 char *npath;
884 struct cache_entry *ce;
885
886 rc = get_normalized_path(path, &npath);
887 if (rc)
888 return rc;
889
890 rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
891 if (rc)
892 goto out_free_path;
893
894 down_read(&htable_rw_lock);
895
896 ce = lookup_cache_entry(npath, NULL);
897 if (IS_ERR(ce)) {
898 up_read(&htable_rw_lock);
899 rc = PTR_ERR(ce);
900 goto out_free_path;
901 }
902
903 if (ref)
904 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
905 else
906 rc = 0;
907 if (!rc && tgt_list)
908 rc = get_targets(ce, tgt_list);
909
910 up_read(&htable_rw_lock);
911
912out_free_path:
913 free_normalized_path(path, npath);
914 return rc;
915}
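
/*
 * Typical usage (sketch only): a mount-time caller that wants the complete
 * target list for a DFS path.  The iterator helpers are the ones declared in
 * dfs_cache.h; xid, ses, cifs_sb and full_path are assumed to come from the
 * caller's context:
 *
 *	struct dfs_cache_tgt_list tl;
 *	struct dfs_cache_tgt_iterator *it;
 *
 *	rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
 *			    full_path, NULL, &tl);
 *	if (!rc) {
 *		for (it = dfs_cache_get_tgt_iterator(&tl); it;
 *		     it = dfs_cache_get_next_tgt(&tl, it))
 *			cifs_dbg(FYI, "target: %s\n", it->it_name);
 *		dfs_cache_free_tgts(&tl);
 *	}
 */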
916
917/**
918 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
919 * the currently connected server.
920 *
921 * NOTE: This function will neither update a cache entry that has expired,
922 * nor create a new one if @path is not found; it relies entirely on an
923 * existing cache entry.
924 *
925 * @path: path to lookup in the DFS referral cache.
926 * @ref: when non-NULL, store single DFS referral result in it.
927 * @tgt_list: when non-NULL, store complete DFS target list in it.
928 *
929 * Return 0 if successful.
930 * Return -ENOENT if the entry was not found.
931 * Return non-zero for other errors.
932 */
933int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
934 struct dfs_cache_tgt_list *tgt_list)
935{
936 int rc;
937 char *npath;
938 struct cache_entry *ce;
939
940 rc = get_normalized_path(path, &npath);
941 if (rc)
942 return rc;
943
944 cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
945
946 down_read(&htable_rw_lock);
947
948 ce = lookup_cache_entry(npath, NULL);
949 if (IS_ERR(ce)) {
950 rc = PTR_ERR(ce);
951 goto out_unlock;
952 }
953
954 if (ref)
955 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
956 else
957 rc = 0;
958 if (!rc && tgt_list)
959 rc = get_targets(ce, tgt_list);
960
961out_unlock:
962 up_read(&htable_rw_lock);
963 free_normalized_path(path, npath);
964
965 return rc;
966}
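
/*
 * Example (sketch): a reconnect path that must not issue new SMB requests can
 * still consult the cache for failover targets; @tcon is assumed to be a DFS
 * tcon with a valid dfs_path, as in refresh_tcon() below:
 *
 *	struct dfs_cache_tgt_list tl;
 *
 *	if (!dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl)) {
 *		... walk &tl and pick the next target ...
 *		dfs_cache_free_tgts(&tl);
 *	}
 */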
967
968/**
969 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
970 *
971 * If it doesn't find the cache entry, then it will get a DFS referral for @path
972 * and create a new entry.
973 *
974 * In case the cache entry exists but expired, it will get a DFS referral
975 * for @path and then update the respective cache entry.
976 *
977 * @xid: syscall xid
978 * @ses: smb session
979 * @nls_codepage: charset conversion
980 * @remap: type of character remapping for paths
981 * @path: path to lookup in DFS referral cache.
982 * @it: DFS target iterator
983 *
984 * Return zero if the target hint was updated successfully, otherwise non-zero.
985 */
986int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
987 const struct nls_table *nls_codepage, int remap,
988 const char *path,
989 const struct dfs_cache_tgt_iterator *it)
990{
991 int rc;
992 char *npath;
993 struct cache_entry *ce;
994 struct cache_dfs_tgt *t;
995
996 rc = get_normalized_path(path, &npath);
997 if (rc)
998 return rc;
999
1000 cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
1001
1002 rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
1003 if (rc)
1004 goto out_free_path;
1005
1006 down_write(&htable_rw_lock);
1007
1008 ce = lookup_cache_entry(npath, NULL);
1009 if (IS_ERR(ce)) {
1010 rc = PTR_ERR(ce);
1011 goto out_unlock;
1012 }
1013
1014 t = ce->tgthint;
1015
1016 if (likely(!strcasecmp(it->it_name, t->name)))
1017 goto out_unlock;
1018
1019 list_for_each_entry(t, &ce->tlist, list) {
1020 if (!strcasecmp(t->name, it->it_name)) {
1021 ce->tgthint = t;
1022 cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
1023 it->it_name);
1024 break;
1025 }
1026 }
1027
1028out_unlock:
1029 up_write(&htable_rw_lock);
1030out_free_path:
1031 free_normalized_path(path, npath);
1032
1033 return rc;
1034}
1035
1036/**
1037 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
1038 * without sending any requests to the currently connected server.
1039 *
1040 * NOTE: This function will neither update a cache entry that has expired,
1041 * nor create a new one if @path is not found; it relies entirely on an
1042 * existing cache entry.
1043 *
1044 * @path: path to lookup in DFS referral cache.
1045 * @it: target iterator which contains the target hint to update the cache
1046 * entry with.
1047 *
1048 * Return zero if the target hint was updated successfully, otherwise non-zero.
1049 */
1050int dfs_cache_noreq_update_tgthint(const char *path,
1051 const struct dfs_cache_tgt_iterator *it)
1052{
1053 int rc;
1054 char *npath;
1055 struct cache_entry *ce;
1056 struct cache_dfs_tgt *t;
1057
1058 if (!it)
1059 return -EINVAL;
1060
1061 rc = get_normalized_path(path, &npath);
1062 if (rc)
1063 return rc;
1064
1065 cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
1066
1067 down_write(&htable_rw_lock);
1068
1069 ce = lookup_cache_entry(npath, NULL);
1070 if (IS_ERR(ce)) {
1071 rc = PTR_ERR(ce);
1072 goto out_unlock;
1073 }
1074
1075 rc = 0;
1076 t = ce->tgthint;
1077
1078 if (unlikely(!strcasecmp(it->it_name, t->name)))
1079 goto out_unlock;
1080
1081 list_for_each_entry(t, &ce->tlist, list) {
1082 if (!strcasecmp(t->name, it->it_name)) {
1083 ce->tgthint = t;
1084 cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
1085 it->it_name);
1086 break;
1087 }
1088 }
1089
1090out_unlock:
1091 up_write(&htable_rw_lock);
1092 free_normalized_path(path, npath);
1093
1094 return rc;
1095}
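
/*
 * Example (sketch): a failover loop over a previously filled target list @tl.
 * try_target() is a hypothetical helper standing in for whatever connection
 * attempt the caller actually performs:
 *
 *	for (it = dfs_cache_get_tgt_iterator(&tl); it;
 *	     it = dfs_cache_get_next_tgt(&tl, it)) {
 *		rc = try_target(it->it_name);
 *		if (!rc) {
 *			dfs_cache_noreq_update_tgthint(path, it);
 *			break;
 *		}
 *	}
 */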
1096
1097/**
1098 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
1099 * target iterator (@it).
1100 *
1101 * @path: path to lookup in DFS referral cache.
1102 * @it: DFS target iterator.
1103 * @ref: DFS referral pointer to set up the gathered information.
1104 *
1105 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
1106 */
1107int dfs_cache_get_tgt_referral(const char *path,
1108 const struct dfs_cache_tgt_iterator *it,
1109 struct dfs_info3_param *ref)
1110{
1111 int rc;
1112 char *npath;
1113 struct cache_entry *ce;
1114
1115 if (!it || !ref)
1116 return -EINVAL;
1117
1118 rc = get_normalized_path(path, &npath);
1119 if (rc)
1120 return rc;
1121
1122 cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
1123
1124 down_read(&htable_rw_lock);
1125
1126 ce = lookup_cache_entry(npath, NULL);
1127 if (IS_ERR(ce)) {
1128 rc = PTR_ERR(ce);
1129 goto out_unlock;
1130 }
1131
1132 cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
1133
1134 rc = setup_referral(path, ce, ref, it->it_name);
1135
1136out_unlock:
1137 up_read(&htable_rw_lock);
1138 free_normalized_path(path, npath);
1139
1140 return rc;
1141}
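
/*
 * Example (sketch): once a working target iterator @it has been chosen, the
 * corresponding referral can be materialized to rebuild mount options, much
 * like find_root_ses() below does; old_mdata and devname are assumed to come
 * from the caller:
 *
 *	struct dfs_info3_param ref = {0};
 *
 *	rc = dfs_cache_get_tgt_referral(path, it, &ref);
 *	if (!rc) {
 *		mdata = cifs_compose_mount_options(old_mdata, path, &ref, &devname);
 *		free_dfs_info_param(&ref);
 *	}
 */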
1142
1143static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
1144{
1145 memcpy(new, vol, sizeof(*new));
1146
1147 if (vol->username) {
1148 new->username = kstrndup(vol->username, strlen(vol->username),
1149 GFP_KERNEL);
1150 if (!new->username)
1151 return -ENOMEM;
1152 }
1153 if (vol->password) {
1154 new->password = kstrndup(vol->password, strlen(vol->password),
1155 GFP_KERNEL);
1156 if (!new->password)
1157 goto err_free_username;
1158 }
1159 if (vol->UNC) {
1160 cifs_dbg(FYI, "%s: vol->UNC: %s\n", __func__, vol->UNC);
1161 new->UNC = kstrndup(vol->UNC, strlen(vol->UNC), GFP_KERNEL);
1162 if (!new->UNC)
1163 goto err_free_password;
1164 }
1165 if (vol->domainname) {
1166 new->domainname = kstrndup(vol->domainname,
1167 strlen(vol->domainname), GFP_KERNEL);
1168 if (!new->domainname)
1169 goto err_free_unc;
1170 }
1171 if (vol->iocharset) {
1172 new->iocharset = kstrndup(vol->iocharset,
1173 strlen(vol->iocharset), GFP_KERNEL);
1174 if (!new->iocharset)
1175 goto err_free_domainname;
1176 }
1177 if (vol->prepath) {
1178 cifs_dbg(FYI, "%s: vol->prepath: %s\n", __func__, vol->prepath);
1179 new->prepath = kstrndup(vol->prepath, strlen(vol->prepath),
1180 GFP_KERNEL);
1181 if (!new->prepath)
1182 goto err_free_iocharset;
1183 }
1184
1185 return 0;
1186
1187err_free_iocharset:
1188 kfree(new->iocharset);
1189err_free_domainname:
1190 kfree(new->domainname);
1191err_free_unc:
1192 kfree(new->UNC);
1193err_free_password:
1194 kfree_sensitive(new->password);
1195err_free_username:
1196 kfree(new->username);
1197 /* @new is embedded in the caller's struct vol_info, so it must not be freed here */
1198 return -ENOMEM;
1199}
1200
1201/**
1202 * dfs_cache_add_vol - add a cifs volume during mount() that will be handled by
1203 * the DFS cache refresh worker.
1204 *
1205 * @mntdata: mount data.
1206 * @vol: cifs volume.
1207 * @fullpath: origin full path.
1208 *
1209 * Return zero if volume was set up correctly, otherwise non-zero.
1210 */
1211int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
1212{
1213 int rc;
1214 struct vol_info *vi;
1215
1216 if (!vol || !fullpath || !mntdata)
1217 return -EINVAL;
1218
1219 cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1220
1221 vi = kzalloc(sizeof(*vi), GFP_KERNEL);
1222 if (!vi)
1223 return -ENOMEM;
1224
1225 vi->fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
1226 if (!vi->fullpath) {
1227 rc = -ENOMEM;
1228 goto err_free_vi;
1229 }
1230
1231 rc = dup_vol(vol, &vi->smb_vol);
1232 if (rc)
1233 goto err_free_fullpath;
1234
1235 vi->mntdata = mntdata;
1236 spin_lock_init(&vi->smb_vol_lock);
1237 kref_init(&vi->refcnt);
1238
1239 spin_lock(&vol_list_lock);
1240 list_add_tail(&vi->list, &vol_list);
1241 spin_unlock(&vol_list_lock);
1242
1243 return 0;
1244
1245err_free_fullpath:
1246 kfree(vi->fullpath);
1247err_free_vi:
1248 kfree(vi);
1249 return rc;
1250}
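
/*
 * Example (sketch): the mount path registers the volume so the refresh worker
 * can keep its referrals up to date, and the umount path drops it again;
 * origin_fullpath stands for the same full DFS path passed in at mount time:
 *
 *	rc = dfs_cache_add_vol(mount_data, vol, origin_fullpath);
 *	...
 *	dfs_cache_del_vol(origin_fullpath);
 */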
1251
1252/* Must be called with vol_list_lock held */
1253static struct vol_info *find_vol(const char *fullpath)
1254{
1255 struct vol_info *vi;
1256
1257 list_for_each_entry(vi, &vol_list, list) {
1258 cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
1259 if (!strcasecmp(vi->fullpath, fullpath))
1260 return vi;
1261 }
1262 return ERR_PTR(-ENOENT);
1263}
1264
1265/**
1266 * dfs_cache_update_vol - update vol info in DFS cache after failover
1267 *
1268 * @fullpath: fullpath to look up in volume list.
1269 * @server: TCP server info pointer.
1270 *
1271 * Return zero if volume was updated, otherwise non-zero.
1272 */
1273int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
1274{
1275 struct vol_info *vi;
1276
1277 if (!fullpath || !server)
1278 return -EINVAL;
1279
1280 cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1281
1282 spin_lock(&vol_list_lock);
1283 vi = find_vol(fullpath);
1284 if (IS_ERR(vi)) {
1285 spin_unlock(&vol_list_lock);
1286 return PTR_ERR(vi);
1287 }
1288 kref_get(&vi->refcnt);
1289 spin_unlock(&vol_list_lock);
1290
1291 cifs_dbg(FYI, "%s: updating volume info\n", __func__);
1292 spin_lock(&vi->smb_vol_lock);
1293 memcpy(&vi->smb_vol.dstaddr, &server->dstaddr,
1294 sizeof(vi->smb_vol.dstaddr));
1295 spin_unlock(&vi->smb_vol_lock);
1296
1297 kref_put(&vi->refcnt, vol_release);
1298
1299 return 0;
1300}
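
/*
 * Example (sketch): a reconnect path that has just switched server->dstaddr
 * to a new DFS target would propagate the change so the refresh worker keeps
 * connecting to the right host; origin_fullpath is the path registered via
 * dfs_cache_add_vol():
 *
 *	rc = dfs_cache_update_vol(origin_fullpath, server);
 */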
1301
1302/**
1303 * dfs_cache_del_vol - remove volume info in DFS cache during umount()
1304 *
1305 * @fullpath: fullpath to look up in volume list.
1306 */
1307void dfs_cache_del_vol(const char *fullpath)
1308{
1309 struct vol_info *vi;
1310
1311 if (!fullpath || !*fullpath)
1312 return;
1313
1314 cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1315
1316 spin_lock(&vol_list_lock);
1317 vi = find_vol(fullpath);
1318 spin_unlock(&vol_list_lock);
1319
1320 if (!IS_ERR(vi))
	kref_put(&vi->refcnt, vol_release);
1321}
1322
1323/**
1324 * dfs_cache_get_tgt_share - parse a DFS target
1325 *
1326 * @path: DFS full path
1327 * @it: DFS target iterator.
1328 * @share: tree name.
1329 * @prefix: prefix path.
1330 *
1331 * Return zero if target was parsed correctly, otherwise non-zero.
1332 */
1333int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
1334 char **share, char **prefix)
1335{
1336 char *s, sep, *p;
1337 size_t len;
1338 size_t plen1, plen2;
1339
1340 if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
1341 return -EINVAL;
1342
1343 *share = NULL;
1344 *prefix = NULL;
1345
1346 sep = it->it_name[0];
1347 if (sep != '\\' && sep != '/')
1348 return -EINVAL;
1349
1350 s = strchr(it->it_name + 1, sep);
1351 if (!s)
1352 return -EINVAL;
1353
1354 /* point to prefix in target node */
1355 s = strchrnul(s + 1, sep);
1356
1357 /* extract target share */
1358 *share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
1359 if (!*share)
1360 return -ENOMEM;
1361
1362 /* skip separator */
1363 if (*s)
1364 s++;
1365 /* point to prefix in DFS path */
1366 p = path + it->it_path_consumed;
1367 if (*p == sep)
1368 p++;
1369
1370 /* merge prefix paths from DFS path and target node */
1371 plen1 = it->it_name + strlen(it->it_name) - s;
1372 plen2 = path + strlen(path) - p;
1373 if (plen1 || plen2) {
1374 len = plen1 + plen2 + 2;
1375 *prefix = kmalloc(len, GFP_KERNEL);
1376 if (!*prefix) {
1377 kfree(*share);
1378 *share = NULL;
1379 return -ENOMEM;
1380 }
1381 if (plen1)
1382 scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s, sep, (int)plen2, p);
1383 else
1384 strscpy(*prefix, p, len);
1385 }
1386 return 0;
1387}
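
/*
 * Worked example (illustrative values): for a DFS path "\dfsroot\share\a\b",
 * a target node "\srv\fs\a" and it_path_consumed covering "\dfsroot\share\a",
 * this returns *share = "\srv\fs" and *prefix = "a\b", i.e. the prefix from
 * the target node merged with the unconsumed tail of the DFS path.
 */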
1388
1389/* Get all tcons that are within a DFS namespace and can be refreshed */
1390static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
1391{
1392 struct cifs_ses *ses;
1393 struct cifs_tcon *tcon;
1394
1395 INIT_LIST_HEAD(head);
1396
1397 spin_lock(&cifs_tcp_ses_lock);
1398 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1399 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
1400 if (!tcon->need_reconnect && !tcon->need_reopen_files &&
1401 tcon->dfs_path) {
1402 tcon->tc_count++;
1403 list_add_tail(&tcon->ulist, head);
1404 }
1405 }
1406 if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
1407 ses->tcon_ipc->dfs_path) {
1408 list_add_tail(&ses->tcon_ipc->ulist, head);
1409 }
1410 }
1411 spin_unlock(&cifs_tcp_ses_lock);
1412}
1413
1414static bool is_dfs_link(const char *path)
1415{
1416 char *s;
1417
1418 s = strchr(path + 1, '\\');
1419 if (!s)
1420 return false;
1421 return !!strchr(s + 1, '\\');
1422}
1423
1424static char *get_dfs_root(const char *path)
1425{
1426 char *s, *npath;
1427
1428 s = strchr(path + 1, '\\');
1429 if (!s)
1430 return ERR_PTR(-EINVAL);
1431
1432 s = strchr(s + 1, '\\');
1433 if (!s)
1434 return ERR_PTR(-EINVAL);
1435
1436 npath = kstrndup(path, s - path, GFP_KERNEL);
1437 if (!npath)
1438 return ERR_PTR(-ENOMEM);
1439
1440 return npath;
1441}
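
/*
 * E.g. for a DFS link path "\dfsroot\share\link\dir", get_dfs_root() returns
 * a newly allocated "\dfsroot\share", which the caller must kfree().
 */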
1442
1443static inline void put_tcp_server(struct TCP_Server_Info *server)
1444{
1445 cifs_put_tcp_session(server, 0);
1446}
1447
1448static struct TCP_Server_Info *get_tcp_server(struct smb_vol *vol)
1449{
1450 struct TCP_Server_Info *server;
1451
1452 server = cifs_find_tcp_session(vol);
1453 if (IS_ERR_OR_NULL(server))
1454 return NULL;
1455
1456 spin_lock(&GlobalMid_Lock);
1457 if (server->tcpStatus != CifsGood) {
1458 spin_unlock(&GlobalMid_Lock);
1459 put_tcp_server(server);
1460 return NULL;
1461 }
1462 spin_unlock(&GlobalMid_Lock);
1463
1464 return server;
1465}
1466
1467/* Find root SMB session out of a DFS link path */
1468static struct cifs_ses *find_root_ses(struct vol_info *vi,
1469 struct cifs_tcon *tcon,
1470 const char *path)
1471{
1472 char *rpath;
1473 int rc;
1474 struct cache_entry *ce;
1475 struct dfs_info3_param ref = {0};
1476 char *mdata = NULL, *devname = NULL;
1477 struct TCP_Server_Info *server;
1478 struct cifs_ses *ses;
1479 struct smb_vol vol = {NULL};
1480
1481 rpath = get_dfs_root(path);
1482 if (IS_ERR(rpath))
1483 return ERR_CAST(rpath);
1484
1485 down_read(&htable_rw_lock);
1486
1487 ce = lookup_cache_entry(rpath, NULL);
1488 if (IS_ERR(ce)) {
1489 up_read(&htable_rw_lock);
1490 ses = ERR_CAST(ce);
1491 goto out;
1492 }
1493
1494 rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
1495 if (rc) {
1496 up_read(&htable_rw_lock);
1497 ses = ERR_PTR(rc);
1498 goto out;
1499 }
1500
1501 up_read(&htable_rw_lock);
1502
1503 mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
1504 &devname);
1505 free_dfs_info_param(&ref);
1506
1507 if (IS_ERR(mdata)) {
1508 ses = ERR_CAST(mdata);
1509 mdata = NULL;
1510 goto out;
1511 }
1512
1513 rc = cifs_setup_volume_info(&vol, mdata, devname, false);
1514 kfree(devname);
1515
1516 if (rc) {
1517 ses = ERR_PTR(rc);
1518 goto out;
1519 }
1520
1521 server = get_tcp_server(&vol);
1522 if (!server) {
1523 ses = ERR_PTR(-EHOSTDOWN);
1524 goto out;
1525 }
1526
1527 ses = cifs_get_smb_ses(server, &vol);
1528
1529out:
1530 cifs_cleanup_volume_info_contents(&vol);
1531 kfree(mdata);
1532 kfree(rpath);
1533
1534 return ses;
1535}
1536
1537/* Refresh DFS cache entry from a given tcon */
1538static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
1539{
1540 int rc = 0;
1541 unsigned int xid;
1542 char *path, *npath;
1543 struct cache_entry *ce;
1544 struct cifs_ses *root_ses = NULL, *ses;
1545 struct dfs_info3_param *refs = NULL;
1546 int numrefs = 0;
1547
1548 xid = get_xid();
1549
1550 path = tcon->dfs_path + 1;
1551
1552 rc = get_normalized_path(path, &npath);
1553 if (rc)
1554 goto out_free_xid;
1555
1556 down_read(&htable_rw_lock);
1557
1558 ce = lookup_cache_entry(npath, NULL);
1559 if (IS_ERR(ce)) {
1560 rc = PTR_ERR(ce);
1561 up_read(&htable_rw_lock);
1562 goto out_free_path;
1563 }
1564
1565 if (!cache_entry_expired(ce)) {
1566 up_read(&htable_rw_lock);
1567 goto out_free_path;
1568 }
1569
1570 up_read(&htable_rw_lock);
1571
1572 /* If it's a DFS Link, then use root SMB session for refreshing it */
1573 if (is_dfs_link(npath)) {
1574 ses = root_ses = find_root_ses(vi, tcon, npath);
1575 if (IS_ERR(ses)) {
1576 rc = PTR_ERR(ses);
1577 root_ses = NULL;
1578 goto out_free_path;
1579 }
1580 } else {
1581 ses = tcon->ses;
1582 }
1583
1584 rc = get_dfs_referral(xid, ses, cache_nlsc, tcon->remap, npath, &refs,
1585 &numrefs);
1586 if (!rc) {
1587 dump_refs(refs, numrefs);
1588 rc = update_cache_entry(npath, refs, numrefs);
1589 free_dfs_info_array(refs, numrefs);
1590 }
1591
1592 if (root_ses)
1593 cifs_put_smb_ses(root_ses);
1594
1595out_free_path:
1596 free_normalized_path(path, npath);
1597
1598out_free_xid:
1599 free_xid(xid);
1600 return rc;
1601}
1602
1603/*
1604 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
1605 * referral.
1606 */
1607static void refresh_cache_worker(struct work_struct *work)
1608{
1609 struct vol_info *vi, *nvi;
1610 struct TCP_Server_Info *server;
1611 LIST_HEAD(vols);
1612 LIST_HEAD(tcons);
1613 struct cifs_tcon *tcon, *ntcon;
1614 int rc;
1615
1616 /*
1617 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
1618 * for refreshing.
1619 */
1620 spin_lock(&vol_list_lock);
1621 list_for_each_entry(vi, &vol_list, list) {
1622 server = get_tcp_server(&vi->smb_vol);
1623 if (!server)
1624 continue;
1625
1626 kref_get(&vi->refcnt);
1627 list_add_tail(&vi->rlist, &vols);
1628 put_tcp_server(server);
1629 }
1630 spin_unlock(&vol_list_lock);
1631
1632 /* Walk through all TCONs and refresh any expired cache entry */
1633 list_for_each_entry_safe(vi, nvi, &vols, rlist) {
1634 spin_lock(&vi->smb_vol_lock);
1635 server = get_tcp_server(&vi->smb_vol);
1636 spin_unlock(&vi->smb_vol_lock);
1637
1638 if (!server)
1639 goto next_vol;
1640
1641 get_tcons(server, &tcons);
1642 rc = 0;
1643
1644 list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
1645 /*
1646 * Skip tcp server if any of its tcons failed to refresh
1647 * (possibly due to reconnects).
1648 */
1649 if (!rc)
1650 rc = refresh_tcon(vi, tcon);
1651
1652 list_del_init(&tcon->ulist);
1653 cifs_put_tcon(tcon);
1654 }
1655
1656 put_tcp_server(server);
1657
1658next_vol:
1659 list_del_init(&vi->rlist);
1660 kref_put(&vi->refcnt, vol_release);
1661 }
1662
1663 spin_lock(&cache_ttl_lock);
1664 queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
1665 spin_unlock(&cache_ttl_lock);
1666}