// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to handle the cached directory entries
 *
 * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);

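/*
 * Find the cached directory entry for @path, or create a new one if none
 * exists. Returns NULL if a matching entry is not usable yet (no lease or
 * being torn down), if @lookup_only was set and nothing was found, or if
 * the cache already holds @max_cached_dirs entries. On success the entry
 * is returned with an extra reference held.
 */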
static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
                                                    const char *path,
                                                    bool lookup_only,
                                                    __u32 max_cached_dirs)
{
        struct cached_fid *cfid;

        spin_lock(&cfids->cfid_list_lock);
        list_for_each_entry(cfid, &cfids->entries, entry) {
                if (!strcmp(cfid->path, path)) {
                        /*
                         * If it doesn't have a lease it is either not yet
                         * fully cached or it may be in the process of
                         * being deleted due to a lease break.
                         */
                        if (!cfid->time || !cfid->has_lease) {
                                spin_unlock(&cfids->cfid_list_lock);
                                return NULL;
                        }
                        kref_get(&cfid->refcount);
                        spin_unlock(&cfids->cfid_list_lock);
                        return cfid;
                }
        }
        if (lookup_only) {
                spin_unlock(&cfids->cfid_list_lock);
                return NULL;
        }
        if (cfids->num_entries >= max_cached_dirs) {
                spin_unlock(&cfids->cfid_list_lock);
                return NULL;
        }
        cfid = init_cached_dir(path);
        if (cfid == NULL) {
                spin_unlock(&cfids->cfid_list_lock);
                return NULL;
        }
        cfid->cfids = cfids;
        cfids->num_entries++;
        list_add(&cfid->entry, &cfids->entries);
        cfid->on_list = true;
        kref_get(&cfid->refcount);
        spin_unlock(&cfids->cfid_list_lock);
        return cfid;
}

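/*
 * Walk @path component by component, starting from the superblock root,
 * and return a referenced dentry for the final component (or an ERR_PTR
 * on failure).
 */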
static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
        struct dentry *dentry;
        const char *s, *p;
        char sep;

        sep = CIFS_DIR_SEP(cifs_sb);
        dentry = dget(cifs_sb->root);
        s = path;

        do {
                struct inode *dir = d_inode(dentry);
                struct dentry *child;

                if (!S_ISDIR(dir->i_mode)) {
                        dput(dentry);
                        dentry = ERR_PTR(-ENOTDIR);
                        break;
                }

                /* skip separators */
                while (*s == sep)
                        s++;
                if (!*s)
                        break;
                p = s++;
                /* next separator */
                while (*s && *s != sep)
                        s++;

                child = lookup_positive_unlocked(p, dentry, s - p);
                dput(dentry);
                dentry = child;
        } while (!IS_ERR(dentry));
        return dentry;
}

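/*
 * Return @path with any mount prefix path stripped off, so that only the
 * part relative to the share root remains.
 */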
static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
                                  const char *path)
{
        size_t len = 0;

        if (!*path)
                return path;

        if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
            cifs_sb->prepath) {
                len = strlen(cifs_sb->prepath) + 1;
                if (unlikely(len > strlen(path)))
                        return ERR_PTR(-EINVAL);
        }
        return path + len;
}

/*
 * Open and cache a directory handle.
 * If there is an error then *cfid is not initialized.
 */
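/*
 * Illustrative caller sketch (assumed usage; error handling elided):
 *
 *	struct cached_fid *cfid;
 *	int rc;
 *
 *	rc = open_cached_dir(xid, tcon, path, cifs_sb, false, &cfid);
 *	if (rc == 0) {
 *		... use cfid->fid for requests against the directory ...
 *		close_cached_dir(cfid);		(drops the caller's reference)
 *	}
 */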
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
                    const char *path,
                    struct cifs_sb_info *cifs_sb,
                    bool lookup_only, struct cached_fid **ret_cfid)
{
        struct cifs_ses *ses;
        struct TCP_Server_Info *server;
        struct cifs_open_parms oparms;
        struct smb2_create_rsp *o_rsp = NULL;
        struct smb2_query_info_rsp *qi_rsp = NULL;
        int resp_buftype[2];
        struct smb_rqst rqst[2];
        struct kvec rsp_iov[2];
        struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
        struct kvec qi_iov[1];
        int rc, flags = 0;
        __le16 *utf16_path = NULL;
        u8 oplock = SMB2_OPLOCK_LEVEL_II;
        struct cifs_fid *pfid;
        struct dentry *dentry = NULL;
        struct cached_fid *cfid;
        struct cached_fids *cfids;
        const char *npath;
        int retries = 0, cur_sleep = 1;

        if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
            is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
                return -EOPNOTSUPP;

        ses = tcon->ses;
        cfids = tcon->cfids;

        if (cifs_sb->root == NULL)
                return -ENOENT;

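        /*
         * Replayable failures of the compound below restart from here;
         * see the is_replayable_error() check at the end of this function.
         */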
replay_again:
        /* reinitialize for possible replay */
        flags = 0;
        oplock = SMB2_OPLOCK_LEVEL_II;
        server = cifs_pick_channel(ses);

        if (!server->ops->new_lease_key)
                return -EIO;

        utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
        if (!utf16_path)
                return -ENOMEM;

        cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
        if (cfid == NULL) {
                kfree(utf16_path);
                return -ENOENT;
        }
        /*
         * Return the cached fid if it has a lease. Otherwise it is either a
         * new entry, or the laundromat worker removed it from @cfids->entries;
         * in the latter case the caller will put the last reference.
         */
        spin_lock(&cfids->cfid_list_lock);
        if (cfid->has_lease) {
                spin_unlock(&cfids->cfid_list_lock);
                *ret_cfid = cfid;
                kfree(utf16_path);
                return 0;
        }
        spin_unlock(&cfids->cfid_list_lock);

        /*
         * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
         * calling ->lookup() which already adds those through
         * build_path_from_dentry(). Also, do it earlier as we might reconnect
         * below when trying to send the compounded request and then end up
         * with a different prefix path (e.g. after DFS failover).
         */
        npath = path_no_prefix(cifs_sb, path);
        if (IS_ERR(npath)) {
                rc = PTR_ERR(npath);
                goto out;
        }

        if (!npath[0]) {
                dentry = dget(cifs_sb->root);
        } else {
                dentry = path_to_dentry(cifs_sb, npath);
                if (IS_ERR(dentry)) {
                        rc = -ENOENT;
                        goto out;
                }
        }
        cfid->dentry = dentry;

        /*
         * We do not hold the lock during the open, in case SMB2_open
         * needs to reconnect.
         * This is safe because no other thread will be able to get a ref
         * to the cfid until we have finished opening the file and (possibly)
         * acquired a lease.
         */
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;

        pfid = &cfid->fid;
        server->ops->new_lease_key(pfid);

        memset(rqst, 0, sizeof(rqst));
        resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
        memset(rsp_iov, 0, sizeof(rsp_iov));

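        /*
         * Build a two-request compound: the SMB2_CREATE of the directory is
         * followed by a QUERY_INFO on the same, just-opened handle
         * (COMPOUND_FID), so both complete in a single round trip.
         */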
        /* Open */
        memset(&open_iov, 0, sizeof(open_iov));
        rqst[0].rq_iov = open_iov;
        rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
                .path = path,
                .create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
                .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES,
                .disposition = FILE_OPEN,
                .fid = pfid,
                .replay = !!(retries),
        };

        rc = SMB2_open_init(tcon, server,
                            &rqst[0], &oplock, &oparms, utf16_path);
        if (rc)
                goto oshr_free;
        smb2_set_next_command(tcon, &rqst[0]);

        memset(&qi_iov, 0, sizeof(qi_iov));
        rqst[1].rq_iov = qi_iov;
        rqst[1].rq_nvec = 1;

        rc = SMB2_query_info_init(tcon, server,
                                  &rqst[1], COMPOUND_FID,
                                  COMPOUND_FID, FILE_ALL_INFORMATION,
                                  SMB2_O_INFO_FILE, 0,
                                  sizeof(struct smb2_file_all_info) +
                                  PATH_MAX * 2, 0, NULL);
        if (rc)
                goto oshr_free;

        smb2_set_related(&rqst[1]);

        /*
         * Set @cfid->has_lease to true before sending out the compounded
         * request so its lease reference can be put in cached_dir_lease_break()
         * due to a potential lease break right after the request is sent or
         * while @cfid is still being cached. Concurrent processes won't be
         * able to use it yet due to @cfid->time being zero.
         */
        cfid->has_lease = true;

        if (retries) {
                smb2_set_replay(server, &rqst[0]);
                smb2_set_replay(server, &rqst[1]);
        }

        rc = compound_send_recv(xid, ses, server,
                                flags, 2, rqst,
                                resp_buftype, rsp_iov);
        if (rc) {
                if (rc == -EREMCHG) {
                        tcon->need_reconnect = true;
                        pr_warn_once("server share %s deleted\n",
                                     tcon->tree_name);
                }
                goto oshr_free;
        }
        cfid->tcon = tcon;
        cfid->is_open = true;

        spin_lock(&cfids->cfid_list_lock);

        o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
        oparms.fid->persistent_fid = o_rsp->PersistentFileId;
        oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
        oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */

        if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
                spin_unlock(&cfids->cfid_list_lock);
                rc = -EINVAL;
                goto oshr_free;
        }

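        /*
         * Pull the lease state out of the create response contexts; the
         * cached handle is only kept if the server granted a lease with at
         * least read caching.
         */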
        rc = smb2_parse_contexts(server, rsp_iov,
                                 &oparms.fid->epoch,
                                 oparms.fid->lease_key,
                                 &oplock, NULL, NULL);
        if (rc) {
                spin_unlock(&cfids->cfid_list_lock);
                goto oshr_free;
        }

        rc = -EINVAL;
        if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
                spin_unlock(&cfids->cfid_list_lock);
                goto oshr_free;
        }
        qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
        if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
                spin_unlock(&cfids->cfid_list_lock);
                goto oshr_free;
        }
        if (!smb2_validate_and_copy_iov(
                                le16_to_cpu(qi_rsp->OutputBufferOffset),
                                sizeof(struct smb2_file_all_info),
                                &rsp_iov[1], sizeof(struct smb2_file_all_info),
                                (char *)&cfid->file_all_info))
                cfid->file_all_info_is_valid = true;

        cfid->time = jiffies;
        spin_unlock(&cfids->cfid_list_lock);
        /* At this point the directory handle is fully cached */
        rc = 0;

oshr_free:
        SMB2_open_free(&rqst[0]);
        SMB2_query_info_free(&rqst[1]);
        free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
        free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
        if (rc) {
                spin_lock(&cfids->cfid_list_lock);
                if (cfid->on_list) {
                        list_del(&cfid->entry);
                        cfid->on_list = false;
                        cfids->num_entries--;
                }
                if (cfid->has_lease) {
                        /*
                         * We are guaranteed to have two references at this
                         * point. One for the caller and one for a potential
                         * lease. Release the lease reference so that the
                         * directory will be closed when the caller closes
                         * the cached handle.
                         */
                        cfid->has_lease = false;
                        spin_unlock(&cfids->cfid_list_lock);
                        kref_put(&cfid->refcount, smb2_close_cached_fid);
                        goto out;
                }
                spin_unlock(&cfids->cfid_list_lock);
        }
out:
        if (rc) {
                if (cfid->is_open)
                        SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
                                   cfid->fid.volatile_fid);
                free_cached_dir(cfid);
        } else {
                *ret_cfid = cfid;
                atomic_inc(&tcon->num_remote_opens);
        }
        kfree(utf16_path);

        if (is_replayable_error(rc) &&
            smb2_should_replay(tcon, &retries, &cur_sleep))
                goto replay_again;

        return rc;
}

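/*
 * Look up an already-cached directory handle by its dentry. On success a
 * reference is taken and the entry is returned in @ret_cfid.
 */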
int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
                              struct dentry *dentry,
                              struct cached_fid **ret_cfid)
{
        struct cached_fid *cfid;
        struct cached_fids *cfids = tcon->cfids;

        if (cfids == NULL)
                return -ENOENT;

        spin_lock(&cfids->cfid_list_lock);
        list_for_each_entry(cfid, &cfids->entries, entry) {
                if (dentry && cfid->dentry == dentry) {
                        cifs_dbg(FYI, "found a cached root file handle by dentry\n");
                        kref_get(&cfid->refcount);
                        *ret_cfid = cfid;
                        spin_unlock(&cfids->cfid_list_lock);
                        return 0;
                }
        }
        spin_unlock(&cfids->cfid_list_lock);
        return -ENOENT;
}

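/*
 * kref release callback: unlink the entry from the list, drop the cached
 * dentry, close the handle on the server if it is still open, and free
 * the entry.
 */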
static void
smb2_close_cached_fid(struct kref *ref)
{
        struct cached_fid *cfid = container_of(ref, struct cached_fid,
                                               refcount);

        spin_lock(&cfid->cfids->cfid_list_lock);
        if (cfid->on_list) {
                list_del(&cfid->entry);
                cfid->on_list = false;
                cfid->cfids->num_entries--;
        }
        spin_unlock(&cfid->cfids->cfid_list_lock);

        dput(cfid->dentry);
        cfid->dentry = NULL;

        if (cfid->is_open) {
                SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
                           cfid->fid.volatile_fid);
                atomic_dec(&cfid->tcon->num_remote_opens);
        }

        free_cached_dir(cfid);
}

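/*
 * Find a cached directory handle for @name and drop its lease reference,
 * then put the lookup reference, so that the handle gets closed (e.g.
 * before the directory is removed).
 */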
void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
                             const char *name, struct cifs_sb_info *cifs_sb)
{
        struct cached_fid *cfid = NULL;
        int rc;

        rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
        if (rc) {
                cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
                return;
        }
        spin_lock(&cfid->cfids->cfid_list_lock);
        if (cfid->has_lease) {
                cfid->has_lease = false;
                kref_put(&cfid->refcount, smb2_close_cached_fid);
        }
        spin_unlock(&cfid->cfids->cfid_list_lock);
        close_cached_dir(cfid);
}

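/* Drop a reference obtained from open_cached_dir() or open_cached_dir_by_dentry(). */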
void close_cached_dir(struct cached_fid *cfid)
{
        kref_put(&cfid->refcount, smb2_close_cached_fid);
}

/*
 * Called from cifs_kill_sb when we unmount a share.
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
        struct rb_root *root = &cifs_sb->tlink_tree;
        struct rb_node *node;
        struct cached_fid *cfid;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cached_fids *cfids;

        for (node = rb_first(root); node; node = rb_next(node)) {
                tlink = rb_entry(node, struct tcon_link, tl_rbnode);
                tcon = tlink_tcon(tlink);
                if (IS_ERR(tcon))
                        continue;
                cfids = tcon->cfids;
                if (cfids == NULL)
                        continue;
                list_for_each_entry(cfid, &cfids->entries, entry) {
                        dput(cfid->dentry);
                        cfid->dentry = NULL;
                }
        }
}

/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
        struct cached_fids *cfids = tcon->cfids;
        struct cached_fid *cfid, *q;
        LIST_HEAD(entry);

        if (cfids == NULL)
                return;

        spin_lock(&cfids->cfid_list_lock);
        list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
                list_move(&cfid->entry, &entry);
                cfids->num_entries--;
                cfid->is_open = false;
                cfid->on_list = false;
                /* To prevent race with smb2_cached_lease_break() */
                kref_get(&cfid->refcount);
        }
        spin_unlock(&cfids->cfid_list_lock);

        list_for_each_entry_safe(cfid, q, &entry, entry) {
                list_del(&cfid->entry);
                cancel_work_sync(&cfid->lease_break);
                if (cfid->has_lease) {
                        /*
                         * The lease was never cancelled by the server, so
                         * we need to drop the reference.
                         */
                        spin_lock(&cfids->cfid_list_lock);
                        cfid->has_lease = false;
                        spin_unlock(&cfids->cfid_list_lock);
                        kref_put(&cfid->refcount, smb2_close_cached_fid);
                }
                /* Drop the extra reference opened above */
                kref_put(&cfid->refcount, smb2_close_cached_fid);
        }
}

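/*
 * Work handler queued from cached_dir_lease_break(); it drops the lease
 * reference in process context, where the resulting SMB2_close() is
 * allowed to sleep.
 */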
static void
smb2_cached_lease_break(struct work_struct *work)
{
        struct cached_fid *cfid = container_of(work,
                                               struct cached_fid, lease_break);

        spin_lock(&cfid->cfids->cfid_list_lock);
        cfid->has_lease = false;
        spin_unlock(&cfid->cfids->cfid_list_lock);
        kref_put(&cfid->refcount, smb2_close_cached_fid);
}

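/*
 * Called when a lease break is received for @lease_key. Returns true if
 * the key matched a cached directory handle, which is then unlinked and
 * scheduled for closing.
 */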
int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
        struct cached_fids *cfids = tcon->cfids;
        struct cached_fid *cfid;

        if (cfids == NULL)
                return false;

        spin_lock(&cfids->cfid_list_lock);
        list_for_each_entry(cfid, &cfids->entries, entry) {
                if (cfid->has_lease &&
                    !memcmp(lease_key,
                            cfid->fid.lease_key,
                            SMB2_LEASE_KEY_SIZE)) {
                        cfid->time = 0;
                        /*
                         * We found a matching lease; remove the entry from
                         * the list so that no other threads can access it.
                         */
                        list_del(&cfid->entry);
                        cfid->on_list = false;
                        cfids->num_entries--;

                        queue_work(cifsiod_wq,
                                   &cfid->lease_break);
                        spin_unlock(&cfids->cfid_list_lock);
                        return true;
                }
        }
        spin_unlock(&cfids->cfid_list_lock);
        return false;
}

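/*
 * Allocate and initialize a new cache entry for @path. GFP_ATOMIC is used
 * because the caller, find_or_create_cached_dir(), holds cfid_list_lock.
 */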
static struct cached_fid *init_cached_dir(const char *path)
{
        struct cached_fid *cfid;

        cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
        if (!cfid)
                return NULL;
        cfid->path = kstrdup(path, GFP_ATOMIC);
        if (!cfid->path) {
                kfree(cfid);
                return NULL;
        }

        INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
        INIT_LIST_HEAD(&cfid->entry);
        INIT_LIST_HEAD(&cfid->dirents.entries);
        mutex_init(&cfid->dirents.de_mutex);
        spin_lock_init(&cfid->fid_lock);
        kref_init(&cfid->refcount);
        return cfid;
}

static void free_cached_dir(struct cached_fid *cfid)
{
        struct cached_dirent *dirent, *q;

        dput(cfid->dentry);
        cfid->dentry = NULL;

        /*
         * Delete all cached dirent names
         */
        list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
                list_del(&dirent->entry);
                kfree(dirent->name);
                kfree(dirent);
        }

        kfree(cfid->path);
        cfid->path = NULL;
        kfree(cfid);
}

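/*
 * Periodic worker that expires cached directory handles older than
 * dir_cache_timeout seconds, then re-arms itself.
 */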
static void cfids_laundromat_worker(struct work_struct *work)
{
        struct cached_fids *cfids;
        struct cached_fid *cfid, *q;
        LIST_HEAD(entry);

        cfids = container_of(work, struct cached_fids, laundromat_work.work);

        spin_lock(&cfids->cfid_list_lock);
        list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
                if (cfid->time &&
                    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
                        cfid->on_list = false;
                        list_move(&cfid->entry, &entry);
                        cfids->num_entries--;
                        /* To prevent race with smb2_cached_lease_break() */
                        kref_get(&cfid->refcount);
                }
        }
        spin_unlock(&cfids->cfid_list_lock);

        list_for_each_entry_safe(cfid, q, &entry, entry) {
                list_del(&cfid->entry);
                /*
                 * Cancel and wait for the work to finish in case we are racing
                 * with it.
                 */
                cancel_work_sync(&cfid->lease_break);
                if (cfid->has_lease) {
                        /*
                         * Our lease has not yet been cancelled by the server,
                         * so we need to drop the reference.
                         */
                        spin_lock(&cfids->cfid_list_lock);
                        cfid->has_lease = false;
                        spin_unlock(&cfids->cfid_list_lock);
                        kref_put(&cfid->refcount, smb2_close_cached_fid);
                }
                /* Drop the extra reference opened above */
                kref_put(&cfid->refcount, smb2_close_cached_fid);
        }
        queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
                           dir_cache_timeout * HZ);
}

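/*
 * Allocate the per-tcon cache of directory handles and start the
 * laundromat that expires old entries.
 */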
struct cached_fids *init_cached_dirs(void)
{
        struct cached_fids *cfids;

        cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
        if (!cfids)
                return NULL;
        spin_lock_init(&cfids->cfid_list_lock);
        INIT_LIST_HEAD(&cfids->entries);

        INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
        queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
                           dir_cache_timeout * HZ);

        return cfids;
}

/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
        struct cached_fid *cfid, *q;
        LIST_HEAD(entry);

        if (cfids == NULL)
                return;

        cancel_delayed_work_sync(&cfids->laundromat_work);

        spin_lock(&cfids->cfid_list_lock);
        list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
                cfid->on_list = false;
                cfid->is_open = false;
                list_move(&cfid->entry, &entry);
        }
        spin_unlock(&cfids->cfid_list_lock);

        list_for_each_entry_safe(cfid, q, &entry, entry) {
                list_del(&cfid->entry);
                free_cached_dir(cfid);
        }

        kfree(cfids);
}