v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  Functions to handle the cached directory entries
  4 *
  5 *  Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
  6 */
  7
  8#include <linux/namei.h>
  9#include "cifsglob.h"
 10#include "cifsproto.h"
 11#include "cifs_debug.h"
 12#include "smb2proto.h"
 13#include "cached_dir.h"
 14
 15static struct cached_fid *init_cached_dir(const char *path);
 16static void free_cached_dir(struct cached_fid *cfid);
 17static void smb2_close_cached_fid(struct kref *ref);
 18static void cfids_laundromat_worker(struct work_struct *work);
 19
 20struct cached_dir_dentry {
 21	struct list_head entry;
 22	struct dentry *dentry;
 23};
 24
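/*
 * Look up @path in @cfids->entries and return the entry with an extra
 * reference if it is fully cached (it has a lease and a nonzero @cfid->time).
 * Otherwise, if @lookup_only is false and the cache is not yet full, allocate
 * a new entry, add it to the list and return it with @cfid->has_lease preset
 * (see the comment in the function body).  Returns NULL when no entry can be
 * returned.
 */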
 25static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
 26						    const char *path,
 27						    bool lookup_only,
 28						    __u32 max_cached_dirs)
 29{
 30	struct cached_fid *cfid;
 31
 32	spin_lock(&cfids->cfid_list_lock);
 33	list_for_each_entry(cfid, &cfids->entries, entry) {
 34		if (!strcmp(cfid->path, path)) {
 35			/*
 36			 * If it doesn't have a lease it is either not yet
 37			 * fully cached or it may be in the process of
 38			 * being deleted due to a lease break.
 39			 */
 40			if (!cfid->time || !cfid->has_lease) {
 41				spin_unlock(&cfids->cfid_list_lock);
 42				return NULL;
 43			}
 44			kref_get(&cfid->refcount);
 45			spin_unlock(&cfids->cfid_list_lock);
 46			return cfid;
 47		}
 48	}
 49	if (lookup_only) {
 50		spin_unlock(&cfids->cfid_list_lock);
 51		return NULL;
 52	}
 53	if (cfids->num_entries >= max_cached_dirs) {
 54		spin_unlock(&cfids->cfid_list_lock);
 55		return NULL;
 56	}
 57	cfid = init_cached_dir(path);
 58	if (cfid == NULL) {
 59		spin_unlock(&cfids->cfid_list_lock);
 60		return NULL;
 61	}
 62	cfid->cfids = cfids;
 63	cfids->num_entries++;
 64	list_add(&cfid->entry, &cfids->entries);
 65	cfid->on_list = true;
 66	kref_get(&cfid->refcount);
 67	/*
 68	 * Set @cfid->has_lease to true during construction so that the lease
 69	 * reference can be put in cached_dir_lease_break() due to a potential
 70	 * lease break right after the request is sent or while @cfid is still
 71	 * being cached, or if a reconnection is triggered during construction.
  72	 * Concurrent processes won't be able to use it yet due to @cfid->time being
 73	 * zero.
 74	 */
 75	cfid->has_lease = true;
 76
 77	spin_unlock(&cfids->cfid_list_lock);
 78	return cfid;
 79}
 80
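/*
 * Walk @path component by component, starting at cifs_sb->root, and return
 * the dentry of the last component (or an ERR_PTR on failure).  With
 * sep == '\\' and path "foo\bar", for example, "foo" is looked up under the
 * root and then "bar" under "foo".
 */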
 81static struct dentry *
 82path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
 83{
 84	struct dentry *dentry;
 85	const char *s, *p;
 86	char sep;
 87
 88	sep = CIFS_DIR_SEP(cifs_sb);
 89	dentry = dget(cifs_sb->root);
 90	s = path;
 91
 92	do {
 93		struct inode *dir = d_inode(dentry);
 94		struct dentry *child;
 95
 96		if (!S_ISDIR(dir->i_mode)) {
 97			dput(dentry);
 98			dentry = ERR_PTR(-ENOTDIR);
 99			break;
100		}
101
102		/* skip separators */
103		while (*s == sep)
104			s++;
105		if (!*s)
106			break;
107		p = s++;
108		/* next separator */
109		while (*s && *s != sep)
110			s++;
111
112		child = lookup_positive_unlocked(p, dentry, s - p);
113		dput(dentry);
114		dentry = child;
115	} while (!IS_ERR(dentry));
116	return dentry;
117}
118
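/*
 * Return @path with the mount prefix path stripped.  When the superblock was
 * mounted with a prefix path (CIFS_MOUNT_USE_PREFIX_PATH), @path is expected
 * to start with cifs_sb->prepath followed by a separator, so with prepath
 * "dir1" the path "dir1\dir2" becomes "dir2".  Returns ERR_PTR(-EINVAL) if
 * @path is shorter than the prefix.
 */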
119static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
120				  const char *path)
121{
122	size_t len = 0;
123
124	if (!*path)
125		return path;
126
127	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
128	    cifs_sb->prepath) {
129		len = strlen(cifs_sb->prepath) + 1;
130		if (unlikely(len > strlen(path)))
131			return ERR_PTR(-EINVAL);
132	}
133	return path + len;
134}
135
136/*
 137 * Open and cache a directory handle.
 138 * On error, *cfid is not initialized.
139 */
140int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
141		    const char *path,
142		    struct cifs_sb_info *cifs_sb,
143		    bool lookup_only, struct cached_fid **ret_cfid)
144{
145	struct cifs_ses *ses;
146	struct TCP_Server_Info *server;
147	struct cifs_open_parms oparms;
148	struct smb2_create_rsp *o_rsp = NULL;
149	struct smb2_query_info_rsp *qi_rsp = NULL;
150	int resp_buftype[2];
151	struct smb_rqst rqst[2];
152	struct kvec rsp_iov[2];
153	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
154	struct kvec qi_iov[1];
155	int rc, flags = 0;
156	__le16 *utf16_path = NULL;
157	u8 oplock = SMB2_OPLOCK_LEVEL_II;
158	struct cifs_fid *pfid;
159	struct dentry *dentry = NULL;
160	struct cached_fid *cfid;
161	struct cached_fids *cfids;
162	const char *npath;
163	int retries = 0, cur_sleep = 1;
164
165	if (cifs_sb->root == NULL)
166		return -ENOENT;
167
168	if (tcon == NULL)
169		return -EOPNOTSUPP;
170
171	ses = tcon->ses;
172	cfids = tcon->cfids;
173
174	if (cfids == NULL)
175		return -EOPNOTSUPP;
176
177replay_again:
178	/* reinitialize for possible replay */
179	flags = 0;
180	oplock = SMB2_OPLOCK_LEVEL_II;
181	server = cifs_pick_channel(ses);
182
183	if (!server->ops->new_lease_key)
184		return -EIO;
185
186	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
187	if (!utf16_path)
188		return -ENOMEM;
189
190	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
191	if (cfid == NULL) {
192		kfree(utf16_path);
193		return -ENOENT;
194	}
195	/*
196	 * Return cached fid if it is valid (has a lease and has a time).
197	 * Otherwise, it is either a new entry or laundromat worker removed it
198	 * from @cfids->entries.  Caller will put last reference if the latter.
199	 */
200	spin_lock(&cfids->cfid_list_lock);
201	if (cfid->has_lease && cfid->time) {
202		spin_unlock(&cfids->cfid_list_lock);
203		*ret_cfid = cfid;
204		kfree(utf16_path);
205		return 0;
206	}
207	spin_unlock(&cfids->cfid_list_lock);
208
209	/*
210	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
211	 * calling ->lookup() which already adds those through
212	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
213	 * below when trying to send compounded request and then potentially
 214	 * below when trying to send the compounded request and then potentially
 215	 * end up with a different prefix path (e.g. after DFS failover).
216	npath = path_no_prefix(cifs_sb, path);
217	if (IS_ERR(npath)) {
218		rc = PTR_ERR(npath);
219		goto out;
220	}
221
222	if (!npath[0]) {
223		dentry = dget(cifs_sb->root);
224	} else {
225		dentry = path_to_dentry(cifs_sb, npath);
226		if (IS_ERR(dentry)) {
227			rc = -ENOENT;
228			goto out;
229		}
230	}
231	cfid->dentry = dentry;
232	cfid->tcon = tcon;
233
234	/*
 235	 * We do not hold the lock for the open in case
 236	 * SMB2_open needs to reconnect.
237	 * This is safe because no other thread will be able to get a ref
238	 * to the cfid until we have finished opening the file and (possibly)
239	 * acquired a lease.
240	 */
241	if (smb3_encryption_required(tcon))
242		flags |= CIFS_TRANSFORM_REQ;
243
244	pfid = &cfid->fid;
245	server->ops->new_lease_key(pfid);
246
247	memset(rqst, 0, sizeof(rqst));
248	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
249	memset(rsp_iov, 0, sizeof(rsp_iov));
250
251	/* Open */
252	memset(&open_iov, 0, sizeof(open_iov));
253	rqst[0].rq_iov = open_iov;
254	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
255
256	oparms = (struct cifs_open_parms) {
257		.tcon = tcon,
258		.path = path,
259		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
260		.desired_access =  FILE_READ_DATA | FILE_READ_ATTRIBUTES |
261				   FILE_READ_EA,
262		.disposition = FILE_OPEN,
263		.fid = pfid,
264		.replay = !!(retries),
265	};
266
267	rc = SMB2_open_init(tcon, server,
268			    &rqst[0], &oplock, &oparms, utf16_path);
269	if (rc)
270		goto oshr_free;
271	smb2_set_next_command(tcon, &rqst[0]);
272
273	memset(&qi_iov, 0, sizeof(qi_iov));
274	rqst[1].rq_iov = qi_iov;
275	rqst[1].rq_nvec = 1;
276
277	rc = SMB2_query_info_init(tcon, server,
278				  &rqst[1], COMPOUND_FID,
279				  COMPOUND_FID, FILE_ALL_INFORMATION,
280				  SMB2_O_INFO_FILE, 0,
281				  sizeof(struct smb2_file_all_info) +
282				  PATH_MAX * 2, 0, NULL);
283	if (rc)
284		goto oshr_free;
285
286	smb2_set_related(&rqst[1]);
287
288	if (retries) {
289		smb2_set_replay(server, &rqst[0]);
290		smb2_set_replay(server, &rqst[1]);
291	}
292
293	rc = compound_send_recv(xid, ses, server,
294				flags, 2, rqst,
295				resp_buftype, rsp_iov);
296	if (rc) {
297		if (rc == -EREMCHG) {
298			tcon->need_reconnect = true;
299			pr_warn_once("server share %s deleted\n",
300				     tcon->tree_name);
301		}
302		goto oshr_free;
303	}
304	cfid->is_open = true;
305
306	spin_lock(&cfids->cfid_list_lock);
307
308	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
309	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
310	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
311#ifdef CONFIG_CIFS_DEBUG2
312	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
313#endif /* CIFS_DEBUG2 */
314
315
316	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
317		spin_unlock(&cfids->cfid_list_lock);
318		rc = -EINVAL;
319		goto oshr_free;
320	}
321
322	rc = smb2_parse_contexts(server, rsp_iov,
323				 &oparms.fid->epoch,
324				 oparms.fid->lease_key,
325				 &oplock, NULL, NULL);
326	if (rc) {
327		spin_unlock(&cfids->cfid_list_lock);
328		goto oshr_free;
329	}
330
331	rc = -EINVAL;
332	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
333		spin_unlock(&cfids->cfid_list_lock);
334		goto oshr_free;
335	}
336	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
337	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
338		spin_unlock(&cfids->cfid_list_lock);
339		goto oshr_free;
340	}
341	if (!smb2_validate_and_copy_iov(
342				le16_to_cpu(qi_rsp->OutputBufferOffset),
343				sizeof(struct smb2_file_all_info),
344				&rsp_iov[1], sizeof(struct smb2_file_all_info),
345				(char *)&cfid->file_all_info))
346		cfid->file_all_info_is_valid = true;
347
348	cfid->time = jiffies;
349	spin_unlock(&cfids->cfid_list_lock);
350	/* At this point the directory handle is fully cached */
351	rc = 0;
352
353oshr_free:
354	SMB2_open_free(&rqst[0]);
355	SMB2_query_info_free(&rqst[1]);
356	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
357	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
358out:
359	if (rc) {
360		spin_lock(&cfids->cfid_list_lock);
361		if (cfid->on_list) {
362			list_del(&cfid->entry);
363			cfid->on_list = false;
364			cfids->num_entries--;
365		}
366		if (cfid->has_lease) {
367			/*
368			 * We are guaranteed to have two references at this
369			 * point. One for the caller and one for a potential
370			 * lease. Release one here, and the second below.
371			 */
372			cfid->has_lease = false;
373			kref_put(&cfid->refcount, smb2_close_cached_fid);
374		}
375		spin_unlock(&cfids->cfid_list_lock);
376
377		kref_put(&cfid->refcount, smb2_close_cached_fid);
378	} else {
379		*ret_cfid = cfid;
380		atomic_inc(&tcon->num_remote_opens);
381	}
382	kfree(utf16_path);
383
384	if (is_replayable_error(rc) &&
385	    smb2_should_replay(tcon, &retries, &cur_sleep))
386		goto replay_again;
387
388	return rc;
389}
390
391int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
392			      struct dentry *dentry,
393			      struct cached_fid **ret_cfid)
394{
395	struct cached_fid *cfid;
396	struct cached_fids *cfids = tcon->cfids;
397
398	if (cfids == NULL)
399		return -EOPNOTSUPP;
400
401	spin_lock(&cfids->cfid_list_lock);
402	list_for_each_entry(cfid, &cfids->entries, entry) {
403		if (dentry && cfid->dentry == dentry) {
404			cifs_dbg(FYI, "found a cached file handle by dentry\n");
405			kref_get(&cfid->refcount);
406			*ret_cfid = cfid;
407			spin_unlock(&cfids->cfid_list_lock);
408			return 0;
409		}
410	}
411	spin_unlock(&cfids->cfid_list_lock);
412	return -ENOENT;
413}
414
415static void
416smb2_close_cached_fid(struct kref *ref)
417{
418	struct cached_fid *cfid = container_of(ref, struct cached_fid,
419					       refcount);
420	int rc;
421
422	spin_lock(&cfid->cfids->cfid_list_lock);
423	if (cfid->on_list) {
424		list_del(&cfid->entry);
425		cfid->on_list = false;
426		cfid->cfids->num_entries--;
427	}
428	spin_unlock(&cfid->cfids->cfid_list_lock);
429
430	dput(cfid->dentry);
431	cfid->dentry = NULL;
432
433	if (cfid->is_open) {
434		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
435			   cfid->fid.volatile_fid);
436		if (rc) /* should we retry on -EBUSY or -EAGAIN? */
437			cifs_dbg(VFS, "close cached dir rc %d\n", rc);
438	}
439
440	free_cached_dir(cfid);
441}
442
443void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
444			     const char *name, struct cifs_sb_info *cifs_sb)
445{
446	struct cached_fid *cfid = NULL;
447	int rc;
448
449	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
450	if (rc) {
451		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
452		return;
453	}
454	spin_lock(&cfid->cfids->cfid_list_lock);
455	if (cfid->has_lease) {
456		cfid->has_lease = false;
457		kref_put(&cfid->refcount, smb2_close_cached_fid);
458	}
459	spin_unlock(&cfid->cfids->cfid_list_lock);
460	close_cached_dir(cfid);
461}
462
463
464void close_cached_dir(struct cached_fid *cfid)
465{
466	kref_put(&cfid->refcount, smb2_close_cached_fid);
467}
468
469/*
470 * Called from cifs_kill_sb when we unmount a share
471 */
472void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
473{
474	struct rb_root *root = &cifs_sb->tlink_tree;
475	struct rb_node *node;
476	struct cached_fid *cfid;
477	struct cifs_tcon *tcon;
478	struct tcon_link *tlink;
479	struct cached_fids *cfids;
480	struct cached_dir_dentry *tmp_list, *q;
481	LIST_HEAD(entry);
482
483	spin_lock(&cifs_sb->tlink_tree_lock);
484	for (node = rb_first(root); node; node = rb_next(node)) {
485		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
486		tcon = tlink_tcon(tlink);
487		if (IS_ERR(tcon))
488			continue;
489		cfids = tcon->cfids;
490		if (cfids == NULL)
491			continue;
492		spin_lock(&cfids->cfid_list_lock);
493		list_for_each_entry(cfid, &cfids->entries, entry) {
494			tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
495			if (tmp_list == NULL)
496				break;
497			spin_lock(&cfid->fid_lock);
498			tmp_list->dentry = cfid->dentry;
499			cfid->dentry = NULL;
500			spin_unlock(&cfid->fid_lock);
501
502			list_add_tail(&tmp_list->entry, &entry);
503		}
504		spin_unlock(&cfids->cfid_list_lock);
505	}
506	spin_unlock(&cifs_sb->tlink_tree_lock);
507
508	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
509		list_del(&tmp_list->entry);
510		dput(tmp_list->dentry);
511		kfree(tmp_list);
512	}
513
514	/* Flush any pending work that will drop dentries */
515	flush_workqueue(cfid_put_wq);
516}
517
518/*
519 * Invalidate all cached dirs when a TCON has been reset
520 * due to a session loss.
521 */
522void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
523{
524	struct cached_fids *cfids = tcon->cfids;
525	struct cached_fid *cfid, *q;
526
527	if (cfids == NULL)
528		return;
529
530	/*
531	 * Mark all the cfids as closed, and move them to the cfids->dying list.
532	 * They'll be cleaned up later by cfids_invalidation_worker. Take
533	 * a reference to each cfid during this process.
534	 */
535	spin_lock(&cfids->cfid_list_lock);
536	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
537		list_move(&cfid->entry, &cfids->dying);
538		cfids->num_entries--;
539		cfid->is_open = false;
540		cfid->on_list = false;
541		if (cfid->has_lease) {
542			/*
543			 * The lease was never cancelled from the server,
544			 * so steal that reference.
545			 */
546			cfid->has_lease = false;
547		} else
548			kref_get(&cfid->refcount);
549	}
550	/*
551	 * Queue dropping of the dentries once locks have been dropped
552	 */
553	if (!list_empty(&cfids->dying))
554		queue_work(cfid_put_wq, &cfids->invalidation_work);
555	spin_unlock(&cfids->cfid_list_lock);
556}
557
558static void
559cached_dir_offload_close(struct work_struct *work)
560{
561	struct cached_fid *cfid = container_of(work,
562				struct cached_fid, close_work);
563	struct cifs_tcon *tcon = cfid->tcon;
564
565	WARN_ON(cfid->on_list);
566
567	kref_put(&cfid->refcount, smb2_close_cached_fid);
568	cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
569}
570
571/*
572 * Release the cached directory's dentry, and then queue work to drop cached
573 * directory itself (closing on server if needed).
574 *
575 * Must be called with a reference to the cached_fid and a reference to the
576 * tcon.
577 */
578static void cached_dir_put_work(struct work_struct *work)
579{
580	struct cached_fid *cfid = container_of(work, struct cached_fid,
581					       put_work);
582	struct dentry *dentry;
583
584	spin_lock(&cfid->fid_lock);
585	dentry = cfid->dentry;
586	cfid->dentry = NULL;
587	spin_unlock(&cfid->fid_lock);
588
589	dput(dentry);
590	queue_work(serverclose_wq, &cfid->close_work);
591}
592
593int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
594{
595	struct cached_fids *cfids = tcon->cfids;
596	struct cached_fid *cfid;
597
598	if (cfids == NULL)
599		return false;
600
601	spin_lock(&cfids->cfid_list_lock);
602	list_for_each_entry(cfid, &cfids->entries, entry) {
603		if (cfid->has_lease &&
604		    !memcmp(lease_key,
605			    cfid->fid.lease_key,
606			    SMB2_LEASE_KEY_SIZE)) {
607			cfid->has_lease = false;
608			cfid->time = 0;
609			/*
 610			 * We found a lease; remove it from the list
611			 * so no threads can access it.
612			 */
613			list_del(&cfid->entry);
614			cfid->on_list = false;
615			cfids->num_entries--;
616
617			++tcon->tc_count;
618			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
619					    netfs_trace_tcon_ref_get_cached_lease_break);
620			queue_work(cfid_put_wq, &cfid->put_work);
621			spin_unlock(&cfids->cfid_list_lock);
622			return true;
623		}
624	}
625	spin_unlock(&cfids->cfid_list_lock);
626	return false;
627}
628
629static struct cached_fid *init_cached_dir(const char *path)
630{
631	struct cached_fid *cfid;
632
633	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
634	if (!cfid)
635		return NULL;
636	cfid->path = kstrdup(path, GFP_ATOMIC);
637	if (!cfid->path) {
638		kfree(cfid);
639		return NULL;
640	}
641
642	INIT_WORK(&cfid->close_work, cached_dir_offload_close);
643	INIT_WORK(&cfid->put_work, cached_dir_put_work);
644	INIT_LIST_HEAD(&cfid->entry);
645	INIT_LIST_HEAD(&cfid->dirents.entries);
646	mutex_init(&cfid->dirents.de_mutex);
647	spin_lock_init(&cfid->fid_lock);
648	kref_init(&cfid->refcount);
649	return cfid;
650}
651
652static void free_cached_dir(struct cached_fid *cfid)
653{
654	struct cached_dirent *dirent, *q;
655
656	WARN_ON(work_pending(&cfid->close_work));
657	WARN_ON(work_pending(&cfid->put_work));
658
659	dput(cfid->dentry);
660	cfid->dentry = NULL;
661
662	/*
663	 * Delete all cached dirent names
664	 */
665	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
666		list_del(&dirent->entry);
667		kfree(dirent->name);
668		kfree(dirent);
669	}
670
671	kfree(cfid->path);
672	cfid->path = NULL;
673	kfree(cfid);
674}
675
676static void cfids_invalidation_worker(struct work_struct *work)
677{
678	struct cached_fids *cfids = container_of(work, struct cached_fids,
679						 invalidation_work);
680	struct cached_fid *cfid, *q;
681	LIST_HEAD(entry);
682
683	spin_lock(&cfids->cfid_list_lock);
684	/* move cfids->dying to the local list */
685	list_cut_before(&entry, &cfids->dying, &cfids->dying);
686	spin_unlock(&cfids->cfid_list_lock);
687
688	list_for_each_entry_safe(cfid, q, &entry, entry) {
689		list_del(&cfid->entry);
690		/* Drop the ref-count acquired in invalidate_all_cached_dirs */
691		kref_put(&cfid->refcount, smb2_close_cached_fid);
692	}
693}
694
695static void cfids_laundromat_worker(struct work_struct *work)
696{
697	struct cached_fids *cfids;
698	struct cached_fid *cfid, *q;
699	struct dentry *dentry;
700	LIST_HEAD(entry);
701
702	cfids = container_of(work, struct cached_fids, laundromat_work.work);
703
704	spin_lock(&cfids->cfid_list_lock);
705	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
706		if (cfid->time &&
707		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
708			cfid->on_list = false;
709			list_move(&cfid->entry, &entry);
710			cfids->num_entries--;
711			if (cfid->has_lease) {
712				/*
713				 * Our lease has not yet been cancelled from the
714				 * server. Steal that reference.
715				 */
716				cfid->has_lease = false;
717			} else
718				kref_get(&cfid->refcount);
719		}
720	}
721	spin_unlock(&cfids->cfid_list_lock);
722
723	list_for_each_entry_safe(cfid, q, &entry, entry) {
724		list_del(&cfid->entry);
725
726		spin_lock(&cfid->fid_lock);
727		dentry = cfid->dentry;
728		cfid->dentry = NULL;
729		spin_unlock(&cfid->fid_lock);
730
731		dput(dentry);
732		if (cfid->is_open) {
733			spin_lock(&cifs_tcp_ses_lock);
734			++cfid->tcon->tc_count;
735			trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
736					    netfs_trace_tcon_ref_get_cached_laundromat);
737			spin_unlock(&cifs_tcp_ses_lock);
738			queue_work(serverclose_wq, &cfid->close_work);
739		} else
740			/*
741			 * Drop the ref-count from above, either the lease-ref (if there
742			 * was one) or the extra one acquired.
743			 */
744			kref_put(&cfid->refcount, smb2_close_cached_fid);
745	}
746	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
747			   dir_cache_timeout * HZ);
748}
749
750struct cached_fids *init_cached_dirs(void)
751{
752	struct cached_fids *cfids;
753
754	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
755	if (!cfids)
756		return NULL;
757	spin_lock_init(&cfids->cfid_list_lock);
758	INIT_LIST_HEAD(&cfids->entries);
759	INIT_LIST_HEAD(&cfids->dying);
760
761	INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
762	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
763	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
764			   dir_cache_timeout * HZ);
765
766	return cfids;
767}
768
769/*
770 * Called from tconInfoFree when we are tearing down the tcon.
771 * There are no active users or open files/directories at this point.
772 */
773void free_cached_dirs(struct cached_fids *cfids)
774{
775	struct cached_fid *cfid, *q;
776	LIST_HEAD(entry);
777
778	if (cfids == NULL)
779		return;
780
781	cancel_delayed_work_sync(&cfids->laundromat_work);
782	cancel_work_sync(&cfids->invalidation_work);
783
784	spin_lock(&cfids->cfid_list_lock);
785	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
786		cfid->on_list = false;
787		cfid->is_open = false;
788		list_move(&cfid->entry, &entry);
789	}
790	list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
791		cfid->on_list = false;
792		cfid->is_open = false;
793		list_move(&cfid->entry, &entry);
794	}
795	spin_unlock(&cfids->cfid_list_lock);
796
797	list_for_each_entry_safe(cfid, q, &entry, entry) {
798		list_del(&cfid->entry);
799		free_cached_dir(cfid);
800	}
801
802	kfree(cfids);
803}
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  Functions to handle the cached directory entries
  4 *
  5 *  Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
  6 */
  7
  8#include <linux/namei.h>
  9#include "cifsglob.h"
 10#include "cifsproto.h"
 11#include "cifs_debug.h"
 12#include "smb2proto.h"
 13#include "cached_dir.h"
 14
 15static struct cached_fid *init_cached_dir(const char *path);
 16static void free_cached_dir(struct cached_fid *cfid);
 17static void smb2_close_cached_fid(struct kref *ref);
 18static void cfids_laundromat_worker(struct work_struct *work);
 19
 20static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
 21						    const char *path,
 22						    bool lookup_only,
 23						    __u32 max_cached_dirs)
 24{
 25	struct cached_fid *cfid;
 26
 27	spin_lock(&cfids->cfid_list_lock);
 28	list_for_each_entry(cfid, &cfids->entries, entry) {
 29		if (!strcmp(cfid->path, path)) {
 30			/*
 31			 * If it doesn't have a lease it is either not yet
 32			 * fully cached or it may be in the process of
 33			 * being deleted due to a lease break.
 34			 */
 35			if (!cfid->time || !cfid->has_lease) {
 36				spin_unlock(&cfids->cfid_list_lock);
 37				return NULL;
 38			}
 39			kref_get(&cfid->refcount);
 40			spin_unlock(&cfids->cfid_list_lock);
 41			return cfid;
 42		}
 43	}
 44	if (lookup_only) {
 45		spin_unlock(&cfids->cfid_list_lock);
 46		return NULL;
 47	}
 48	if (cfids->num_entries >= max_cached_dirs) {
 49		spin_unlock(&cfids->cfid_list_lock);
 50		return NULL;
 51	}
 52	cfid = init_cached_dir(path);
 53	if (cfid == NULL) {
 54		spin_unlock(&cfids->cfid_list_lock);
 55		return NULL;
 56	}
 57	cfid->cfids = cfids;
 58	cfids->num_entries++;
 59	list_add(&cfid->entry, &cfids->entries);
 60	cfid->on_list = true;
 61	kref_get(&cfid->refcount);
 62	spin_unlock(&cfids->cfid_list_lock);
 63	return cfid;
 64}
 65
 66static struct dentry *
 67path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
 68{
 69	struct dentry *dentry;
 70	const char *s, *p;
 71	char sep;
 72
 73	sep = CIFS_DIR_SEP(cifs_sb);
 74	dentry = dget(cifs_sb->root);
 75	s = path;
 76
 77	do {
 78		struct inode *dir = d_inode(dentry);
 79		struct dentry *child;
 80
 81		if (!S_ISDIR(dir->i_mode)) {
 82			dput(dentry);
 83			dentry = ERR_PTR(-ENOTDIR);
 84			break;
 85		}
 86
 87		/* skip separators */
 88		while (*s == sep)
 89			s++;
 90		if (!*s)
 91			break;
 92		p = s++;
 93		/* next separator */
 94		while (*s && *s != sep)
 95			s++;
 96
 97		child = lookup_positive_unlocked(p, dentry, s - p);
 98		dput(dentry);
 99		dentry = child;
100	} while (!IS_ERR(dentry));
101	return dentry;
102}
103
104static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
105				  const char *path)
106{
107	size_t len = 0;
108
109	if (!*path)
110		return path;
111
112	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
113	    cifs_sb->prepath) {
114		len = strlen(cifs_sb->prepath) + 1;
115		if (unlikely(len > strlen(path)))
116			return ERR_PTR(-EINVAL);
117	}
118	return path + len;
119}
120
121/*
 122 * Open and cache a directory handle.
 123 * On error, *cfid is not initialized.
124 */
125int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
126		    const char *path,
127		    struct cifs_sb_info *cifs_sb,
128		    bool lookup_only, struct cached_fid **ret_cfid)
129{
130	struct cifs_ses *ses;
131	struct TCP_Server_Info *server;
132	struct cifs_open_parms oparms;
133	struct smb2_create_rsp *o_rsp = NULL;
134	struct smb2_query_info_rsp *qi_rsp = NULL;
135	int resp_buftype[2];
136	struct smb_rqst rqst[2];
137	struct kvec rsp_iov[2];
138	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
139	struct kvec qi_iov[1];
140	int rc, flags = 0;
141	__le16 *utf16_path = NULL;
142	u8 oplock = SMB2_OPLOCK_LEVEL_II;
143	struct cifs_fid *pfid;
144	struct dentry *dentry = NULL;
145	struct cached_fid *cfid;
146	struct cached_fids *cfids;
147	const char *npath;
148	int retries = 0, cur_sleep = 1;
149
150	if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
151	    is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
152		return -EOPNOTSUPP;
153
154	ses = tcon->ses;
155	cfids = tcon->cfids;
156
157	if (cifs_sb->root == NULL)
158		return -ENOENT;
159
160replay_again:
161	/* reinitialize for possible replay */
162	flags = 0;
163	oplock = SMB2_OPLOCK_LEVEL_II;
164	server = cifs_pick_channel(ses);
165
166	if (!server->ops->new_lease_key)
167		return -EIO;
168
169	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
170	if (!utf16_path)
171		return -ENOMEM;
172
173	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
174	if (cfid == NULL) {
175		kfree(utf16_path);
176		return -ENOENT;
177	}
178	/*
179	 * Return cached fid if it has a lease.  Otherwise, it is either a new
180	 * entry or laundromat worker removed it from @cfids->entries.  Caller
181	 * will put last reference if the latter.
182	 */
183	spin_lock(&cfids->cfid_list_lock);
184	if (cfid->has_lease) {
185		spin_unlock(&cfids->cfid_list_lock);
186		*ret_cfid = cfid;
187		kfree(utf16_path);
188		return 0;
189	}
190	spin_unlock(&cfids->cfid_list_lock);
191
192	/*
193	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
194	 * calling ->lookup() which already adds those through
195	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
 196	 * below when trying to send the compounded request and then potentially
 197	 * end up with a different prefix path (e.g. after DFS failover).
198	 */
199	npath = path_no_prefix(cifs_sb, path);
200	if (IS_ERR(npath)) {
201		rc = PTR_ERR(npath);
202		goto out;
203	}
204
205	if (!npath[0]) {
206		dentry = dget(cifs_sb->root);
207	} else {
208		dentry = path_to_dentry(cifs_sb, npath);
209		if (IS_ERR(dentry)) {
210			rc = -ENOENT;
211			goto out;
212		}
213	}
214	cfid->dentry = dentry;
215
216	/*
 217	 * We do not hold the lock for the open in case
 218	 * SMB2_open needs to reconnect.
219	 * This is safe because no other thread will be able to get a ref
220	 * to the cfid until we have finished opening the file and (possibly)
221	 * acquired a lease.
222	 */
223	if (smb3_encryption_required(tcon))
224		flags |= CIFS_TRANSFORM_REQ;
225
226	pfid = &cfid->fid;
227	server->ops->new_lease_key(pfid);
228
229	memset(rqst, 0, sizeof(rqst));
230	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
231	memset(rsp_iov, 0, sizeof(rsp_iov));
232
233	/* Open */
234	memset(&open_iov, 0, sizeof(open_iov));
235	rqst[0].rq_iov = open_iov;
236	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
237
238	oparms = (struct cifs_open_parms) {
239		.tcon = tcon,
240		.path = path,
241		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
242		.desired_access =  FILE_READ_DATA | FILE_READ_ATTRIBUTES,
243		.disposition = FILE_OPEN,
244		.fid = pfid,
245		.replay = !!(retries),
246	};
247
248	rc = SMB2_open_init(tcon, server,
249			    &rqst[0], &oplock, &oparms, utf16_path);
250	if (rc)
251		goto oshr_free;
252	smb2_set_next_command(tcon, &rqst[0]);
253
254	memset(&qi_iov, 0, sizeof(qi_iov));
255	rqst[1].rq_iov = qi_iov;
256	rqst[1].rq_nvec = 1;
257
258	rc = SMB2_query_info_init(tcon, server,
259				  &rqst[1], COMPOUND_FID,
260				  COMPOUND_FID, FILE_ALL_INFORMATION,
261				  SMB2_O_INFO_FILE, 0,
262				  sizeof(struct smb2_file_all_info) +
263				  PATH_MAX * 2, 0, NULL);
264	if (rc)
265		goto oshr_free;
266
267	smb2_set_related(&rqst[1]);
268
269	/*
270	 * Set @cfid->has_lease to true before sending out compounded request so
271	 * its lease reference can be put in cached_dir_lease_break() due to a
272	 * potential lease break right after the request is sent or while @cfid
 273	 * is still being cached.  Concurrent processes won't be able to use it yet
274	 * due to @cfid->time being zero.
275	 */
276	cfid->has_lease = true;
277
278	if (retries) {
279		smb2_set_replay(server, &rqst[0]);
280		smb2_set_replay(server, &rqst[1]);
281	}
282
283	rc = compound_send_recv(xid, ses, server,
284				flags, 2, rqst,
285				resp_buftype, rsp_iov);
286	if (rc) {
287		if (rc == -EREMCHG) {
288			tcon->need_reconnect = true;
289			pr_warn_once("server share %s deleted\n",
290				     tcon->tree_name);
291		}
292		goto oshr_free;
293	}
294	cfid->tcon = tcon;
295	cfid->is_open = true;
296
297	spin_lock(&cfids->cfid_list_lock);
298
299	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
300	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
301	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
302#ifdef CONFIG_CIFS_DEBUG2
303	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
304#endif /* CIFS_DEBUG2 */
305
306
307	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
308		spin_unlock(&cfids->cfid_list_lock);
309		rc = -EINVAL;
310		goto oshr_free;
311	}
312
313	rc = smb2_parse_contexts(server, rsp_iov,
314				 &oparms.fid->epoch,
315				 oparms.fid->lease_key,
316				 &oplock, NULL, NULL);
317	if (rc) {
318		spin_unlock(&cfids->cfid_list_lock);
319		goto oshr_free;
320	}
321
322	rc = -EINVAL;
323	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
324		spin_unlock(&cfids->cfid_list_lock);
325		goto oshr_free;
326	}
327	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
328	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
329		spin_unlock(&cfids->cfid_list_lock);
330		goto oshr_free;
331	}
332	if (!smb2_validate_and_copy_iov(
333				le16_to_cpu(qi_rsp->OutputBufferOffset),
334				sizeof(struct smb2_file_all_info),
335				&rsp_iov[1], sizeof(struct smb2_file_all_info),
336				(char *)&cfid->file_all_info))
337		cfid->file_all_info_is_valid = true;
338
339	cfid->time = jiffies;
340	spin_unlock(&cfids->cfid_list_lock);
341	/* At this point the directory handle is fully cached */
342	rc = 0;
343
344oshr_free:
345	SMB2_open_free(&rqst[0]);
346	SMB2_query_info_free(&rqst[1]);
347	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
348	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
349	if (rc) {
350		spin_lock(&cfids->cfid_list_lock);
351		if (cfid->on_list) {
352			list_del(&cfid->entry);
353			cfid->on_list = false;
354			cfids->num_entries--;
355		}
356		if (cfid->has_lease) {
357			/*
358			 * We are guaranteed to have two references at this
359			 * point. One for the caller and one for a potential
360			 * lease. Release the Lease-ref so that the directory
361			 * will be closed when the caller closes the cached
362			 * handle.
363			 */
364			cfid->has_lease = false;
365			spin_unlock(&cfids->cfid_list_lock);
366			kref_put(&cfid->refcount, smb2_close_cached_fid);
367			goto out;
368		}
369		spin_unlock(&cfids->cfid_list_lock);
370	}
371out:
372	if (rc) {
373		if (cfid->is_open)
374			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
375				   cfid->fid.volatile_fid);
376		free_cached_dir(cfid);
377	} else {
378		*ret_cfid = cfid;
379		atomic_inc(&tcon->num_remote_opens);
380	}
381	kfree(utf16_path);
382
383	if (is_replayable_error(rc) &&
384	    smb2_should_replay(tcon, &retries, &cur_sleep))
385		goto replay_again;
386
387	return rc;
388}
389
390int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
391			      struct dentry *dentry,
392			      struct cached_fid **ret_cfid)
393{
394	struct cached_fid *cfid;
395	struct cached_fids *cfids = tcon->cfids;
396
397	if (cfids == NULL)
398		return -ENOENT;
399
400	spin_lock(&cfids->cfid_list_lock);
401	list_for_each_entry(cfid, &cfids->entries, entry) {
402		if (dentry && cfid->dentry == dentry) {
403			cifs_dbg(FYI, "found a cached root file handle by dentry\n");
404			kref_get(&cfid->refcount);
405			*ret_cfid = cfid;
406			spin_unlock(&cfids->cfid_list_lock);
407			return 0;
408		}
409	}
410	spin_unlock(&cfids->cfid_list_lock);
411	return -ENOENT;
412}
413
414static void
415smb2_close_cached_fid(struct kref *ref)
416{
417	struct cached_fid *cfid = container_of(ref, struct cached_fid,
418					       refcount);
419
420	spin_lock(&cfid->cfids->cfid_list_lock);
421	if (cfid->on_list) {
422		list_del(&cfid->entry);
423		cfid->on_list = false;
424		cfid->cfids->num_entries--;
425	}
426	spin_unlock(&cfid->cfids->cfid_list_lock);
427
428	dput(cfid->dentry);
429	cfid->dentry = NULL;
430
431	if (cfid->is_open) {
432		SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
433			   cfid->fid.volatile_fid);
434		atomic_dec(&cfid->tcon->num_remote_opens);
435	}
436
437	free_cached_dir(cfid);
438}
439
440void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
441			     const char *name, struct cifs_sb_info *cifs_sb)
442{
443	struct cached_fid *cfid = NULL;
444	int rc;
445
446	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
447	if (rc) {
448		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
449		return;
450	}
451	spin_lock(&cfid->cfids->cfid_list_lock);
452	if (cfid->has_lease) {
453		cfid->has_lease = false;
454		kref_put(&cfid->refcount, smb2_close_cached_fid);
455	}
456	spin_unlock(&cfid->cfids->cfid_list_lock);
457	close_cached_dir(cfid);
458}
459
460
461void close_cached_dir(struct cached_fid *cfid)
462{
463	kref_put(&cfid->refcount, smb2_close_cached_fid);
464}
465
466/*
467 * Called from cifs_kill_sb when we unmount a share
468 */
469void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
470{
471	struct rb_root *root = &cifs_sb->tlink_tree;
472	struct rb_node *node;
473	struct cached_fid *cfid;
474	struct cifs_tcon *tcon;
475	struct tcon_link *tlink;
476	struct cached_fids *cfids;
477
478	for (node = rb_first(root); node; node = rb_next(node)) {
479		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
480		tcon = tlink_tcon(tlink);
481		if (IS_ERR(tcon))
482			continue;
483		cfids = tcon->cfids;
484		if (cfids == NULL)
485			continue;
486		list_for_each_entry(cfid, &cfids->entries, entry) {
487			dput(cfid->dentry);
488			cfid->dentry = NULL;
489		}
490	}
491}
492
493/*
494 * Invalidate all cached dirs when a TCON has been reset
495 * due to a session loss.
496 */
497void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
498{
499	struct cached_fids *cfids = tcon->cfids;
500	struct cached_fid *cfid, *q;
501	LIST_HEAD(entry);
502
503	if (cfids == NULL)
504		return;
505
506	spin_lock(&cfids->cfid_list_lock);
507	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
508		list_move(&cfid->entry, &entry);
509		cfids->num_entries--;
510		cfid->is_open = false;
511		cfid->on_list = false;
512		/* To prevent race with smb2_cached_lease_break() */
513		kref_get(&cfid->refcount);
514	}
515	spin_unlock(&cfids->cfid_list_lock);
516
517	list_for_each_entry_safe(cfid, q, &entry, entry) {
518		list_del(&cfid->entry);
519		cancel_work_sync(&cfid->lease_break);
520		if (cfid->has_lease) {
521			/*
 522			 * The lease was never cancelled from the server so we
523			 * need to drop the reference.
524			 */
525			spin_lock(&cfids->cfid_list_lock);
526			cfid->has_lease = false;
527			spin_unlock(&cfids->cfid_list_lock);
528			kref_put(&cfid->refcount, smb2_close_cached_fid);
529		}
 530		/* Drop the extra reference opened above */
531		kref_put(&cfid->refcount, smb2_close_cached_fid);
532	}
533}
534
535static void
536smb2_cached_lease_break(struct work_struct *work)
537{
538	struct cached_fid *cfid = container_of(work,
539				struct cached_fid, lease_break);
540
541	spin_lock(&cfid->cfids->cfid_list_lock);
542	cfid->has_lease = false;
543	spin_unlock(&cfid->cfids->cfid_list_lock);
544	kref_put(&cfid->refcount, smb2_close_cached_fid);
545}
546
547int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
548{
549	struct cached_fids *cfids = tcon->cfids;
550	struct cached_fid *cfid;
551
552	if (cfids == NULL)
553		return false;
554
555	spin_lock(&cfids->cfid_list_lock);
556	list_for_each_entry(cfid, &cfids->entries, entry) {
557		if (cfid->has_lease &&
558		    !memcmp(lease_key,
559			    cfid->fid.lease_key,
560			    SMB2_LEASE_KEY_SIZE)) {
561			cfid->time = 0;
562			/*
 563			 * We found a lease; remove it from the list
564			 * so no threads can access it.
565			 */
566			list_del(&cfid->entry);
567			cfid->on_list = false;
568			cfids->num_entries--;
569
570			queue_work(cifsiod_wq,
571				   &cfid->lease_break);
572			spin_unlock(&cfids->cfid_list_lock);
573			return true;
574		}
575	}
576	spin_unlock(&cfids->cfid_list_lock);
577	return false;
578}
579
580static struct cached_fid *init_cached_dir(const char *path)
581{
582	struct cached_fid *cfid;
583
584	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
585	if (!cfid)
586		return NULL;
587	cfid->path = kstrdup(path, GFP_ATOMIC);
588	if (!cfid->path) {
589		kfree(cfid);
590		return NULL;
591	}
592
593	INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
594	INIT_LIST_HEAD(&cfid->entry);
595	INIT_LIST_HEAD(&cfid->dirents.entries);
596	mutex_init(&cfid->dirents.de_mutex);
597	spin_lock_init(&cfid->fid_lock);
598	kref_init(&cfid->refcount);
599	return cfid;
600}
601
602static void free_cached_dir(struct cached_fid *cfid)
603{
604	struct cached_dirent *dirent, *q;
605
606	dput(cfid->dentry);
607	cfid->dentry = NULL;
608
609	/*
610	 * Delete all cached dirent names
611	 */
612	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
613		list_del(&dirent->entry);
614		kfree(dirent->name);
615		kfree(dirent);
616	}
617
618	kfree(cfid->path);
619	cfid->path = NULL;
620	kfree(cfid);
621}
622
623static void cfids_laundromat_worker(struct work_struct *work)
624{
625	struct cached_fids *cfids;
626	struct cached_fid *cfid, *q;
627	LIST_HEAD(entry);
628
629	cfids = container_of(work, struct cached_fids, laundromat_work.work);
630
631	spin_lock(&cfids->cfid_list_lock);
632	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
633		if (cfid->time &&
634		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
635			cfid->on_list = false;
636			list_move(&cfid->entry, &entry);
637			cfids->num_entries--;
638			/* To prevent race with smb2_cached_lease_break() */
639			kref_get(&cfid->refcount);
640		}
641	}
642	spin_unlock(&cfids->cfid_list_lock);
643
644	list_for_each_entry_safe(cfid, q, &entry, entry) {
645		list_del(&cfid->entry);
646		/*
647		 * Cancel and wait for the work to finish in case we are racing
648		 * with it.
649		 */
650		cancel_work_sync(&cfid->lease_break);
651		if (cfid->has_lease) {
652			/*
653			 * Our lease has not yet been cancelled from the server
654			 * so we need to drop the reference.
655			 */
656			spin_lock(&cfids->cfid_list_lock);
657			cfid->has_lease = false;
658			spin_unlock(&cfids->cfid_list_lock);
659			kref_put(&cfid->refcount, smb2_close_cached_fid);
660		}
661		/* Drop the extra reference opened above */
662		kref_put(&cfid->refcount, smb2_close_cached_fid);
663	}
664	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
665			   dir_cache_timeout * HZ);
666}
667
668struct cached_fids *init_cached_dirs(void)
669{
670	struct cached_fids *cfids;
671
672	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
673	if (!cfids)
674		return NULL;
675	spin_lock_init(&cfids->cfid_list_lock);
676	INIT_LIST_HEAD(&cfids->entries);
677
678	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
679	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
680			   dir_cache_timeout * HZ);
681
682	return cfids;
683}
684
685/*
686 * Called from tconInfoFree when we are tearing down the tcon.
687 * There are no active users or open files/directories at this point.
688 */
689void free_cached_dirs(struct cached_fids *cfids)
690{
691	struct cached_fid *cfid, *q;
692	LIST_HEAD(entry);
693
694	if (cfids == NULL)
695		return;
696
697	cancel_delayed_work_sync(&cfids->laundromat_work);
698
699	spin_lock(&cfids->cfid_list_lock);
700	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
701		cfid->on_list = false;
702		cfid->is_open = false;
703		list_move(&cfid->entry, &entry);
704	}
705	spin_unlock(&cfids->cfid_list_lock);
706
707	list_for_each_entry_safe(cfid, q, &entry, entry) {
708		list_del(&cfid->entry);
709		free_cached_dir(cfid);
710	}
711
712	kfree(cfids);
713}
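
Both versions above expose the same small interface to the rest of the client: open_cached_dir() (or open_cached_dir_by_dentry()) returns a referenced struct cached_fid, and close_cached_dir() drops that reference. Below is a minimal caller sketch, assuming a context that already holds a valid xid, tcon and cifs_sb; the wrapper function name is invented for illustration and is not part of cached_dir.c, and a real call site would fall back to a regular SMB2 open when no cached handle is available.

static int example_use_cached_root(unsigned int xid, struct cifs_tcon *tcon,
				   struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	/* lookup_only == false: open and cache the share root if not cached yet */
	rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid);
	if (rc)
		return rc;

	/*
	 * cfid->fid now holds the persistent/volatile ids of the open
	 * directory; cfid->file_all_info holds cached attributes when
	 * cfid->file_all_info_is_valid is set.
	 */

	/* Drop the reference taken by open_cached_dir() */
	close_cached_dir(cfid);
	return 0;
}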