// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
 * Copyright (C) 2019 Samsung Electronics Co., Ltd.
 */

#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include "glob.h"
#include "vfs_cache.h"
#include "oplock.h"
#include "vfs.h"
#include "connection.h"
#include "mgmt/tree_connect.h"
#include "mgmt/user_session.h"
#include "smb_common.h"
#include "server.h"

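/*
 * Deletion state kept in ksmbd_inode->m_flags: S_DEL_PENDING marks a
 * delete that has been requested but may still be cancelled,
 * S_DEL_ON_CLS unlinks the file on the last close, and
 * S_DEL_ON_CLS_STREAM removes a stream xattr on close instead.
 */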
#define S_DEL_PENDING			1
#define S_DEL_ON_CLS			2
#define S_DEL_ON_CLS_STREAM		8

static unsigned int inode_hash_mask __read_mostly;
static unsigned int inode_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static DEFINE_RWLOCK(inode_hash_lock);

static struct ksmbd_file_table global_ft;
static atomic_long_t fd_limit;
static struct kmem_cache *filp_cache;

static bool durable_scavenger_running;
static DEFINE_MUTEX(durable_scavenger_lock);
static wait_queue_head_t dh_wq;

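/*
 * fd_limit is the remaining budget of files the server may keep open,
 * capped by get_max_files().  fd_limit_depleted() reserves one slot and
 * fails when the budget is exhausted; fd_limit_close() returns a slot.
 */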
void ksmbd_set_fd_limit(unsigned long limit)
{
	limit = min(limit, get_max_files());
	atomic_long_set(&fd_limit, limit);
}

static bool fd_limit_depleted(void)
{
	long v = atomic_long_dec_return(&fd_limit);

	if (v >= 0)
		return false;
	atomic_long_inc(&fd_limit);
	return true;
}

static void fd_limit_close(void)
{
	atomic_long_inc(&fd_limit);
}

/*
 * INODE hash
 */

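/*
 * ksmbd inodes are hashed by dentry pointer (one ksmbd_inode per
 * dentry), so separate dentries of the same on-disk inode get distinct
 * entries.  Lookups take inode_hash_lock for reading; insertion in
 * ksmbd_inode_get() re-checks under the write lock.
 */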
static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
		L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift);
	return tmp & inode_hash_mask;
}

static struct ksmbd_inode *__ksmbd_inode_lookup(struct dentry *de)
{
	struct hlist_head *head = inode_hashtable +
		inode_hash(d_inode(de)->i_sb, (unsigned long)de);
	struct ksmbd_inode *ci = NULL, *ret_ci = NULL;

	hlist_for_each_entry(ci, head, m_hash) {
		if (ci->m_de == de) {
			if (atomic_inc_not_zero(&ci->m_count))
				ret_ci = ci;
			break;
		}
	}
	return ret_ci;
}

static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
{
	return __ksmbd_inode_lookup(fp->filp->f_path.dentry);
}

struct ksmbd_inode *ksmbd_inode_lookup_lock(struct dentry *d)
{
	struct ksmbd_inode *ci;

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(d);
	read_unlock(&inode_hash_lock);

	return ci;
}

int ksmbd_query_inode_status(struct dentry *dentry)
{
	struct ksmbd_inode *ci;
	int ret = KSMBD_INODE_STATUS_UNKNOWN;

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(dentry);
	if (ci) {
		ret = KSMBD_INODE_STATUS_OK;
		if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
			ret = KSMBD_INODE_STATUS_PENDING_DELETE;
		atomic_dec(&ci->m_count);
	}
	read_unlock(&inode_hash_lock);
	return ret;
}

bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
{
	return (fp->f_ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS));
}

void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
{
	fp->f_ci->m_flags |= S_DEL_PENDING;
}

void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp)
{
	fp->f_ci->m_flags &= ~S_DEL_PENDING;
}

void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
				  int file_info)
{
	if (ksmbd_stream_fd(fp)) {
		fp->f_ci->m_flags |= S_DEL_ON_CLS_STREAM;
		return;
	}

	fp->f_ci->m_flags |= S_DEL_ON_CLS;
}

static void ksmbd_inode_hash(struct ksmbd_inode *ci)
{
	struct hlist_head *b = inode_hashtable +
		inode_hash(d_inode(ci->m_de)->i_sb, (unsigned long)ci->m_de);

	hlist_add_head(&ci->m_hash, b);
}

static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
{
	write_lock(&inode_hash_lock);
	hlist_del_init(&ci->m_hash);
	write_unlock(&inode_hash_lock);
}

static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
{
	atomic_set(&ci->m_count, 1);
	atomic_set(&ci->op_count, 0);
	atomic_set(&ci->sop_count, 0);
	ci->m_flags = 0;
	ci->m_fattr = 0;
	INIT_LIST_HEAD(&ci->m_fp_list);
	INIT_LIST_HEAD(&ci->m_op_list);
	init_rwsem(&ci->m_lock);
	ci->m_de = fp->filp->f_path.dentry;
	return 0;
}

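/*
 * Find the ksmbd_inode for fp's dentry or allocate a new one.  The
 * lookup is repeated under the write lock so that two racing opens of
 * the same dentry converge on a single shared ksmbd_inode.
 */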
static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci, *tmpci;
	int rc;

	read_lock(&inode_hash_lock);
	ci = ksmbd_inode_lookup(fp);
	read_unlock(&inode_hash_lock);
	if (ci)
		return ci;

	ci = kmalloc(sizeof(struct ksmbd_inode), KSMBD_DEFAULT_GFP);
	if (!ci)
		return NULL;

	rc = ksmbd_inode_init(ci, fp);
	if (rc) {
		pr_err("inode initialization failed\n");
		kfree(ci);
		return NULL;
	}

	write_lock(&inode_hash_lock);
	tmpci = ksmbd_inode_lookup(fp);
	if (!tmpci) {
		ksmbd_inode_hash(ci);
	} else {
		kfree(ci);
		ci = tmpci;
	}
	write_unlock(&inode_hash_lock);
	return ci;
}

static void ksmbd_inode_free(struct ksmbd_inode *ci)
{
	ksmbd_inode_unhash(ci);
	kfree(ci);
}

void ksmbd_inode_put(struct ksmbd_inode *ci)
{
	if (atomic_dec_and_test(&ci->m_count))
		ksmbd_inode_free(ci);
}

int __init ksmbd_inode_hash_init(void)
{
	unsigned int loop;
	unsigned long numentries = 16384;
	unsigned long bucketsize = sizeof(struct hlist_head);
	unsigned long size;

	inode_hash_shift = ilog2(numentries);
	inode_hash_mask = (1 << inode_hash_shift) - 1;

	size = bucketsize << inode_hash_shift;

	/* init master fp hash table */
	inode_hashtable = vmalloc(size);
	if (!inode_hashtable)
		return -ENOMEM;

	for (loop = 0; loop < (1U << inode_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
	return 0;
}

void ksmbd_release_inode_hash(void)
{
	vfree(inode_hashtable);
}

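/*
 * Per-fd inode teardown: for stream fds a pending S_DEL_ON_CLS_STREAM
 * removes the stream xattr, then the ksmbd_inode reference is dropped;
 * on the last put, S_DEL_ON_CLS/S_DEL_PENDING makes the close unlink
 * the file itself.
 */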
static void __ksmbd_inode_close(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci = fp->f_ci;
	int err;
	struct file *filp;

	filp = fp->filp;
	if (ksmbd_stream_fd(fp) && (ci->m_flags & S_DEL_ON_CLS_STREAM)) {
		ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
		err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp),
					     &filp->f_path,
					     fp->stream.name,
					     true);
		if (err)
			pr_err("remove xattr failed : %s\n",
			       fp->stream.name);
	}

	if (atomic_dec_and_test(&ci->m_count)) {
		down_write(&ci->m_lock);
		if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
			ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
			up_write(&ci->m_lock);
			ksmbd_vfs_unlink(filp);
			down_write(&ci->m_lock);
		}
		up_write(&ci->m_lock);

		ksmbd_inode_free(ci);
	}
}

static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
	if (!has_file_id(fp->persistent_id))
		return;

	idr_remove(global_ft.idr, fp->persistent_id);
}

static void ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
	write_lock(&global_ft.lock);
	__ksmbd_remove_durable_fd(fp);
	write_unlock(&global_ft.lock);
	if (waitqueue_active(&dh_wq))
		wake_up(&dh_wq);
}

static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	if (!has_file_id(fp->volatile_id))
		return;

	down_write(&fp->f_ci->m_lock);
	list_del_init(&fp->node);
	up_write(&fp->f_ci->m_lock);

	write_lock(&ft->lock);
	idr_remove(ft->idr, fp->volatile_id);
	write_unlock(&ft->lock);
}

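/*
 * Final teardown of a ksmbd_file: give back the fd-limit slot and the
 * durable id, drop the fd from its table and the per-inode list,
 * release the oplock, close the underlying file, and free any
 * byte-range locks still attached to this fd.
 */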
static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	struct file *filp;
	struct ksmbd_lock *smb_lock, *tmp_lock;

	fd_limit_close();
	ksmbd_remove_durable_fd(fp);
	if (ft)
		__ksmbd_remove_fd(ft, fp);

	close_id_del_oplock(fp);
	filp = fp->filp;

	__ksmbd_inode_close(fp);
	if (!IS_ERR_OR_NULL(filp))
		fput(filp);

	/* Because the reference count of fp is 0, it is guaranteed that
	 * there are no accesses to fp->lock_list.
	 */
	list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
		spin_lock(&fp->conn->llist_lock);
		list_del(&smb_lock->clist);
		spin_unlock(&fp->conn->llist_lock);

		list_del(&smb_lock->flist);
		locks_free_lock(smb_lock->fl);
		kfree(smb_lock);
	}

	if (ksmbd_stream_fd(fp))
		kfree(fp->stream.name);
	kmem_cache_free(filp_cache, fp);
}

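/*
 * Grab a lookup reference on fp.  Only files in FP_INITED state with a
 * non-zero refcount can be handed out; anything mid-open or mid-close
 * fails the lookup.
 */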
static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
{
	if (fp->f_state != FP_INITED)
		return NULL;

	if (!atomic_inc_not_zero(&fp->refcount))
		return NULL;
	return fp;
}

static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
					    u64 id)
{
	struct ksmbd_file *fp;

	if (!has_file_id(id))
		return NULL;

	read_lock(&ft->lock);
	fp = idr_find(ft->idr, id);
	if (fp)
		fp = ksmbd_fp_get(fp);
	read_unlock(&ft->lock);
	return fp;
}

static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	__ksmbd_close_fd(&work->sess->file_table, fp);
	atomic_dec(&work->conn->stats.open_files_count);
}

static void set_close_state_blocked_works(struct ksmbd_file *fp)
{
	struct ksmbd_work *cancel_work;

	spin_lock(&fp->f_lock);
	list_for_each_entry(cancel_work, &fp->blocked_works,
				 fp_entry) {
		cancel_work->state = KSMBD_WORK_CLOSED;
		cancel_work->cancel_fn(cancel_work->cancel_argv);
	}
	spin_unlock(&fp->f_lock);
}

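/*
 * Close an open by its volatile id.  The fd is moved to FP_CLOSED under
 * the table lock so no new lookups can succeed, and the base reference
 * from ksmbd_open_fd() is dropped; teardown itself runs only once every
 * concurrent user has called ksmbd_fd_put().
 */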
int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
{
	struct ksmbd_file	*fp;
	struct ksmbd_file_table	*ft;

	if (!has_file_id(id))
		return 0;

	ft = &work->sess->file_table;
	write_lock(&ft->lock);
	fp = idr_find(ft->idr, id);
	if (fp) {
		set_close_state_blocked_works(fp);

		if (fp->f_state != FP_INITED)
			fp = NULL;
		else {
			fp->f_state = FP_CLOSED;
			if (!atomic_dec_and_test(&fp->refcount))
				fp = NULL;
		}
	}
	write_unlock(&ft->lock);

	if (!fp)
		return -EINVAL;

	__put_fd_final(work, fp);
	return 0;
}

void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	if (!fp)
		return;

	if (!atomic_dec_and_test(&fp->refcount))
		return;
	__put_fd_final(work, fp);
}

static bool __sanity_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)
{
	if (!fp)
		return false;
	if (fp->tcon != tcon)
		return false;
	return true;
}

struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id)
{
	return __ksmbd_lookup_fd(&work->sess->file_table, id);
}

struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id)
{
	struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);

	if (__sanity_check(work->tcon, fp))
		return fp;

	ksmbd_fd_put(work, fp);
	return NULL;
}

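/*
 * Like ksmbd_lookup_fd_fast(), but an absent id falls back to the
 * compound fid/pfid (compound requests reuse the id of the preceding
 * CREATE) and the persistent id is verified as well.
 */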
struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
					u64 pid)
{
	struct ksmbd_file *fp;

	if (!has_file_id(id)) {
		id = work->compound_fid;
		pid = work->compound_pfid;
	}

	fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
	if (!__sanity_check(work->tcon, fp)) {
		ksmbd_fd_put(work, fp);
		return NULL;
	}
	if (fp->persistent_id != pid) {
		ksmbd_fd_put(work, fp);
		return NULL;
	}
	return fp;
}

struct ksmbd_file *ksmbd_lookup_global_fd(unsigned long long id)
{
	return __ksmbd_lookup_fd(&global_ft, id);
}

struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
{
	struct ksmbd_file *fp;

	fp = __ksmbd_lookup_fd(&global_ft, id);
	if (fp && (fp->conn ||
		   (fp->durable_scavenger_timeout &&
		    (fp->durable_scavenger_timeout <
		     jiffies_to_msecs(jiffies))))) {
		ksmbd_put_durable_fd(fp);
		fp = NULL;
	}

	return fp;
}

void ksmbd_put_durable_fd(struct ksmbd_file *fp)
{
	if (!atomic_dec_and_test(&fp->refcount))
		return;

	__ksmbd_close_fd(NULL, fp);
}

struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
{
	struct ksmbd_file	*fp = NULL;
	unsigned int		id;

	read_lock(&global_ft.lock);
	idr_for_each_entry(global_ft.idr, fp, id) {
		if (!memcmp(fp->create_guid,
			    cguid,
			    SMB2_CREATE_GUID_SIZE)) {
			fp = ksmbd_fp_get(fp);
			break;
		}
	}
	read_unlock(&global_ft.lock);

	return fp;
}

struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
{
	struct ksmbd_file	*lfp;
	struct ksmbd_inode	*ci;
	struct inode		*inode = d_inode(dentry);

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(dentry);
	read_unlock(&inode_hash_lock);
	if (!ci)
		return NULL;

	down_read(&ci->m_lock);
	list_for_each_entry(lfp, &ci->m_fp_list, node) {
		if (inode == file_inode(lfp->filp)) {
			atomic_dec(&ci->m_count);
			lfp = ksmbd_fp_get(lfp);
			up_read(&ci->m_lock);
			return lfp;
		}
	}
	atomic_dec(&ci->m_count);
	up_read(&ci->m_lock);
	return NULL;
}

#define OPEN_ID_TYPE_VOLATILE_ID	(0)
#define OPEN_ID_TYPE_PERSISTENT_ID	(1)

static void __open_id_set(struct ksmbd_file *fp, u64 id, int type)
{
	if (type == OPEN_ID_TYPE_VOLATILE_ID)
		fp->volatile_id = id;
	if (type == OPEN_ID_TYPE_PERSISTENT_ID)
		fp->persistent_id = id;
}

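/*
 * Assign fp an id in the given table via idr_alloc_cyclic().  Volatile
 * ids charge the fd limit and live in a session's file table;
 * persistent ids live in global_ft for durable-handle lookup.
 */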
static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
		     int type)
{
	u64			id = 0;
	int			ret;

	if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) {
		__open_id_set(fp, KSMBD_NO_FID, type);
		return -EMFILE;
	}

	idr_preload(KSMBD_DEFAULT_GFP);
	write_lock(&ft->lock);
	ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
	if (ret >= 0) {
		id = ret;
		ret = 0;
	} else {
		id = KSMBD_NO_FID;
		fd_limit_close();
	}

	__open_id_set(fp, id, type);
	write_unlock(&ft->lock);
	idr_preload_end();
	return ret;
}

unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp)
{
	__open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID);
	return fp->persistent_id;
}

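/*
 * A sketch of the typical open lifetime, as the SMB2 CREATE/CLOSE
 * handlers appear to drive it (error handling omitted):
 *
 *	fp = ksmbd_open_fd(work, filp);	  // refcount 1, f_state FP_NEW
 *	ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED);
 *	...
 *	fp = ksmbd_lookup_fd_fast(work, id);  // per-request reference
 *	ksmbd_fd_put(work, fp);
 *	...
 *	ksmbd_close_fd(work, volatile_id);    // drops the base reference
 */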
struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
{
	struct ksmbd_file *fp;
	int ret;

	fp = kmem_cache_zalloc(filp_cache, KSMBD_DEFAULT_GFP);
	if (!fp) {
		pr_err("Failed to allocate memory\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&fp->blocked_works);
	INIT_LIST_HEAD(&fp->node);
	INIT_LIST_HEAD(&fp->lock_list);
	spin_lock_init(&fp->f_lock);
	atomic_set(&fp->refcount, 1);

	fp->filp		= filp;
	fp->conn		= work->conn;
	fp->tcon		= work->tcon;
	fp->volatile_id		= KSMBD_NO_FID;
	fp->persistent_id	= KSMBD_NO_FID;
	fp->f_state		= FP_NEW;
	fp->f_ci		= ksmbd_inode_get(fp);

	if (!fp->f_ci) {
		ret = -ENOMEM;
		goto err_out;
	}

	ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
	if (ret) {
		ksmbd_inode_put(fp->f_ci);
		goto err_out;
	}

	atomic_inc(&work->conn->stats.open_files_count);
	return fp;

err_out:
	kmem_cache_free(filp_cache, fp);
	return ERR_PTR(ret);
}

void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
			 unsigned int state)
{
	if (!fp)
		return;

	write_lock(&ft->lock);
	fp->f_state = state;
	write_unlock(&ft->lock);
}

static int
__close_file_table_ids(struct ksmbd_file_table *ft,
		       struct ksmbd_tree_connect *tcon,
		       bool (*skip)(struct ksmbd_tree_connect *tcon,
				    struct ksmbd_file *fp))
{
	unsigned int			id;
	struct ksmbd_file		*fp;
	int				num = 0;

	idr_for_each_entry(ft->idr, fp, id) {
		if (skip(tcon, fp))
			continue;

		set_close_state_blocked_works(fp);

		if (!atomic_dec_and_test(&fp->refcount))
			continue;
		__ksmbd_close_fd(ft, fp);
		num++;
	}
	return num;
}

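/*
 * A disconnected fd can be reclaimed later only while no oplock break
 * is in flight, and only if it is resilient or persistent, or durable
 * with a handle-caching lease or a batch oplock.
 */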
static inline bool is_reconnectable(struct ksmbd_file *fp)
{
	struct oplock_info *opinfo = opinfo_get(fp);
	bool reconn = false;

	if (!opinfo)
		return false;

	if (opinfo->op_state != OPLOCK_STATE_NONE) {
		opinfo_put(opinfo);
		return false;
	}

	if (fp->is_resilient || fp->is_persistent)
		reconn = true;
	else if (fp->is_durable && opinfo->is_lease &&
		 opinfo->o_lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
		reconn = true;
	else if (fp->is_durable && opinfo->level == SMB2_OPLOCK_LEVEL_BATCH)
		reconn = true;

	opinfo_put(opinfo);
	return reconn;
}

static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
			       struct ksmbd_file *fp)
{
	return fp->tcon != tcon;
}

static bool ksmbd_durable_scavenger_alive(void)
{
	mutex_lock(&durable_scavenger_lock);
	if (!durable_scavenger_running) {
		mutex_unlock(&durable_scavenger_lock);
		return false;
	}
	mutex_unlock(&durable_scavenger_lock);

	if (kthread_should_stop())
		return false;

	if (idr_is_empty(global_ft.idr))
		return false;

	return true;
}

static void ksmbd_scavenger_dispose_dh(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ksmbd_file *fp;

		fp = list_first_entry(head, struct ksmbd_file, node);
		list_del_init(&fp->node);
		__ksmbd_close_fd(NULL, fp);
	}
}

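/*
 * Durable-handle scavenger thread: sleep for the shortest remaining
 * timeout, then move every orphaned fd (refcount 1, no connection)
 * whose scavenger timeout has expired onto a private list and dispose
 * of it outside global_ft.lock.
 */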
static int ksmbd_durable_scavenger(void *dummy)
{
	struct ksmbd_file *fp = NULL;
	unsigned int id;
	unsigned int min_timeout = 1;
	bool found_fp_timeout;
	LIST_HEAD(scavenger_list);
	unsigned long remaining_jiffies;

	__module_get(THIS_MODULE);

	set_freezable();
	while (ksmbd_durable_scavenger_alive()) {
		if (try_to_freeze())
			continue;

		found_fp_timeout = false;

		remaining_jiffies = wait_event_timeout(dh_wq,
				   ksmbd_durable_scavenger_alive() == false,
				   __msecs_to_jiffies(min_timeout));
		if (remaining_jiffies)
			min_timeout = jiffies_to_msecs(remaining_jiffies);
		else
			min_timeout = DURABLE_HANDLE_MAX_TIMEOUT;

		write_lock(&global_ft.lock);
		idr_for_each_entry(global_ft.idr, fp, id) {
			if (!fp->durable_timeout)
				continue;

			if (atomic_read(&fp->refcount) > 1 ||
			    fp->conn)
				continue;

			found_fp_timeout = true;
			if (fp->durable_scavenger_timeout <=
			    jiffies_to_msecs(jiffies)) {
				__ksmbd_remove_durable_fd(fp);
				list_add(&fp->node, &scavenger_list);
			} else {
				unsigned long durable_timeout;

				durable_timeout =
					fp->durable_scavenger_timeout -
						jiffies_to_msecs(jiffies);

				if (min_timeout > durable_timeout)
					min_timeout = durable_timeout;
			}
		}
		write_unlock(&global_ft.lock);

		ksmbd_scavenger_dispose_dh(&scavenger_list);

		if (found_fp_timeout == false)
			break;
	}

	mutex_lock(&durable_scavenger_lock);
	durable_scavenger_running = false;
	mutex_unlock(&durable_scavenger_lock);

	module_put(THIS_MODULE);

	return 0;
}

void ksmbd_launch_ksmbd_durable_scavenger(void)
{
	if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE))
		return;

	mutex_lock(&durable_scavenger_lock);
	if (durable_scavenger_running == true) {
		mutex_unlock(&durable_scavenger_lock);
		return;
	}

	durable_scavenger_running = true;

	server_conf.dh_task = kthread_run(ksmbd_durable_scavenger,
				     (void *)NULL, "ksmbd-durable-scavenger");
	if (IS_ERR(server_conf.dh_task))
		pr_err("cannot start durable scavenger thread, err : %ld\n",
		       PTR_ERR(server_conf.dh_task));
	mutex_unlock(&durable_scavenger_lock);
}

void ksmbd_stop_durable_scavenger(void)
{
	if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE))
		return;

	mutex_lock(&durable_scavenger_lock);
	if (!durable_scavenger_running) {
		mutex_unlock(&durable_scavenger_lock);
		return;
	}

	durable_scavenger_running = false;
	if (waitqueue_active(&dh_wq))
		wake_up(&dh_wq);
	mutex_unlock(&durable_scavenger_lock);
	kthread_stop(server_conf.dh_task);
}

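/*
 * Skip callback used when a session goes away.  Returning true means
 * "keep this fd": a reconnectable fd is detached from the dying
 * connection (as are its oplocks) and left reachable through global_ft
 * so a durable reconnect can reclaim it before the scavenger timeout
 * fires.
 */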
static bool session_fd_check(struct ksmbd_tree_connect *tcon,
			     struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci;
	struct oplock_info *op;
	struct ksmbd_conn *conn;

	if (!is_reconnectable(fp))
		return false;

	conn = fp->conn;
	ci = fp->f_ci;
	down_write(&ci->m_lock);
	list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
		if (op->conn != conn)
			continue;
		if (op->conn && atomic_dec_and_test(&op->conn->refcnt))
			kfree(op->conn);
		op->conn = NULL;
	}
	up_write(&ci->m_lock);

	fp->conn = NULL;
	fp->tcon = NULL;
	fp->volatile_id = KSMBD_NO_FID;

	if (fp->durable_timeout)
		fp->durable_scavenger_timeout =
			jiffies_to_msecs(jiffies) + fp->durable_timeout;

	return true;
}

void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
{
	int num = __close_file_table_ids(&work->sess->file_table,
					 work->tcon,
					 tree_conn_fd_check);

	atomic_sub(num, &work->conn->stats.open_files_count);
}

void ksmbd_close_session_fds(struct ksmbd_work *work)
{
	int num = __close_file_table_ids(&work->sess->file_table,
					 work->tcon,
					 session_fd_check);

	atomic_sub(num, &work->conn->stats.open_files_count);
}

int ksmbd_init_global_file_table(void)
{
	return ksmbd_init_file_table(&global_ft);
}

void ksmbd_free_global_file_table(void)
{
	struct ksmbd_file	*fp = NULL;
	unsigned int		id;

	idr_for_each_entry(global_ft.idr, fp, id) {
		ksmbd_remove_durable_fd(fp);
		__ksmbd_close_fd(NULL, fp);
	}

	idr_destroy(global_ft.idr);
	kfree(global_ft.idr);
}

int ksmbd_validate_name_reconnect(struct ksmbd_share_config *share,
				  struct ksmbd_file *fp, char *name)
{
	char *pathname, *ab_pathname;
	int ret = 0;

	pathname = kmalloc(PATH_MAX, KSMBD_DEFAULT_GFP);
	if (!pathname)
		return -EACCES;

	ab_pathname = d_path(&fp->filp->f_path, pathname, PATH_MAX);
	if (IS_ERR(ab_pathname)) {
		kfree(pathname);
		return -EACCES;
	}

	if (name && strcmp(&ab_pathname[share->path_sz + 1], name)) {
		ksmbd_debug(SMB, "invalid name reconnect %s\n", name);
		ret = -EINVAL;
	}

	kfree(pathname);

	return ret;
}

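/*
 * Reclaim a disconnected durable fd for a new connection: reattach the
 * oplocks that were detached in session_fd_check() and allocate a fresh
 * volatile id in the reconnecting session's file table.
 */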
int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci;
	struct oplock_info *op;

	if (!fp->is_durable || fp->conn || fp->tcon) {
		pr_err("Invalid durable fd [%p:%p]\n", fp->conn, fp->tcon);
		return -EBADF;
	}

	if (has_file_id(fp->volatile_id)) {
		pr_err("Still in use durable fd: %llu\n", fp->volatile_id);
		return -EBADF;
	}

	fp->conn = work->conn;
	fp->tcon = work->tcon;

	ci = fp->f_ci;
	down_write(&ci->m_lock);
	list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
		if (op->conn)
			continue;
		op->conn = fp->conn;
		atomic_inc(&op->conn->refcnt);
	}
	up_write(&ci->m_lock);

	fp->f_state = FP_NEW;
	__open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
	if (!has_file_id(fp->volatile_id)) {
		fp->conn = NULL;
		fp->tcon = NULL;
		return -EBADF;
	}
	return 0;
}

int ksmbd_init_file_table(struct ksmbd_file_table *ft)
{
	ft->idr = kzalloc(sizeof(struct idr), KSMBD_DEFAULT_GFP);
	if (!ft->idr)
		return -ENOMEM;

	idr_init(ft->idr);
	rwlock_init(&ft->lock);
	return 0;
}

void ksmbd_destroy_file_table(struct ksmbd_file_table *ft)
{
	if (!ft->idr)
		return;

	__close_file_table_ids(ft, NULL, session_fd_check);
	idr_destroy(ft->idr);
	kfree(ft->idr);
	ft->idr = NULL;
}

int ksmbd_init_file_cache(void)
{
	filp_cache = kmem_cache_create("ksmbd_file_cache",
				       sizeof(struct ksmbd_file), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!filp_cache)
		goto out;

	init_waitqueue_head(&dh_wq);

	return 0;

out:
	pr_err("failed to allocate file cache\n");
	return -ENOMEM;
}

void ksmbd_exit_file_cache(void)
{
	kmem_cache_destroy(filp_cache);
}