   1// SPDX-License-Identifier: LGPL-2.1
   2/*
   3 *
   4 *   Copyright (C) International Business Machines  Corp., 2002,2008
   5 *   Author(s): Steve French (sfrench@us.ibm.com)
   6 *
   7 *   Common Internet FileSystem (CIFS) client
   8 *
   9 */
  10
  11/* Note that BB means BUGBUG (ie something to fix eventually) */
  12
  13#include <linux/module.h>
  14#include <linux/fs.h>
  15#include <linux/filelock.h>
  16#include <linux/mount.h>
  17#include <linux/slab.h>
  18#include <linux/init.h>
  19#include <linux/list.h>
  20#include <linux/seq_file.h>
  21#include <linux/vfs.h>
  22#include <linux/mempool.h>
  23#include <linux/delay.h>
  24#include <linux/kthread.h>
  25#include <linux/freezer.h>
  26#include <linux/namei.h>
  27#include <linux/random.h>
  28#include <linux/splice.h>
  29#include <linux/uuid.h>
  30#include <linux/xattr.h>
  31#include <uapi/linux/magic.h>
  32#include <net/ipv6.h>
  33#include "cifsfs.h"
  34#include "cifspdu.h"
  35#define DECLARE_GLOBALS_HERE
  36#include "cifsglob.h"
  37#include "cifsproto.h"
  38#include "cifs_debug.h"
  39#include "cifs_fs_sb.h"
  40#include <linux/mm.h>
  41#include <linux/key-type.h>
  42#include "cifs_spnego.h"
  43#include "fscache.h"
  44#ifdef CONFIG_CIFS_DFS_UPCALL
  45#include "dfs_cache.h"
  46#endif
  47#ifdef CONFIG_CIFS_SWN_UPCALL
  48#include "netlink.h"
  49#endif
  50#include "fs_context.h"
  51#include "cached_dir.h"
  52
  53/*
  54 * DOS dates from 1980/1/1 through 2107/12/31
  55 * Protocol specifications indicate the range should be to 119, which
  56 * limits maximum year to 2099. But this range has not been checked.
  57 */
  58#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
  59#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
  60#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
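/*
 * DOS dates pack (year - 1980) into bits 15-9, the month into bits 8-5 and
 * the day into bits 4-0; DOS times pack hours into bits 15-11, minutes into
 * bits 10-5 and two-second increments into bits 4-0, so SMB_TIME_MAX above
 * is 23:59:58, the latest representable time of day.
 */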
  61
  62int cifsFYI = 0;
  63bool traceSMB;
  64bool enable_oplocks = true;
  65bool linuxExtEnabled = true;
  66bool lookupCacheEnabled = true;
  67bool disable_legacy_dialects; /* false by default */
  68bool enable_gcm_256 = true;
  69bool require_gcm_256; /* false by default */
  70bool enable_negotiate_signing; /* false by default */
  71unsigned int global_secflags = CIFSSEC_DEF;
  72/* unsigned int ntlmv2_support = 0; */
  73unsigned int sign_CIFS_PDUs = 1;
  74
  75/*
  76 * Global transaction id (XID) information
  77 */
  78unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
  79unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
  80unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
  81spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
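/*
 * An XID is handed out by get_xid() and returned with free_xid() around each
 * VFS-level operation; it tags the operation in debug messages and lets the
 * counters above report how many requests are currently active.
 */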
  82
  83/*
  84 *  Global counters, updated atomically
  85 */
  86atomic_t sesInfoAllocCount;
  87atomic_t tconInfoAllocCount;
  88atomic_t tcpSesNextId;
  89atomic_t tcpSesAllocCount;
  90atomic_t tcpSesReconnectCount;
  91atomic_t tconInfoReconnectCount;
  92
  93atomic_t mid_count;
  94atomic_t buf_alloc_count;
  95atomic_t small_buf_alloc_count;
  96#ifdef CONFIG_CIFS_STATS2
  97atomic_t total_buf_alloc_count;
  98atomic_t total_small_buf_alloc_count;
  99#endif/* STATS2 */
 100struct list_head	cifs_tcp_ses_list;
 101spinlock_t		cifs_tcp_ses_lock;
 102static const struct super_operations cifs_super_ops;
 103unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
 104module_param(CIFSMaxBufSize, uint, 0444);
 105MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
 106				 "for CIFS requests. "
 107				 "Default: 16384 Range: 8192 to 130048");
 108unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
 109module_param(cifs_min_rcv, uint, 0444);
 110MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
 111				"1 to 64");
 112unsigned int cifs_min_small = 30;
 113module_param(cifs_min_small, uint, 0444);
 114MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
 115				 "Range: 2 to 256");
 116unsigned int cifs_max_pending = CIFS_MAX_REQ;
 117module_param(cifs_max_pending, uint, 0444);
 118MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
 119				   "CIFS/SMB1 dialect (N/A for SMB3) "
 120				   "Default: 32767 Range: 2 to 32767.");
 121unsigned int dir_cache_timeout = 30;
 122module_param(dir_cache_timeout, uint, 0644);
 123MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
 124				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
 125#ifdef CONFIG_CIFS_STATS2
 126unsigned int slow_rsp_threshold = 1;
 127module_param(slow_rsp_threshold, uint, 0644);
 128MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
 129				   "before logging that a response is delayed. "
 130				   "Default: 1 (if set to 0 disables msg).");
 131#endif /* STATS2 */
 132
 133module_param(enable_oplocks, bool, 0644);
 134MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
 135
 136module_param(enable_gcm_256, bool, 0644);
 137MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");
 138
 139module_param(require_gcm_256, bool, 0644);
 140MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
 141
 142module_param(enable_negotiate_signing, bool, 0644);
 143MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
 144
 145module_param(disable_legacy_dialects, bool, 0644);
 146MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
 147				  "helpful to restrict the ability to "
 148				  "override the default dialects (SMB2.1, "
 149				  "SMB3 and SMB3.02) on mount with old "
 150				  "dialects (CIFS/SMB1 and SMB2) since "
 151				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
 152				  " and less secure. Default: n/N/0");
 153
 154struct workqueue_struct	*cifsiod_wq;
 155struct workqueue_struct	*decrypt_wq;
 156struct workqueue_struct	*fileinfo_put_wq;
 157struct workqueue_struct	*cifsoplockd_wq;
 158struct workqueue_struct	*deferredclose_wq;
 159struct workqueue_struct	*serverclose_wq;
 160struct workqueue_struct	*cfid_put_wq;
 161__u32 cifs_lock_secret;
 162
 163/*
 164 * Bumps refcount for cifs super block.
 165 * Note that it should be only called if a reference to VFS super block is
 166 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 167 * atomic_dec_and_test in deactivate_locked_super.
 168 */
 169void
 170cifs_sb_active(struct super_block *sb)
 171{
 172	struct cifs_sb_info *server = CIFS_SB(sb);
 173
 174	if (atomic_inc_return(&server->active) == 1)
 175		atomic_inc(&sb->s_active);
 176}
 177
 178void
 179cifs_sb_deactive(struct super_block *sb)
 180{
 181	struct cifs_sb_info *server = CIFS_SB(sb);
 182
 183	if (atomic_dec_and_test(&server->active))
 184		deactivate_super(sb);
 185}
 186
 187static int
 188cifs_read_super(struct super_block *sb)
 189{
 190	struct inode *inode;
 191	struct cifs_sb_info *cifs_sb;
 192	struct cifs_tcon *tcon;
 193	struct timespec64 ts;
 194	int rc = 0;
 195
 196	cifs_sb = CIFS_SB(sb);
 197	tcon = cifs_sb_master_tcon(cifs_sb);
 198
 199	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
 200		sb->s_flags |= SB_POSIXACL;
 201
 202	if (tcon->snapshot_time)
 203		sb->s_flags |= SB_RDONLY;
 204
 205	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
 206		sb->s_maxbytes = MAX_LFS_FILESIZE;
 207	else
 208		sb->s_maxbytes = MAX_NON_LFS;
 209
 210	/*
 211	 * Some very old servers like DOS and OS/2 used 2 second granularity
 212	 * (while all current servers use 100ns granularity - see MS-DTYP)
 213	 * but 1 second is the maximum allowed granularity for the VFS
 214	 * so for old servers set time granularity to 1 second while for
 215	 * everything else (current servers) set it to 100ns.
 216	 */
 217	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
 218	    ((tcon->ses->capabilities &
 219	      tcon->ses->server->vals->cap_nt_find) == 0) &&
 220	    !tcon->unix_ext) {
 221		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
 222		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
 223		sb->s_time_min = ts.tv_sec;
 224		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
 225				    cpu_to_le16(SMB_TIME_MAX), 0);
 226		sb->s_time_max = ts.tv_sec;
 227	} else {
 228		/*
 229		 * Almost every server, including all SMB2+, uses DCE TIME
 230		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
 231		 */
 232		sb->s_time_gran = 100;
 233		ts = cifs_NTtimeToUnix(0);
 234		sb->s_time_min = ts.tv_sec;
 235		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
 236		sb->s_time_max = ts.tv_sec;
 237	}
 238
 239	sb->s_magic = CIFS_SUPER_MAGIC;
 240	sb->s_op = &cifs_super_ops;
 241	sb->s_xattr = cifs_xattr_handlers;
 242	rc = super_setup_bdi(sb);
 243	if (rc)
 244		goto out_no_root;
 245	/* tune readahead according to rsize if readahead size not set on mount */
 246	if (cifs_sb->ctx->rsize == 0)
 247		cifs_sb->ctx->rsize =
 248			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
 249	if (cifs_sb->ctx->rasize)
 250		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
 251	else
 252		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
 253
 254	sb->s_blocksize = CIFS_MAX_MSGSIZE;
 255	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
 256	inode = cifs_root_iget(sb);
 257
 258	if (IS_ERR(inode)) {
 259		rc = PTR_ERR(inode);
 260		goto out_no_root;
 261	}
 262
 263	if (tcon->nocase)
 264		sb->s_d_op = &cifs_ci_dentry_ops;
 265	else
 266		sb->s_d_op = &cifs_dentry_ops;
 267
 268	sb->s_root = d_make_root(inode);
 269	if (!sb->s_root) {
 270		rc = -ENOMEM;
 271		goto out_no_root;
 272	}
 273
 274#ifdef CONFIG_CIFS_NFSD_EXPORT
 275	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
 276		cifs_dbg(FYI, "export ops supported\n");
 277		sb->s_export_op = &cifs_export_ops;
 278	}
 279#endif /* CONFIG_CIFS_NFSD_EXPORT */
 280
 281	return 0;
 282
 283out_no_root:
 284	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
 285	return rc;
 286}
 287
 288static void cifs_kill_sb(struct super_block *sb)
 289{
 290	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 291
 292	/*
 293	 * We need to release all dentries for the cached directories
 294	 * before we kill the sb.
 295	 */
 296	if (cifs_sb->root) {
 297		close_all_cached_dirs(cifs_sb);
 298
 299		/* finally release root dentry */
 300		dput(cifs_sb->root);
 301		cifs_sb->root = NULL;
 302	}
 303
 304	kill_anon_super(sb);
 305	cifs_umount(cifs_sb);
 306}
 307
 308static int
 309cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
 310{
 311	struct super_block *sb = dentry->d_sb;
 312	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 313	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 314	struct TCP_Server_Info *server = tcon->ses->server;
 315	unsigned int xid;
 316	int rc = 0;
 317	const char *full_path;
 318	void *page;
 319
 320	xid = get_xid();
 321	page = alloc_dentry_path();
 322
 323	full_path = build_path_from_dentry(dentry, page);
 324	if (IS_ERR(full_path)) {
 325		rc = PTR_ERR(full_path);
 326		goto statfs_out;
 327	}
 328
 329	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
 330		buf->f_namelen =
 331		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
 332	else
 333		buf->f_namelen = PATH_MAX;
 334
 335	buf->f_fsid.val[0] = tcon->vol_serial_number;
 336	/* are using part of create time for more randomness, see man statfs */
 337	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
 338
 339	buf->f_files = 0;	/* undefined */
 340	buf->f_ffree = 0;	/* unlimited */
 341
 342	if (server->ops->queryfs)
 343		rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);
 344
 345statfs_out:
 346	free_dentry_path(page);
 347	free_xid(xid);
 348	return rc;
 349}
 350
 351static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
 352{
 353	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
 354	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 355	struct TCP_Server_Info *server = tcon->ses->server;
 356
 357	if (server->ops->fallocate)
 358		return server->ops->fallocate(file, tcon, mode, off, len);
 359
 360	return -EOPNOTSUPP;
 361}
 362
 363static int cifs_permission(struct mnt_idmap *idmap,
 364			   struct inode *inode, int mask)
 365{
 366	struct cifs_sb_info *cifs_sb;
 367
 368	cifs_sb = CIFS_SB(inode->i_sb);
 369
 370	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
 371		if ((mask & MAY_EXEC) && !execute_ok(inode))
 372			return -EACCES;
 373		else
 374			return 0;
 375	} else /* file mode might have been restricted at mount time
 376		on the client (above and beyond ACL on servers) for
 377		servers which do not support setting and viewing mode bits,
 378		so allowing client to check permissions is useful */
 379		return generic_permission(&nop_mnt_idmap, inode, mask);
 380}
 381
 382static struct kmem_cache *cifs_inode_cachep;
 383static struct kmem_cache *cifs_req_cachep;
 384static struct kmem_cache *cifs_mid_cachep;
 385static struct kmem_cache *cifs_sm_req_cachep;
 386static struct kmem_cache *cifs_io_request_cachep;
 387static struct kmem_cache *cifs_io_subrequest_cachep;
 388mempool_t *cifs_sm_req_poolp;
 389mempool_t *cifs_req_poolp;
 390mempool_t *cifs_mid_poolp;
 391mempool_t cifs_io_request_pool;
 392mempool_t cifs_io_subrequest_pool;
 393
 394static struct inode *
 395cifs_alloc_inode(struct super_block *sb)
 396{
 397	struct cifsInodeInfo *cifs_inode;
 398	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
 399	if (!cifs_inode)
 400		return NULL;
 401	cifs_inode->cifsAttrs = ATTR_ARCHIVE;	/* default */
 402	cifs_inode->time = 0;
 403	/*
 404	 * Until the file is open and we have gotten oplock info back from the
 405	 * server, can not assume caching of file data or metadata.
 406	 */
 407	cifs_set_oplock_level(cifs_inode, 0);
 408	cifs_inode->lease_granted = false;
 409	cifs_inode->flags = 0;
 410	spin_lock_init(&cifs_inode->writers_lock);
 411	cifs_inode->writers = 0;
 412	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
 413	cifs_inode->netfs.remote_i_size = 0;
 414	cifs_inode->uniqueid = 0;
 415	cifs_inode->createtime = 0;
 416	cifs_inode->epoch = 0;
 417	spin_lock_init(&cifs_inode->open_file_lock);
 418	generate_random_uuid(cifs_inode->lease_key);
 419	cifs_inode->symlink_target = NULL;
 420
 421	/*
 422	 * Can not set i_flags here - they get immediately overwritten to zero
 423	 * by the VFS.
 424	 */
 425	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
 426	INIT_LIST_HEAD(&cifs_inode->openFileList);
 427	INIT_LIST_HEAD(&cifs_inode->llist);
 428	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
 429	spin_lock_init(&cifs_inode->deferred_lock);
 430	return &cifs_inode->netfs.inode;
 431}
 432
 433static void
 434cifs_free_inode(struct inode *inode)
 435{
 436	struct cifsInodeInfo *cinode = CIFS_I(inode);
 437
 438	if (S_ISLNK(inode->i_mode))
 439		kfree(cinode->symlink_target);
 440	kmem_cache_free(cifs_inode_cachep, cinode);
 441}
 442
 443static void
 444cifs_evict_inode(struct inode *inode)
 445{
 446	netfs_wait_for_outstanding_io(inode);
 447	truncate_inode_pages_final(&inode->i_data);
 448	if (inode->i_state & I_PINNING_NETFS_WB)
 449		cifs_fscache_unuse_inode_cookie(inode, true);
 450	cifs_fscache_release_inode_cookie(inode);
 451	clear_inode(inode);
 452}
 453
 454static void
 455cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
 456{
 457	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
 458	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
 459
 460	seq_puts(s, ",addr=");
 461
 462	switch (server->dstaddr.ss_family) {
 463	case AF_INET:
 464		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
 465		break;
 466	case AF_INET6:
 467		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
 468		if (sa6->sin6_scope_id)
 469			seq_printf(s, "%%%u", sa6->sin6_scope_id);
 470		break;
 471	default:
 472		seq_puts(s, "(unknown)");
 473	}
 474	if (server->rdma)
 475		seq_puts(s, ",rdma");
 476}
 477
 478static void
 479cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
 480{
 481	if (ses->sectype == Unspecified) {
 482		if (ses->user_name == NULL)
 483			seq_puts(s, ",sec=none");
 484		return;
 485	}
 486
 487	seq_puts(s, ",sec=");
 488
 489	switch (ses->sectype) {
 490	case NTLMv2:
 491		seq_puts(s, "ntlmv2");
 492		break;
 493	case Kerberos:
 494		seq_puts(s, "krb5");
 495		break;
 496	case RawNTLMSSP:
 497		seq_puts(s, "ntlmssp");
 498		break;
 499	default:
 500		/* shouldn't ever happen */
 501		seq_puts(s, "unknown");
 502		break;
 503	}
 504
 505	if (ses->sign)
 506		seq_puts(s, "i");
 507
 508	if (ses->sectype == Kerberos)
 509		seq_printf(s, ",cruid=%u",
 510			   from_kuid_munged(&init_user_ns, ses->cred_uid));
 511}
 512
 513static void
 514cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
 515{
 516	seq_puts(s, ",cache=");
 517
 518	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
 519		seq_puts(s, "strict");
 520	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
 521		seq_puts(s, "none");
 522	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
 523		seq_puts(s, "singleclient"); /* assume only one client access */
 524	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
 525		seq_puts(s, "ro"); /* read only caching assumed */
 526	else
 527		seq_puts(s, "loose");
 528}
 529
 530/*
 531 * cifs_show_devname() is used so we show the mount device name with correct
 532 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
 533 */
 534static int cifs_show_devname(struct seq_file *m, struct dentry *root)
 535{
 536	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
 537	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
 538
 539	if (devname == NULL)
 540		seq_puts(m, "none");
 541	else {
 542		convert_delimiter(devname, '/');
 543		/* escape all spaces in share names */
 544		seq_escape(m, devname, " \t");
 545		kfree(devname);
 546	}
 547	return 0;
 548}
 549
 550static void
 551cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
 552{
 553	if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
 554		seq_puts(s, ",upcall_target=app");
 555		return;
 556	}
 557
 558	seq_puts(s, ",upcall_target=");
 559
 560	switch (cifs_sb->ctx->upcall_target) {
 561	case UPTARGET_APP:
 562		seq_puts(s, "app");
 563		break;
 564	case UPTARGET_MOUNT:
 565		seq_puts(s, "mount");
 566		break;
 567	default:
 568		/* shouldn't ever happen */
 569		seq_puts(s, "unknown");
 570		break;
 571	}
 572}
 573
 574/*
 575 * cifs_show_options() is for displaying mount options in /proc/mounts.
 576 * Not all settable options are displayed but most of the important
 577 * ones are.
 578 */
 579static int
 580cifs_show_options(struct seq_file *s, struct dentry *root)
 581{
 582	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
 583	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 584	struct sockaddr *srcaddr;
 585	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
 586
 587	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
 588	cifs_show_security(s, tcon->ses);
 589	cifs_show_cache_flavor(s, cifs_sb);
 590	cifs_show_upcall_target(s, cifs_sb);
 591
 592	if (tcon->no_lease)
 593		seq_puts(s, ",nolease");
 594	if (cifs_sb->ctx->multiuser)
 595		seq_puts(s, ",multiuser");
 596	else if (tcon->ses->user_name)
 597		seq_show_option(s, "username", tcon->ses->user_name);
 598
 599	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
 600		seq_show_option(s, "domain", tcon->ses->domainName);
 601
 602	if (srcaddr->sa_family != AF_UNSPEC) {
 603		struct sockaddr_in *saddr4;
 604		struct sockaddr_in6 *saddr6;
 605		saddr4 = (struct sockaddr_in *)srcaddr;
 606		saddr6 = (struct sockaddr_in6 *)srcaddr;
 607		if (srcaddr->sa_family == AF_INET6)
 608			seq_printf(s, ",srcaddr=%pI6c",
 609				   &saddr6->sin6_addr);
 610		else if (srcaddr->sa_family == AF_INET)
 611			seq_printf(s, ",srcaddr=%pI4",
 612				   &saddr4->sin_addr.s_addr);
 613		else
 614			seq_printf(s, ",srcaddr=BAD-AF:%i",
 615				   (int)(srcaddr->sa_family));
 616	}
 617
 618	seq_printf(s, ",uid=%u",
 619		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
 620	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
 621		seq_puts(s, ",forceuid");
 622	else
 623		seq_puts(s, ",noforceuid");
 624
 625	seq_printf(s, ",gid=%u",
 626		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
 627	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
 628		seq_puts(s, ",forcegid");
 629	else
 630		seq_puts(s, ",noforcegid");
 631
 632	cifs_show_address(s, tcon->ses->server);
 633
 634	if (!tcon->unix_ext)
 635		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
 636					   cifs_sb->ctx->file_mode,
 637					   cifs_sb->ctx->dir_mode);
 638	if (cifs_sb->ctx->iocharset)
 639		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
 640	if (tcon->seal)
 641		seq_puts(s, ",seal");
 642	else if (tcon->ses->server->ignore_signature)
 643		seq_puts(s, ",signloosely");
 644	if (tcon->nocase)
 645		seq_puts(s, ",nocase");
 646	if (tcon->nodelete)
 647		seq_puts(s, ",nodelete");
 648	if (cifs_sb->ctx->no_sparse)
 649		seq_puts(s, ",nosparse");
 650	if (tcon->local_lease)
 651		seq_puts(s, ",locallease");
 652	if (tcon->retry)
 653		seq_puts(s, ",hard");
 654	else
 655		seq_puts(s, ",soft");
 656	if (tcon->use_persistent)
 657		seq_puts(s, ",persistenthandles");
 658	else if (tcon->use_resilient)
 659		seq_puts(s, ",resilienthandles");
 660	if (tcon->posix_extensions)
 661		seq_puts(s, ",posix");
 662	else if (tcon->unix_ext)
 663		seq_puts(s, ",unix");
 664	else
 665		seq_puts(s, ",nounix");
 666	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
 667		seq_puts(s, ",nodfs");
 668	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
 669		seq_puts(s, ",posixpaths");
 670	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
 671		seq_puts(s, ",setuids");
 672	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
 673		seq_puts(s, ",idsfromsid");
 674	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
 675		seq_puts(s, ",serverino");
 676	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
 677		seq_puts(s, ",rwpidforward");
 678	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
 679		seq_puts(s, ",forcemand");
 680	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
 681		seq_puts(s, ",nouser_xattr");
 682	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
 683		seq_puts(s, ",mapchars");
 684	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
 685		seq_puts(s, ",mapposix");
 686	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
 687		seq_puts(s, ",sfu");
 688	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
 689		seq_puts(s, ",nobrl");
 690	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
 691		seq_puts(s, ",nohandlecache");
 692	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
 693		seq_puts(s, ",modefromsid");
 694	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
 695		seq_puts(s, ",cifsacl");
 696	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
 697		seq_puts(s, ",dynperm");
 698	if (root->d_sb->s_flags & SB_POSIXACL)
 699		seq_puts(s, ",acl");
 700	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
 701		seq_puts(s, ",mfsymlinks");
 702	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
 703		seq_puts(s, ",fsc");
 704	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
 705		seq_puts(s, ",nostrictsync");
 706	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
 707		seq_puts(s, ",noperm");
 708	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
 709		seq_printf(s, ",backupuid=%u",
 710			   from_kuid_munged(&init_user_ns,
 711					    cifs_sb->ctx->backupuid));
 712	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
 713		seq_printf(s, ",backupgid=%u",
 714			   from_kgid_munged(&init_user_ns,
 715					    cifs_sb->ctx->backupgid));
 716	seq_show_option(s, "reparse",
 717			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
 718
 719	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
 720	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
 721	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
 722	if (cifs_sb->ctx->rasize)
 723		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
 724	if (tcon->ses->server->min_offload)
 725		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
 726	if (tcon->ses->server->retrans)
 727		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
 728	seq_printf(s, ",echo_interval=%lu",
 729			tcon->ses->server->echo_interval / HZ);
 730
 731	/* Only display the following if overridden on mount */
 732	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
 733		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
 734	if (tcon->ses->server->tcp_nodelay)
 735		seq_puts(s, ",tcpnodelay");
 736	if (tcon->ses->server->noautotune)
 737		seq_puts(s, ",noautotune");
 738	if (tcon->ses->server->noblocksnd)
 739		seq_puts(s, ",noblocksend");
 740	if (tcon->ses->server->nosharesock)
 741		seq_puts(s, ",nosharesock");
 742
 743	if (tcon->snapshot_time)
 744		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
 745	if (tcon->handle_timeout)
 746		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
 747	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
 748		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
 749
 750	/*
 751	 * Display file and directory attribute timeout in seconds.
 752	 * If file and directory attribute timeout the same then actimeo
 753	 * was likely specified on mount
 754	 */
 755	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
 756		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
 757	else {
 758		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
 759		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
 760	}
 761	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
 762
 763	if (tcon->ses->chan_max > 1)
 764		seq_printf(s, ",multichannel,max_channels=%zu",
 765			   tcon->ses->chan_max);
 766
 767	if (tcon->use_witness)
 768		seq_puts(s, ",witness");
 769
 770	return 0;
 771}
 772
 773static void cifs_umount_begin(struct super_block *sb)
 774{
 775	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 776	struct cifs_tcon *tcon;
 777
 778	if (cifs_sb == NULL)
 779		return;
 780
 781	tcon = cifs_sb_master_tcon(cifs_sb);
 782
 783	spin_lock(&cifs_tcp_ses_lock);
 784	spin_lock(&tcon->tc_lock);
 785	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
 786			    netfs_trace_tcon_ref_see_umount);
 787	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
 788		/* we have other mounts to same share or we have
 789		   already tried to umount this and woken up
 790		   all waiting network requests, nothing to do */
 791		spin_unlock(&tcon->tc_lock);
 792		spin_unlock(&cifs_tcp_ses_lock);
 793		return;
 794	}
 795	/*
 796	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
 797	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
 798	 */
 799	spin_unlock(&tcon->tc_lock);
 800	spin_unlock(&cifs_tcp_ses_lock);
 801
 802	cifs_close_all_deferred_files(tcon);
 803	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
 804	/* cancel_notify_requests(tcon); */
 805	if (tcon->ses && tcon->ses->server) {
 806		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
 807		wake_up_all(&tcon->ses->server->request_q);
 808		wake_up_all(&tcon->ses->server->response_q);
 809		msleep(1); /* yield */
 810		/* we have to kick the requests once more */
 811		wake_up_all(&tcon->ses->server->response_q);
 812		msleep(1);
 813	}
 814
 815	return;
 816}
 817
 818static int cifs_freeze(struct super_block *sb)
 819{
 820	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 821	struct cifs_tcon *tcon;
 822
 823	if (cifs_sb == NULL)
 824		return 0;
 825
 826	tcon = cifs_sb_master_tcon(cifs_sb);
 827
 828	cifs_close_all_deferred_files(tcon);
 829	return 0;
 830}
 831
 832#ifdef CONFIG_CIFS_STATS2
 833static int cifs_show_stats(struct seq_file *s, struct dentry *root)
 834{
 835	/* BB FIXME */
 836	return 0;
 837}
 838#endif
 839
 840static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
 841{
 842	return netfs_unpin_writeback(inode, wbc);
 843}
 844
 845static int cifs_drop_inode(struct inode *inode)
 846{
 847	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 848
 849	/* no serverino => unconditional eviction */
 850	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
 851		generic_drop_inode(inode);
 852}
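/*
 * A nonzero return from drop_inode tells the VFS to evict the inode once its
 * last reference is put rather than keep it cached.  Without the serverino
 * mount option inode numbers are generated on the client, so cached inodes
 * cannot safely be matched against later lookups and are always dropped.
 */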
 853
 854static const struct super_operations cifs_super_ops = {
 855	.statfs = cifs_statfs,
 856	.alloc_inode = cifs_alloc_inode,
 857	.write_inode	= cifs_write_inode,
 858	.free_inode = cifs_free_inode,
 859	.drop_inode	= cifs_drop_inode,
 860	.evict_inode	= cifs_evict_inode,
 861/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
 862	.show_devname   = cifs_show_devname,
 863/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
 864	function unless later we add lazy close of inodes or unless the
 865	kernel forgets to call us with the same number of releases (closes)
 866	as opens */
 867	.show_options = cifs_show_options,
 868	.umount_begin   = cifs_umount_begin,
 869	.freeze_fs      = cifs_freeze,
 870#ifdef CONFIG_CIFS_STATS2
 871	.show_stats = cifs_show_stats,
 872#endif
 873};
 874
 875/*
 876 * Get root dentry from superblock according to prefix path mount option.
 877 * Return dentry with refcount + 1 on success and NULL otherwise.
 878 */
 879static struct dentry *
 880cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
 881{
 882	struct dentry *dentry;
 883	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 884	char *full_path = NULL;
 885	char *s, *p;
 886	char sep;
 887
 888	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
 889		return dget(sb->s_root);
 890
 891	full_path = cifs_build_path_to_root(ctx, cifs_sb,
 892				cifs_sb_master_tcon(cifs_sb), 0);
 893	if (full_path == NULL)
 894		return ERR_PTR(-ENOMEM);
 895
 896	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
 897
 898	sep = CIFS_DIR_SEP(cifs_sb);
 899	dentry = dget(sb->s_root);
 900	s = full_path;
 901
 902	do {
 903		struct inode *dir = d_inode(dentry);
 904		struct dentry *child;
 905
 906		if (!S_ISDIR(dir->i_mode)) {
 907			dput(dentry);
 908			dentry = ERR_PTR(-ENOTDIR);
 909			break;
 910		}
 911
 912		/* skip separators */
 913		while (*s == sep)
 914			s++;
 915		if (!*s)
 916			break;
 917		p = s++;
 918		/* next separator */
 919		while (*s && *s != sep)
 920			s++;
 921
 922		child = lookup_positive_unlocked(p, dentry, s - p);
 923		dput(dentry);
 924		dentry = child;
 925	} while (!IS_ERR(dentry));
 926	kfree(full_path);
 927	return dentry;
 928}
 929
 930static int cifs_set_super(struct super_block *sb, void *data)
 931{
 932	struct cifs_mnt_data *mnt_data = data;
 933	sb->s_fs_info = mnt_data->cifs_sb;
 934	return set_anon_super(sb, NULL);
 935}
 936
 937struct dentry *
 938cifs_smb3_do_mount(struct file_system_type *fs_type,
 939	      int flags, struct smb3_fs_context *old_ctx)
 940{
 941	struct cifs_mnt_data mnt_data;
 942	struct cifs_sb_info *cifs_sb;
 943	struct super_block *sb;
 944	struct dentry *root;
 945	int rc;
 946
 947	if (cifsFYI) {
 948		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
 949			 old_ctx->source, flags);
 950	} else {
 951		cifs_info("Attempting to mount %s\n", old_ctx->source);
 952	}
 953
 954	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
 955	if (!cifs_sb)
 956		return ERR_PTR(-ENOMEM);
 957
 958	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
 959	if (!cifs_sb->ctx) {
 960		root = ERR_PTR(-ENOMEM);
 961		goto out;
 962	}
 963	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
 964	if (rc) {
 965		root = ERR_PTR(rc);
 966		goto out;
 967	}
 968
 969	rc = cifs_setup_cifs_sb(cifs_sb);
 970	if (rc) {
 971		root = ERR_PTR(rc);
 972		goto out;
 973	}
 974
 975	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
 976	if (rc) {
 977		if (!(flags & SB_SILENT))
 978			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
 979				 rc);
 980		root = ERR_PTR(rc);
 981		goto out;
 982	}
 983
 984	mnt_data.ctx = cifs_sb->ctx;
 985	mnt_data.cifs_sb = cifs_sb;
 986	mnt_data.flags = flags;
 987
 988	/* BB should we make this contingent on mount parm? */
 989	flags |= SB_NODIRATIME | SB_NOATIME;
 990
 991	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
 992	if (IS_ERR(sb)) {
 993		cifs_umount(cifs_sb);
 994		return ERR_CAST(sb);
 995	}
 996
 997	if (sb->s_root) {
 998		cifs_dbg(FYI, "Use existing superblock\n");
 999		cifs_umount(cifs_sb);
1000		cifs_sb = NULL;
1001	} else {
1002		rc = cifs_read_super(sb);
1003		if (rc) {
1004			root = ERR_PTR(rc);
1005			goto out_super;
1006		}
1007
1008		sb->s_flags |= SB_ACTIVE;
1009	}
1010
1011	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
1012	if (IS_ERR(root))
1013		goto out_super;
1014
1015	if (cifs_sb)
1016		cifs_sb->root = dget(root);
1017
1018	cifs_dbg(FYI, "dentry root is: %p\n", root);
1019	return root;
1020
1021out_super:
1022	deactivate_locked_super(sb);
1023	return root;
1024out:
1025	kfree(cifs_sb->prepath);
1026	smb3_cleanup_fs_context(cifs_sb->ctx);
1027	kfree(cifs_sb);
1028	return root;
1029}
1030
1031static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1032{
1033	struct cifsFileInfo *cfile = file->private_data;
1034	struct cifs_tcon *tcon;
1035
1036	/*
1037	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1038	 * the cached file length
1039	 */
1040	if (whence != SEEK_SET && whence != SEEK_CUR) {
1041		int rc;
1042		struct inode *inode = file_inode(file);
1043
1044		/*
1045		 * We need to be sure that all dirty pages are written and the
1046		 * server has the newest file length.
1047		 */
1048		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1049		    inode->i_mapping->nrpages != 0) {
1050			rc = filemap_fdatawait(inode->i_mapping);
1051			if (rc) {
1052				mapping_set_error(inode->i_mapping, rc);
1053				return rc;
1054			}
1055		}
1056		/*
1057		 * Some applications poll for the file length in this strange
1058		 * way so we must seek to end on non-oplocked files by
1059		 * setting the revalidate time to zero.
1060		 */
1061		CIFS_I(inode)->time = 0;
1062
1063		rc = cifs_revalidate_file_attr(file);
1064		if (rc < 0)
1065			return (loff_t)rc;
1066	}
1067	if (cfile && cfile->tlink) {
1068		tcon = tlink_tcon(cfile->tlink);
1069		if (tcon->ses->server->ops->llseek)
1070			return tcon->ses->server->ops->llseek(file, tcon,
1071							      offset, whence);
1072	}
1073	return generic_file_llseek(file, offset, whence);
1074}
1075
1076static int
1077cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
1078{
1079	/*
1080	 * Note that this is called by vfs setlease with i_lock held to
1081	 * protect *lease from going away.
1082	 */
1083	struct inode *inode = file_inode(file);
1084	struct cifsFileInfo *cfile = file->private_data;
1085
1086	/* Check if file is oplocked if this is request for new lease */
1087	if (arg == F_UNLCK ||
1088	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1089	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1090		return generic_setlease(file, arg, lease, priv);
1091	else if (tlink_tcon(cfile->tlink)->local_lease &&
1092		 !CIFS_CACHE_READ(CIFS_I(inode)))
1093		/*
1094		 * If the server claims to support oplock on this file, then we
1095		 * still need to check oplock even if the local_lease mount
1096		 * option is set, but there are servers which do not support
1097		 * oplock for which this mount option may be useful if the user
1098		 * knows that the file won't be changed on the server by anyone
1099		 * else.
1100		 */
1101		return generic_setlease(file, arg, lease, priv);
1102	else
1103		return -EAGAIN;
1104}
1105
1106struct file_system_type cifs_fs_type = {
1107	.owner = THIS_MODULE,
1108	.name = "cifs",
1109	.init_fs_context = smb3_init_fs_context,
1110	.parameters = smb3_fs_parameters,
1111	.kill_sb = cifs_kill_sb,
1112	.fs_flags = FS_RENAME_DOES_D_MOVE,
1113};
1114MODULE_ALIAS_FS("cifs");
1115
1116struct file_system_type smb3_fs_type = {
1117	.owner = THIS_MODULE,
1118	.name = "smb3",
1119	.init_fs_context = smb3_init_fs_context,
1120	.parameters = smb3_fs_parameters,
1121	.kill_sb = cifs_kill_sb,
1122	.fs_flags = FS_RENAME_DOES_D_MOVE,
1123};
1124MODULE_ALIAS_FS("smb3");
1125MODULE_ALIAS("smb3");
1126
1127const struct inode_operations cifs_dir_inode_ops = {
1128	.create = cifs_create,
1129	.atomic_open = cifs_atomic_open,
1130	.lookup = cifs_lookup,
1131	.getattr = cifs_getattr,
1132	.unlink = cifs_unlink,
1133	.link = cifs_hardlink,
1134	.mkdir = cifs_mkdir,
1135	.rmdir = cifs_rmdir,
1136	.rename = cifs_rename2,
1137	.permission = cifs_permission,
1138	.setattr = cifs_setattr,
1139	.symlink = cifs_symlink,
1140	.mknod   = cifs_mknod,
1141	.listxattr = cifs_listxattr,
1142	.get_acl = cifs_get_acl,
1143	.set_acl = cifs_set_acl,
1144};
1145
1146const struct inode_operations cifs_file_inode_ops = {
1147	.setattr = cifs_setattr,
1148	.getattr = cifs_getattr,
1149	.permission = cifs_permission,
1150	.listxattr = cifs_listxattr,
1151	.fiemap = cifs_fiemap,
1152	.get_acl = cifs_get_acl,
1153	.set_acl = cifs_set_acl,
1154};
1155
1156const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1157			    struct delayed_call *done)
1158{
1159	char *target_path;
1160
1161	if (!dentry)
1162		return ERR_PTR(-ECHILD);
1163
1164	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1165	if (!target_path)
1166		return ERR_PTR(-ENOMEM);
1167
1168	spin_lock(&inode->i_lock);
1169	if (likely(CIFS_I(inode)->symlink_target)) {
1170		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1171	} else {
1172		kfree(target_path);
1173		target_path = ERR_PTR(-EOPNOTSUPP);
1174	}
1175	spin_unlock(&inode->i_lock);
1176
1177	if (!IS_ERR(target_path))
1178		set_delayed_call(done, kfree_link, target_path);
1179
1180	return target_path;
1181}
1182
1183const struct inode_operations cifs_symlink_inode_ops = {
1184	.get_link = cifs_get_link,
1185	.setattr = cifs_setattr,
1186	.permission = cifs_permission,
1187	.listxattr = cifs_listxattr,
1188};
1189
1190/*
1191 * Advance the EOF marker to after the source range.
1192 */
1193static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1194				struct cifs_tcon *src_tcon,
1195				unsigned int xid, loff_t src_end)
1196{
1197	struct cifsFileInfo *writeable_srcfile;
1198	int rc = -EINVAL;
1199
1200	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1201	if (writeable_srcfile) {
1202		if (src_tcon->ses->server->ops->set_file_size)
1203			rc = src_tcon->ses->server->ops->set_file_size(
1204				xid, src_tcon, writeable_srcfile,
1205				src_inode->i_size, true /* no need to set sparse */);
1206		else
1207			rc = -ENOSYS;
1208		cifsFileInfo_put(writeable_srcfile);
1209		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1210	}
1211
1212	if (rc < 0)
1213		goto set_failed;
1214
1215	netfs_resize_file(&src_cifsi->netfs, src_end, true);
1216	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1217	return 0;
1218
1219set_failed:
1220	return filemap_write_and_wait(src_inode->i_mapping);
1221}
1222
1223/*
1224 * Flush out either the folio that overlaps the beginning of a range in which
1225 * pos resides or the folio that overlaps the end of a range unless that folio
1226 * is entirely within the range we're going to invalidate.  We extend the flush
1227 * bounds to encompass the folio.
1228 */
1229static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1230			    bool first)
1231{
1232	struct folio *folio;
1233	unsigned long long fpos, fend;
1234	pgoff_t index = pos / PAGE_SIZE;
1235	size_t size;
1236	int rc = 0;
1237
1238	folio = filemap_get_folio(inode->i_mapping, index);
1239	if (IS_ERR(folio))
1240		return 0;
1241
1242	size = folio_size(folio);
1243	fpos = folio_pos(folio);
1244	fend = fpos + size - 1;
1245	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1246	*_fend   = max_t(unsigned long long, *_fend, fend);
1247	if ((first && pos == fpos) || (!first && pos == fend))
1248		goto out;
1249
1250	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1251out:
1252	folio_put(folio);
1253	return rc;
1254}
1255
1256static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1257		struct file *dst_file, loff_t destoff, loff_t len,
1258		unsigned int remap_flags)
1259{
1260	struct inode *src_inode = file_inode(src_file);
1261	struct inode *target_inode = file_inode(dst_file);
1262	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1263	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1264	struct cifsFileInfo *smb_file_src = src_file->private_data;
1265	struct cifsFileInfo *smb_file_target = dst_file->private_data;
1266	struct cifs_tcon *target_tcon, *src_tcon;
1267	unsigned long long destend, fstart, fend, old_size, new_size;
1268	unsigned int xid;
1269	int rc;
1270
1271	if (remap_flags & REMAP_FILE_DEDUP)
1272		return -EOPNOTSUPP;
1273	if (remap_flags & ~REMAP_FILE_ADVISORY)
1274		return -EINVAL;
1275
1276	cifs_dbg(FYI, "clone range\n");
1277
1278	xid = get_xid();
1279
1280	if (!smb_file_src || !smb_file_target) {
1281		rc = -EBADF;
1282		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1283		goto out;
1284	}
1285
1286	src_tcon = tlink_tcon(smb_file_src->tlink);
1287	target_tcon = tlink_tcon(smb_file_target->tlink);
1288
1289	/*
1290	 * Note: cifs case is easier than btrfs since server responsible for
1291	 * checks for proper open modes and file type and if it wants
1292	 * server could even support copy of range where source = target
1293	 */
1294	lock_two_nondirectories(target_inode, src_inode);
1295
1296	if (len == 0)
1297		len = src_inode->i_size - off;
1298
1299	cifs_dbg(FYI, "clone range\n");
1300
1301	/* Flush the source buffer */
1302	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1303					  off + len - 1);
1304	if (rc)
1305		goto unlock;
1306
1307	/* The server-side copy will fail if the source crosses the EOF marker.
1308	 * Advance the EOF marker after the flush above to the end of the range
1309	 * if it's short of that.
1310	 */
1311	if (src_cifsi->netfs.remote_i_size < off + len) {
1312		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1313		if (rc < 0)
1314			goto unlock;
1315	}
1316
1317	new_size = destoff + len;
1318	destend = destoff + len - 1;
1319
1320	/* Flush the folios at either end of the destination range to prevent
1321	 * accidental loss of dirty data outside of the range.
1322	 */
1323	fstart = destoff;
1324	fend = destend;
1325
1326	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1327	if (rc)
1328		goto unlock;
1329	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1330	if (rc)
1331		goto unlock;
1332	if (fend > target_cifsi->netfs.zero_point)
1333		target_cifsi->netfs.zero_point = fend + 1;
1334	old_size = target_cifsi->netfs.remote_i_size;
1335
1336	/* Discard all the folios that overlap the destination region. */
1337	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1338	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1339
1340	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1341			   i_size_read(target_inode), 0);
1342
1343	rc = -EOPNOTSUPP;
1344	if (target_tcon->ses->server->ops->duplicate_extents) {
1345		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1346			smb_file_src, smb_file_target, off, len, destoff);
1347		if (rc == 0 && new_size > old_size) {
1348			truncate_setsize(target_inode, new_size);
1349			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1350					      new_size);
1351		}
1352		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
1353			target_cifsi->netfs.zero_point = new_size;
1354	}
1355
1356	/* force revalidate of size and timestamps of target file now
1357	   that target is updated on the server */
1358	CIFS_I(target_inode)->time = 0;
1359unlock:
1360	/* although unlocking in the reverse order from locking is not
1361	   strictly necessary here it is a little cleaner to be consistent */
1362	unlock_two_nondirectories(src_inode, target_inode);
1363out:
1364	free_xid(xid);
1365	return rc < 0 ? rc : len;
1366}
1367
1368ssize_t cifs_file_copychunk_range(unsigned int xid,
1369				struct file *src_file, loff_t off,
1370				struct file *dst_file, loff_t destoff,
1371				size_t len, unsigned int flags)
1372{
1373	struct inode *src_inode = file_inode(src_file);
1374	struct inode *target_inode = file_inode(dst_file);
1375	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1376	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1377	struct cifsFileInfo *smb_file_src;
1378	struct cifsFileInfo *smb_file_target;
1379	struct cifs_tcon *src_tcon;
1380	struct cifs_tcon *target_tcon;
1381	ssize_t rc;
1382
1383	cifs_dbg(FYI, "copychunk range\n");
1384
1385	if (!src_file->private_data || !dst_file->private_data) {
1386		rc = -EBADF;
1387		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1388		goto out;
1389	}
1390
1391	rc = -EXDEV;
1392	smb_file_target = dst_file->private_data;
1393	smb_file_src = src_file->private_data;
1394	src_tcon = tlink_tcon(smb_file_src->tlink);
1395	target_tcon = tlink_tcon(smb_file_target->tlink);
1396
1397	if (src_tcon->ses != target_tcon->ses) {
1398		cifs_dbg(FYI, "source and target of copy not on same server\n");
1399		goto out;
1400	}
1401
1402	rc = -EOPNOTSUPP;
1403	if (!target_tcon->ses->server->ops->copychunk_range)
1404		goto out;
1405
1406	/*
1407	 * Note: cifs case is easier than btrfs since server responsible for
1408	 * checks for proper open modes and file type and if it wants
1409	 * server could even support copy of range where source = target
1410	 */
1411	lock_two_nondirectories(target_inode, src_inode);
1412
1413	cifs_dbg(FYI, "about to flush pages\n");
1414
1415	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1416					  off + len - 1);
1417	if (rc)
1418		goto unlock;
1419
1420	/* The server-side copy will fail if the source crosses the EOF marker.
1421	 * Advance the EOF marker after the flush above to the end of the range
1422	 * if it's short of that.
1423	 */
1424	if (src_cifsi->netfs.remote_i_size < off + len) {
1425		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1426		if (rc < 0)
1427			goto unlock;
1428	}
1429
1430	/* Flush and invalidate all the folios in the destination region.  If
1431	 * the copy was successful, then some of the flush is extra overhead,
1432	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
1433	 */
1434	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
1435	if (rc)
1436		goto unlock;
1437
1438	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1439			   i_size_read(target_inode), 0);
1440
1441	rc = file_modified(dst_file);
1442	if (!rc) {
1443		rc = target_tcon->ses->server->ops->copychunk_range(xid,
1444			smb_file_src, smb_file_target, off, len, destoff);
1445		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
1446			truncate_setsize(target_inode, destoff + rc);
1447			netfs_resize_file(&target_cifsi->netfs,
1448					  i_size_read(target_inode), true);
1449			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1450					      i_size_read(target_inode));
1451		}
1452		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
1453			target_cifsi->netfs.zero_point = destoff + rc;
1454	}
1455
1456	file_accessed(src_file);
1457
1458	/* force revalidate of size and timestamps of target file now
1459	 * that target is updated on the server
1460	 */
1461	CIFS_I(target_inode)->time = 0;
1462
1463unlock:
1464	/* although unlocking in the reverse order from locking is not
1465	 * strictly necessary here it is a little cleaner to be consistent
1466	 */
1467	unlock_two_nondirectories(src_inode, target_inode);
1468
1469out:
1470	return rc;
1471}
1472
1473/*
1474 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1475 * is a dummy operation.
1476 */
1477static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1478{
1479	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1480		 file, datasync);
1481
1482	return 0;
1483}
1484
1485static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1486				struct file *dst_file, loff_t destoff,
1487				size_t len, unsigned int flags)
1488{
1489	unsigned int xid = get_xid();
1490	ssize_t rc;
1491	struct cifsFileInfo *cfile = dst_file->private_data;
1492
1493	if (cfile->swapfile) {
1494		rc = -EOPNOTSUPP;
1495		free_xid(xid);
1496		return rc;
1497	}
1498
1499	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1500					len, flags);
1501	free_xid(xid);
1502
1503	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1504		rc = splice_copy_file_range(src_file, off, dst_file,
1505					    destoff, len);
1506	return rc;
1507}
1508
1509const struct file_operations cifs_file_ops = {
1510	.read_iter = cifs_loose_read_iter,
1511	.write_iter = cifs_file_write_iter,
1512	.open = cifs_open,
1513	.release = cifs_close,
1514	.lock = cifs_lock,
1515	.flock = cifs_flock,
1516	.fsync = cifs_fsync,
1517	.flush = cifs_flush,
1518	.mmap  = cifs_file_mmap,
1519	.splice_read = filemap_splice_read,
1520	.splice_write = iter_file_splice_write,
1521	.llseek = cifs_llseek,
1522	.unlocked_ioctl	= cifs_ioctl,
1523	.copy_file_range = cifs_copy_file_range,
1524	.remap_file_range = cifs_remap_file_range,
1525	.setlease = cifs_setlease,
1526	.fallocate = cifs_fallocate,
1527};
1528
1529const struct file_operations cifs_file_strict_ops = {
1530	.read_iter = cifs_strict_readv,
1531	.write_iter = cifs_strict_writev,
1532	.open = cifs_open,
1533	.release = cifs_close,
1534	.lock = cifs_lock,
1535	.flock = cifs_flock,
1536	.fsync = cifs_strict_fsync,
1537	.flush = cifs_flush,
1538	.mmap = cifs_file_strict_mmap,
1539	.splice_read = filemap_splice_read,
1540	.splice_write = iter_file_splice_write,
1541	.llseek = cifs_llseek,
1542	.unlocked_ioctl	= cifs_ioctl,
1543	.copy_file_range = cifs_copy_file_range,
1544	.remap_file_range = cifs_remap_file_range,
1545	.setlease = cifs_setlease,
1546	.fallocate = cifs_fallocate,
1547};
1548
1549const struct file_operations cifs_file_direct_ops = {
1550	.read_iter = netfs_unbuffered_read_iter,
1551	.write_iter = netfs_file_write_iter,
1552	.open = cifs_open,
1553	.release = cifs_close,
1554	.lock = cifs_lock,
1555	.flock = cifs_flock,
1556	.fsync = cifs_fsync,
1557	.flush = cifs_flush,
1558	.mmap = cifs_file_mmap,
1559	.splice_read = copy_splice_read,
1560	.splice_write = iter_file_splice_write,
1561	.unlocked_ioctl  = cifs_ioctl,
1562	.copy_file_range = cifs_copy_file_range,
1563	.remap_file_range = cifs_remap_file_range,
1564	.llseek = cifs_llseek,
1565	.setlease = cifs_setlease,
1566	.fallocate = cifs_fallocate,
1567};
1568
1569const struct file_operations cifs_file_nobrl_ops = {
1570	.read_iter = cifs_loose_read_iter,
1571	.write_iter = cifs_file_write_iter,
1572	.open = cifs_open,
1573	.release = cifs_close,
1574	.fsync = cifs_fsync,
1575	.flush = cifs_flush,
1576	.mmap  = cifs_file_mmap,
1577	.splice_read = filemap_splice_read,
1578	.splice_write = iter_file_splice_write,
1579	.llseek = cifs_llseek,
1580	.unlocked_ioctl	= cifs_ioctl,
1581	.copy_file_range = cifs_copy_file_range,
1582	.remap_file_range = cifs_remap_file_range,
1583	.setlease = cifs_setlease,
1584	.fallocate = cifs_fallocate,
1585};
1586
1587const struct file_operations cifs_file_strict_nobrl_ops = {
1588	.read_iter = cifs_strict_readv,
1589	.write_iter = cifs_strict_writev,
1590	.open = cifs_open,
1591	.release = cifs_close,
1592	.fsync = cifs_strict_fsync,
1593	.flush = cifs_flush,
1594	.mmap = cifs_file_strict_mmap,
1595	.splice_read = filemap_splice_read,
1596	.splice_write = iter_file_splice_write,
1597	.llseek = cifs_llseek,
1598	.unlocked_ioctl	= cifs_ioctl,
1599	.copy_file_range = cifs_copy_file_range,
1600	.remap_file_range = cifs_remap_file_range,
1601	.setlease = cifs_setlease,
1602	.fallocate = cifs_fallocate,
1603};
1604
1605const struct file_operations cifs_file_direct_nobrl_ops = {
1606	.read_iter = netfs_unbuffered_read_iter,
1607	.write_iter = netfs_file_write_iter,
1608	.open = cifs_open,
1609	.release = cifs_close,
1610	.fsync = cifs_fsync,
1611	.flush = cifs_flush,
1612	.mmap = cifs_file_mmap,
1613	.splice_read = copy_splice_read,
1614	.splice_write = iter_file_splice_write,
1615	.unlocked_ioctl  = cifs_ioctl,
1616	.copy_file_range = cifs_copy_file_range,
1617	.remap_file_range = cifs_remap_file_range,
1618	.llseek = cifs_llseek,
1619	.setlease = cifs_setlease,
1620	.fallocate = cifs_fallocate,
1621};
1622
1623const struct file_operations cifs_dir_ops = {
1624	.iterate_shared = cifs_readdir,
1625	.release = cifs_closedir,
1626	.read    = generic_read_dir,
1627	.unlocked_ioctl  = cifs_ioctl,
1628	.copy_file_range = cifs_copy_file_range,
1629	.remap_file_range = cifs_remap_file_range,
1630	.llseek = generic_file_llseek,
1631	.fsync = cifs_dir_fsync,
1632};
1633
1634static void
1635cifs_init_once(void *inode)
1636{
1637	struct cifsInodeInfo *cifsi = inode;
1638
1639	inode_init_once(&cifsi->netfs.inode);
1640	init_rwsem(&cifsi->lock_sem);
1641}
1642
1643static int __init
1644cifs_init_inodecache(void)
1645{
1646	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1647					      sizeof(struct cifsInodeInfo),
1648					      0, (SLAB_RECLAIM_ACCOUNT|
1649						SLAB_ACCOUNT),
1650					      cifs_init_once);
1651	if (cifs_inode_cachep == NULL)
1652		return -ENOMEM;
1653
1654	return 0;
1655}
1656
1657static void
1658cifs_destroy_inodecache(void)
1659{
1660	/*
1661	 * Make sure all delayed rcu free inodes are flushed before we
1662	 * destroy cache.
1663	 */
1664	rcu_barrier();
1665	kmem_cache_destroy(cifs_inode_cachep);
1666}
1667
1668static int
1669cifs_init_request_bufs(void)
1670{
1671	/*
1672	 * SMB2 maximum header size is bigger than CIFS one - no problems to
1673	 * allocate some more bytes for CIFS.
1674	 */
1675	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1676
1677	if (CIFSMaxBufSize < 8192) {
1678	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1679	Unicode path name has to fit in any SMB/CIFS path based frames */
1680		CIFSMaxBufSize = 8192;
1681	} else if (CIFSMaxBufSize > 1024*127) {
1682		CIFSMaxBufSize = 1024 * 127;
1683	} else {
1684		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1685	}
1686/*
1687	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1688		 CIFSMaxBufSize, CIFSMaxBufSize);
1689*/
1690	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1691					    CIFSMaxBufSize + max_hdr_size, 0,
1692					    SLAB_HWCACHE_ALIGN, 0,
1693					    CIFSMaxBufSize + max_hdr_size,
1694					    NULL);
1695	if (cifs_req_cachep == NULL)
1696		return -ENOMEM;
1697
1698	if (cifs_min_rcv < 1)
1699		cifs_min_rcv = 1;
1700	else if (cifs_min_rcv > 64) {
1701		cifs_min_rcv = 64;
1702		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1703	}
1704
1705	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1706						  cifs_req_cachep);
1707
1708	if (cifs_req_poolp == NULL) {
1709		kmem_cache_destroy(cifs_req_cachep);
1710		return -ENOMEM;
1711	}
1712	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1713	almost all handle based requests (but not write response, nor is it
1714	sufficient for path based requests).  A smaller size would have
1715	been more efficient (compacting multiple slab items on one 4k page)
1716	for the case in which debug was on, but this larger size allows
1717	more SMBs to use small buffer alloc and is still much more
1718	efficient to alloc 1 per page off the slab compared to 17K (5page)
1719	alloc of large cifs buffers even when page debugging is on */
1720	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1721			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1722			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1723	if (cifs_sm_req_cachep == NULL) {
1724		mempool_destroy(cifs_req_poolp);
1725		kmem_cache_destroy(cifs_req_cachep);
1726		return -ENOMEM;
1727	}
1728
1729	if (cifs_min_small < 2)
1730		cifs_min_small = 2;
1731	else if (cifs_min_small > 256) {
1732		cifs_min_small = 256;
1733		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1734	}
1735
1736	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1737						     cifs_sm_req_cachep);
1738
1739	if (cifs_sm_req_poolp == NULL) {
1740		mempool_destroy(cifs_req_poolp);
1741		kmem_cache_destroy(cifs_req_cachep);
1742		kmem_cache_destroy(cifs_sm_req_cachep);
1743		return -ENOMEM;
1744	}
1745
1746	return 0;
1747}
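
/*
 * Editor's sketch with hypothetical names, not part of the driver: the
 * useroffset/usersize pair handed to kmem_cache_create_usercopy() above
 * whitelists the whole request buffer for copy_to_user()/copy_from_user()
 * under CONFIG_HARDENED_USERCOPY.  When only part of an object may be
 * exposed to user space, the whitelist is narrowed to that region:
 */
struct cifs_example_rq {
	struct list_head link;		/* kernel-only bookkeeping */
	unsigned int payload_len;
	u8 payload[512];		/* the only user-visible region */
};

static int __maybe_unused cifs_example_usercopy_cache(void)
{
	struct kmem_cache *cachep;

	cachep = kmem_cache_create_usercopy("cifs_example_rq",
					    sizeof(struct cifs_example_rq), 0,
					    SLAB_HWCACHE_ALIGN,
					    offsetof(struct cifs_example_rq, payload),
					    sizeof_field(struct cifs_example_rq, payload),
					    NULL);
	if (!cachep)
		return -ENOMEM;
	kmem_cache_destroy(cachep);	/* sketch only: create and tear down */
	return 0;
}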
1748
1749static void
1750cifs_destroy_request_bufs(void)
1751{
1752	mempool_destroy(cifs_req_poolp);
1753	kmem_cache_destroy(cifs_req_cachep);
1754	mempool_destroy(cifs_sm_req_poolp);
1755	kmem_cache_destroy(cifs_sm_req_cachep);
1756}
1757
1758static int init_mids(void)
1759{
1760	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1761					    sizeof(struct mid_q_entry), 0,
1762					    SLAB_HWCACHE_ALIGN, NULL);
1763	if (cifs_mid_cachep == NULL)
1764		return -ENOMEM;
1765
1766	/* 3 is a reasonable minimum number of simultaneous operations */
1767	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1768	if (cifs_mid_poolp == NULL) {
1769		kmem_cache_destroy(cifs_mid_cachep);
1770		return -ENOMEM;
1771	}
1772
1773	return 0;
1774}
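
/*
 * Editor's sketch: how a slab-backed mempool like cifs_mid_poolp is
 * typically consumed.  With GFP_NOFS the allocation may sleep but will not
 * return NULL from process context, because at least min_nr objects
 * (3 above) are kept in reserve to guarantee forward progress under memory
 * pressure.  The wrapper names below are hypothetical, not the client's
 * real helpers.
 */
static __maybe_unused struct mid_q_entry *cifs_example_get_mid(void)
{
	struct mid_q_entry *mid;

	mid = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(mid, 0, sizeof(*mid));
	return mid;
}

static void __maybe_unused cifs_example_put_mid(struct mid_q_entry *mid)
{
	mempool_free(mid, cifs_mid_poolp);
}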
1775
1776static void destroy_mids(void)
1777{
1778	mempool_destroy(cifs_mid_poolp);
1779	kmem_cache_destroy(cifs_mid_cachep);
1780}
1781
1782static int cifs_init_netfs(void)
1783{
1784	cifs_io_request_cachep =
1785		kmem_cache_create("cifs_io_request",
1786				  sizeof(struct cifs_io_request), 0,
1787				  SLAB_HWCACHE_ALIGN, NULL);
1788	if (!cifs_io_request_cachep)
1789		goto nomem_req;
1790
1791	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
1792		goto nomem_reqpool;
1793
1794	cifs_io_subrequest_cachep =
1795		kmem_cache_create("cifs_io_subrequest",
1796				  sizeof(struct cifs_io_subrequest), 0,
1797				  SLAB_HWCACHE_ALIGN, NULL);
1798	if (!cifs_io_subrequest_cachep)
1799		goto nomem_subreq;
1800
1801	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
1802		goto nomem_subreqpool;
1803
1804	return 0;
1805
1806nomem_subreqpool:
1807	kmem_cache_destroy(cifs_io_subrequest_cachep);
1808nomem_subreq:
1809	mempool_exit(&cifs_io_request_pool);
1810nomem_reqpool:
1811	kmem_cache_destroy(cifs_io_request_cachep);
1812nomem_req:
1813	return -ENOMEM;
1814}
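
/*
 * Editor's sketch of the unwind idiom used by cifs_init_netfs() above and
 * by init_cifs() below: resources are acquired in order and the error
 * labels release them in exactly the reverse order, so a failure at any
 * step frees only what was already set up.  The helpers are placeholder
 * stubs, not real client functions.
 */
static int cifs_example_setup_a(void) { return 0; }	/* stub */
static int cifs_example_setup_b(void) { return 0; }	/* stub */
static void cifs_example_undo_a(void) { }		/* stub */

static int __maybe_unused cifs_example_unwind(void)
{
	int rc;

	rc = cifs_example_setup_a();
	if (rc)
		goto out;
	rc = cifs_example_setup_b();
	if (rc)
		goto out_undo_a;
	return 0;

out_undo_a:
	cifs_example_undo_a();
out:
	return rc;
}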
1815
1816static void cifs_destroy_netfs(void)
1817{
1818	mempool_exit(&cifs_io_subrequest_pool);
1819	kmem_cache_destroy(cifs_io_subrequest_cachep);
1820	mempool_exit(&cifs_io_request_pool);
1821	kmem_cache_destroy(cifs_io_request_cachep);
1822}
1823
1824static int __init
1825init_cifs(void)
1826{
	int rc = 0;

	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);

	/* Initialize global counters */
1833	atomic_set(&sesInfoAllocCount, 0);
1834	atomic_set(&tconInfoAllocCount, 0);
1835	atomic_set(&tcpSesNextId, 0);
1836	atomic_set(&tcpSesAllocCount, 0);
1837	atomic_set(&tcpSesReconnectCount, 0);
1838	atomic_set(&tconInfoReconnectCount, 0);
1839
1840	atomic_set(&buf_alloc_count, 0);
1841	atomic_set(&small_buf_alloc_count, 0);
1842#ifdef CONFIG_CIFS_STATS2
1843	atomic_set(&total_buf_alloc_count, 0);
1844	atomic_set(&total_small_buf_alloc_count, 0);
1845	if (slow_rsp_threshold < 1)
1846		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1847	else if (slow_rsp_threshold > 32767)
1848		cifs_dbg(VFS,
1849		       "slow response threshold set higher than recommended (0 to 32767)\n");
1850#endif /* CONFIG_CIFS_STATS2 */
1851
1852	atomic_set(&mid_count, 0);
1853	GlobalCurrentXid = 0;
1854	GlobalTotalActiveXid = 0;
1855	GlobalMaxActiveXid = 0;
1856	spin_lock_init(&cifs_tcp_ses_lock);
1857	spin_lock_init(&GlobalMid_Lock);
1858
1859	cifs_lock_secret = get_random_u32();
1860
1861	if (cifs_max_pending < 2) {
1862		cifs_max_pending = 2;
1863		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1864	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1865		cifs_max_pending = CIFS_MAX_REQ;
1866		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1867			 CIFS_MAX_REQ);
1868	}
1869
	/* Limit the maximum to about 18 hours; setting it to zero disables directory entry caching */
1871	if (dir_cache_timeout > 65000) {
1872		dir_cache_timeout = 65000;
1873		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1874	}
1875
1876	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1877	if (!cifsiod_wq) {
1878		rc = -ENOMEM;
1879		goto out_clean_proc;
1880	}
1881
	/*
	 * Consider setting max_active != 0 in the future, perhaps to
	 * min(num_of_cores - 1, 3), so that we don't launch too many worker
	 * threads, although Documentation/core-api/workqueue.rst recommends
	 * leaving it at 0.
	 */
1887
1888	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1889	decrypt_wq = alloc_workqueue("smb3decryptd",
1890				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1891	if (!decrypt_wq) {
1892		rc = -ENOMEM;
1893		goto out_destroy_cifsiod_wq;
1894	}
1895
1896	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1897				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1898	if (!fileinfo_put_wq) {
1899		rc = -ENOMEM;
1900		goto out_destroy_decrypt_wq;
1901	}
1902
1903	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1904					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1905	if (!cifsoplockd_wq) {
1906		rc = -ENOMEM;
1907		goto out_destroy_fileinfo_put_wq;
1908	}
1909
1910	deferredclose_wq = alloc_workqueue("deferredclose",
1911					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1912	if (!deferredclose_wq) {
1913		rc = -ENOMEM;
1914		goto out_destroy_cifsoplockd_wq;
1915	}
1916
1917	serverclose_wq = alloc_workqueue("serverclose",
1918					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1919	if (!serverclose_wq) {
1920		rc = -ENOMEM;
1921		goto out_destroy_deferredclose_wq;
1922	}
1923
1924	cfid_put_wq = alloc_workqueue("cfid_put_wq",
1925				      WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1926	if (!cfid_put_wq) {
1927		rc = -ENOMEM;
1928		goto out_destroy_serverclose_wq;
1929	}
1930
1931	rc = cifs_init_inodecache();
1932	if (rc)
1933		goto out_destroy_cfid_put_wq;
1934
1935	rc = cifs_init_netfs();
1936	if (rc)
1937		goto out_destroy_inodecache;
1938
1939	rc = init_mids();
1940	if (rc)
1941		goto out_destroy_netfs;
1942
1943	rc = cifs_init_request_bufs();
1944	if (rc)
1945		goto out_destroy_mids;
1946
1947#ifdef CONFIG_CIFS_DFS_UPCALL
1948	rc = dfs_cache_init();
1949	if (rc)
1950		goto out_destroy_request_bufs;
1951#endif /* CONFIG_CIFS_DFS_UPCALL */
1952#ifdef CONFIG_CIFS_UPCALL
1953	rc = init_cifs_spnego();
1954	if (rc)
1955		goto out_destroy_dfs_cache;
1956#endif /* CONFIG_CIFS_UPCALL */
1957#ifdef CONFIG_CIFS_SWN_UPCALL
1958	rc = cifs_genl_init();
1959	if (rc)
1960		goto out_register_key_type;
1961#endif /* CONFIG_CIFS_SWN_UPCALL */
1962
1963	rc = init_cifs_idmap();
1964	if (rc)
1965		goto out_cifs_swn_init;
1966
1967	rc = register_filesystem(&cifs_fs_type);
1968	if (rc)
1969		goto out_init_cifs_idmap;
1970
1971	rc = register_filesystem(&smb3_fs_type);
1972	if (rc) {
1973		unregister_filesystem(&cifs_fs_type);
1974		goto out_init_cifs_idmap;
1975	}
1976
1977	return 0;
1978
1979out_init_cifs_idmap:
1980	exit_cifs_idmap();
1981out_cifs_swn_init:
1982#ifdef CONFIG_CIFS_SWN_UPCALL
1983	cifs_genl_exit();
1984out_register_key_type:
1985#endif
1986#ifdef CONFIG_CIFS_UPCALL
1987	exit_cifs_spnego();
1988out_destroy_dfs_cache:
1989#endif
1990#ifdef CONFIG_CIFS_DFS_UPCALL
1991	dfs_cache_destroy();
1992out_destroy_request_bufs:
1993#endif
1994	cifs_destroy_request_bufs();
1995out_destroy_mids:
1996	destroy_mids();
1997out_destroy_netfs:
1998	cifs_destroy_netfs();
1999out_destroy_inodecache:
2000	cifs_destroy_inodecache();
2001out_destroy_cfid_put_wq:
2002	destroy_workqueue(cfid_put_wq);
2003out_destroy_serverclose_wq:
2004	destroy_workqueue(serverclose_wq);
2005out_destroy_deferredclose_wq:
2006	destroy_workqueue(deferredclose_wq);
2007out_destroy_cifsoplockd_wq:
2008	destroy_workqueue(cifsoplockd_wq);
2009out_destroy_fileinfo_put_wq:
2010	destroy_workqueue(fileinfo_put_wq);
2011out_destroy_decrypt_wq:
2012	destroy_workqueue(decrypt_wq);
2013out_destroy_cifsiod_wq:
2014	destroy_workqueue(cifsiod_wq);
2015out_clean_proc:
2016	cifs_proc_clean();
2017	return rc;
2018}
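
/*
 * Editor's sketch with hypothetical names: how work ends up on one of the
 * workqueues allocated in init_cifs().  WQ_MEM_RECLAIM guarantees a rescuer
 * thread so queued work can still make progress under memory pressure, and
 * WQ_FREEZABLE parks the queue across system suspend.
 */
static void cifs_example_work_fn(struct work_struct *work)
{
	/* a real handler (oplock break, deferred close, ...) would run here */
}

static DECLARE_WORK(cifs_example_work, cifs_example_work_fn);

static void __maybe_unused cifs_example_queue_work(void)
{
	queue_work(cifsiod_wq, &cifs_example_work);
}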
2019
2020static void __exit
2021exit_cifs(void)
2022{
2023	cifs_dbg(NOISY, "exit_smb3\n");
2024	unregister_filesystem(&cifs_fs_type);
2025	unregister_filesystem(&smb3_fs_type);
2026	cifs_release_automount_timer();
2027	exit_cifs_idmap();
2028#ifdef CONFIG_CIFS_SWN_UPCALL
2029	cifs_genl_exit();
2030#endif
2031#ifdef CONFIG_CIFS_UPCALL
2032	exit_cifs_spnego();
2033#endif
2034#ifdef CONFIG_CIFS_DFS_UPCALL
2035	dfs_cache_destroy();
2036#endif
2037	cifs_destroy_request_bufs();
2038	destroy_mids();
2039	cifs_destroy_netfs();
2040	cifs_destroy_inodecache();
2041	destroy_workqueue(deferredclose_wq);
2042	destroy_workqueue(cifsoplockd_wq);
2043	destroy_workqueue(decrypt_wq);
2044	destroy_workqueue(fileinfo_put_wq);
2045	destroy_workqueue(serverclose_wq);
2046	destroy_workqueue(cfid_put_wq);
2047	destroy_workqueue(cifsiod_wq);
2048	cifs_proc_clean();
2049}
2050
2051MODULE_AUTHOR("Steve French");
2052MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
2053MODULE_DESCRIPTION
2054	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
2055	"also older servers complying with the SNIA CIFS Specification)");
2056MODULE_VERSION(CIFS_VERSION);
2057MODULE_SOFTDEP("ecb");
2058MODULE_SOFTDEP("hmac");
2059MODULE_SOFTDEP("md5");
2060MODULE_SOFTDEP("nls");
2061MODULE_SOFTDEP("aes");
2062MODULE_SOFTDEP("cmac");
2063MODULE_SOFTDEP("sha256");
2064MODULE_SOFTDEP("sha512");
2065MODULE_SOFTDEP("aead2");
2066MODULE_SOFTDEP("ccm");
2067MODULE_SOFTDEP("gcm");
2068module_init(init_cifs)
2069module_exit(exit_cifs)