v6.2
   1// SPDX-License-Identifier: LGPL-2.1
   2/*
   3 *
   4 *   Copyright (C) International Business Machines  Corp., 2002,2008
   5 *   Author(s): Steve French (sfrench@us.ibm.com)
   6 *
   7 *   Common Internet FileSystem (CIFS) client
   8 *
   9 */
  10
  11/* Note that BB means BUGBUG (ie something to fix eventually) */
  12
  13#include <linux/module.h>
  14#include <linux/fs.h>
  15#include <linux/mount.h>
  16#include <linux/slab.h>
  17#include <linux/init.h>
  18#include <linux/list.h>
  19#include <linux/seq_file.h>
  20#include <linux/vfs.h>
  21#include <linux/mempool.h>
  22#include <linux/delay.h>
  23#include <linux/kthread.h>
  24#include <linux/freezer.h>
  25#include <linux/namei.h>
  26#include <linux/random.h>
  27#include <linux/uuid.h>
  28#include <linux/xattr.h>
  29#include <uapi/linux/magic.h>
  30#include <net/ipv6.h>
  31#include "cifsfs.h"
  32#include "cifspdu.h"
  33#define DECLARE_GLOBALS_HERE
  34#include "cifsglob.h"
  35#include "cifsproto.h"
  36#include "cifs_debug.h"
  37#include "cifs_fs_sb.h"
  38#include <linux/mm.h>
  39#include <linux/key-type.h>
  40#include "cifs_spnego.h"
  41#include "fscache.h"
  42#ifdef CONFIG_CIFS_DFS_UPCALL
  43#include "dfs_cache.h"
  44#endif
  45#ifdef CONFIG_CIFS_SWN_UPCALL
  46#include "netlink.h"
  47#endif
  48#include "fs_context.h"
  49#include "cached_dir.h"
  50
  51/*
  52 * DOS dates from 1980/1/1 through 2107/12/31
  53 * Protocol specifications indicate the range should be to 119, which
  54 * limits maximum year to 2099. But this range has not been checked.
  55 */
  56#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
  57#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
  58#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
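/*
 * Editor's note (illustrative, not part of the upstream file): the three
 * macros above pack fields the way DOS/SMB1 timestamps do:
 *
 *   date = (year - 1980) << 9 | month << 5 | day
 *   time = hour << 11 | minute << 5 | (seconds / 2)
 *
 * so SMB_DATE_MIN is 1980-01-01, SMB_DATE_MAX is 2107-12-31 and SMB_TIME_MAX
 * is 23:59:58 (the seconds field stores 29 because of the 2 s granularity).
 */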
  59
  60int cifsFYI = 0;
  61bool traceSMB;
  62bool enable_oplocks = true;
  63bool linuxExtEnabled = true;
  64bool lookupCacheEnabled = true;
  65bool disable_legacy_dialects; /* false by default */
  66bool enable_gcm_256 = true;
  67bool require_gcm_256; /* false by default */
  68bool enable_negotiate_signing; /* false by default */
  69unsigned int global_secflags = CIFSSEC_DEF;
  70/* unsigned int ntlmv2_support = 0; */
  71unsigned int sign_CIFS_PDUs = 1;
  72
  73/*
  74 * Global transaction id (XID) information
  75 */
   76unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
   77unsigned int GlobalTotalActiveXid; /* protected by GlobalMid_Lock */
   78unsigned int GlobalMaxActiveXid;	/* protected by GlobalMid_Lock */
  79spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
  80
  81/*
  82 *  Global counters, updated atomically
  83 */
  84atomic_t sesInfoAllocCount;
  85atomic_t tconInfoAllocCount;
  86atomic_t tcpSesNextId;
  87atomic_t tcpSesAllocCount;
  88atomic_t tcpSesReconnectCount;
  89atomic_t tconInfoReconnectCount;
  90
  91atomic_t mid_count;
  92atomic_t buf_alloc_count;
  93atomic_t small_buf_alloc_count;
  94#ifdef CONFIG_CIFS_STATS2
  95atomic_t total_buf_alloc_count;
  96atomic_t total_small_buf_alloc_count;
   97#endif /* STATS2 */
  98struct list_head	cifs_tcp_ses_list;
  99spinlock_t		cifs_tcp_ses_lock;
 100static const struct super_operations cifs_super_ops;
 101unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
 102module_param(CIFSMaxBufSize, uint, 0444);
 103MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
 104				 "for CIFS requests. "
 105				 "Default: 16384 Range: 8192 to 130048");
 106unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
 107module_param(cifs_min_rcv, uint, 0444);
 108MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
 109				"1 to 64");
 110unsigned int cifs_min_small = 30;
 111module_param(cifs_min_small, uint, 0444);
 112MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
 113				 "Range: 2 to 256");
 114unsigned int cifs_max_pending = CIFS_MAX_REQ;
 115module_param(cifs_max_pending, uint, 0444);
 116MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
 117				   "CIFS/SMB1 dialect (N/A for SMB3) "
 118				   "Default: 32767 Range: 2 to 32767.");
 119#ifdef CONFIG_CIFS_STATS2
 120unsigned int slow_rsp_threshold = 1;
 121module_param(slow_rsp_threshold, uint, 0644);
 122MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
 123				   "before logging that a response is delayed. "
 124				   "Default: 1 (if set to 0 disables msg).");
 125#endif /* STATS2 */
 126
 127module_param(enable_oplocks, bool, 0644);
 128MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
 129
 130module_param(enable_gcm_256, bool, 0644);
 131MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
 132
 133module_param(require_gcm_256, bool, 0644);
 134MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
 135
 136module_param(enable_negotiate_signing, bool, 0644);
 137MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
 138
 139module_param(disable_legacy_dialects, bool, 0644);
 140MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
 141				  "helpful to restrict the ability to "
 142				  "override the default dialects (SMB2.1, "
 143				  "SMB3 and SMB3.02) on mount with old "
 144				  "dialects (CIFS/SMB1 and SMB2) since "
 145				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
 146				  " and less secure. Default: n/N/0");
 147
 148extern mempool_t *cifs_sm_req_poolp;
 149extern mempool_t *cifs_req_poolp;
 150extern mempool_t *cifs_mid_poolp;
 151
 152struct workqueue_struct	*cifsiod_wq;
 153struct workqueue_struct	*decrypt_wq;
 154struct workqueue_struct	*fileinfo_put_wq;
 155struct workqueue_struct	*cifsoplockd_wq;
 156struct workqueue_struct	*deferredclose_wq;
 157__u32 cifs_lock_secret;
 158
 159/*
 160 * Bumps refcount for cifs super block.
  161 * Note that it should only be called if a reference to the VFS super block is
 162 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 163 * atomic_dec_and_test in deactivate_locked_super.
 164 */
 165void
 166cifs_sb_active(struct super_block *sb)
 167{
 168	struct cifs_sb_info *server = CIFS_SB(sb);
 169
 170	if (atomic_inc_return(&server->active) == 1)
 171		atomic_inc(&sb->s_active);
 172}
 173
 174void
 175cifs_sb_deactive(struct super_block *sb)
 176{
 177	struct cifs_sb_info *server = CIFS_SB(sb);
 178
 179	if (atomic_dec_and_test(&server->active))
 180		deactivate_super(sb);
 181}
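/*
 * Editor's note (illustrative sketch, not part of the upstream file): per the
 * comment above cifs_sb_active(), these helpers are used as a pair from code
 * that already holds a VFS reference to the super block, roughly:
 *
 *	cifs_sb_active(sb);	// pin cifs_sb and bump sb->s_active
 *	// ... work that must not race with unmount ...
 *	cifs_sb_deactive(sb);	// unpin; the last ref calls deactivate_super()
 */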
 182
 183static int
 184cifs_read_super(struct super_block *sb)
 185{
 186	struct inode *inode;
 187	struct cifs_sb_info *cifs_sb;
 188	struct cifs_tcon *tcon;
 189	struct timespec64 ts;
 190	int rc = 0;
 191
 192	cifs_sb = CIFS_SB(sb);
 193	tcon = cifs_sb_master_tcon(cifs_sb);
 194
 195	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
 196		sb->s_flags |= SB_POSIXACL;
 197
 198	if (tcon->snapshot_time)
 199		sb->s_flags |= SB_RDONLY;
 200
 201	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
 202		sb->s_maxbytes = MAX_LFS_FILESIZE;
 203	else
 204		sb->s_maxbytes = MAX_NON_LFS;
 205
 206	/*
 207	 * Some very old servers like DOS and OS/2 used 2 second granularity
 208	 * (while all current servers use 100ns granularity - see MS-DTYP)
 209	 * but 1 second is the maximum allowed granularity for the VFS
 210	 * so for old servers set time granularity to 1 second while for
 211	 * everything else (current servers) set it to 100ns.
 212	 */
 213	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
 214	    ((tcon->ses->capabilities &
 215	      tcon->ses->server->vals->cap_nt_find) == 0) &&
 216	    !tcon->unix_ext) {
 217		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
 218		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
 219		sb->s_time_min = ts.tv_sec;
 220		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
 221				    cpu_to_le16(SMB_TIME_MAX), 0);
 222		sb->s_time_max = ts.tv_sec;
 223	} else {
 224		/*
 225		 * Almost every server, including all SMB2+, uses DCE TIME
 226		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
 227		 */
 228		sb->s_time_gran = 100;
 229		ts = cifs_NTtimeToUnix(0);
 230		sb->s_time_min = ts.tv_sec;
 231		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
 232		sb->s_time_max = ts.tv_sec;
 233	}
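	/*
	 * Editor's note (not part of the upstream file): for reference,
	 * cifs_NTtimeToUnix(0) is the NT epoch 1601-01-01 (about
	 * -11644473600 in Unix time) and S64_MAX 100 ns ticks land around
	 * the year 30828, so the else branch covers the full NT time range.
	 */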
 234
 235	sb->s_magic = CIFS_SUPER_MAGIC;
 236	sb->s_op = &cifs_super_ops;
 237	sb->s_xattr = cifs_xattr_handlers;
 238	rc = super_setup_bdi(sb);
 239	if (rc)
 240		goto out_no_root;
 241	/* tune readahead according to rsize if readahead size not set on mount */
 242	if (cifs_sb->ctx->rsize == 0)
 243		cifs_sb->ctx->rsize =
 244			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
 245	if (cifs_sb->ctx->rasize)
 246		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
 247	else
 248		sb->s_bdi->ra_pages = cifs_sb->ctx->rsize / PAGE_SIZE;
 249
 250	sb->s_blocksize = CIFS_MAX_MSGSIZE;
 251	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
 252	inode = cifs_root_iget(sb);
 253
 254	if (IS_ERR(inode)) {
 255		rc = PTR_ERR(inode);
 256		goto out_no_root;
 257	}
 258
 259	if (tcon->nocase)
 260		sb->s_d_op = &cifs_ci_dentry_ops;
 261	else
 262		sb->s_d_op = &cifs_dentry_ops;
 263
 264	sb->s_root = d_make_root(inode);
 265	if (!sb->s_root) {
 266		rc = -ENOMEM;
 267		goto out_no_root;
 268	}
 269
 270#ifdef CONFIG_CIFS_NFSD_EXPORT
 271	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
 272		cifs_dbg(FYI, "export ops supported\n");
 273		sb->s_export_op = &cifs_export_ops;
 274	}
 275#endif /* CONFIG_CIFS_NFSD_EXPORT */
 276
 277	return 0;
 278
 279out_no_root:
 280	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
 281	return rc;
 282}
 283
 284static void cifs_kill_sb(struct super_block *sb)
 285{
 286	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 287
 288	/*
  289	 * We need to release all dentries for the cached directories
 290	 * before we kill the sb.
 291	 */
 292	if (cifs_sb->root) {
 293		close_all_cached_dirs(cifs_sb);
 294
 295		/* finally release root dentry */
 296		dput(cifs_sb->root);
 297		cifs_sb->root = NULL;
 298	}
 299
 300	kill_anon_super(sb);
 301	cifs_umount(cifs_sb);
 302}
 303
 304static int
 305cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
 306{
 307	struct super_block *sb = dentry->d_sb;
 308	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 309	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 310	struct TCP_Server_Info *server = tcon->ses->server;
 311	unsigned int xid;
 312	int rc = 0;
 313
 314	xid = get_xid();
 315
 316	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
 317		buf->f_namelen =
 318		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
 319	else
 320		buf->f_namelen = PATH_MAX;
 321
 322	buf->f_fsid.val[0] = tcon->vol_serial_number;
  323	/* we use part of the create time for more randomness, see man statfs */
 324	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
 325
 326	buf->f_files = 0;	/* undefined */
 327	buf->f_ffree = 0;	/* unlimited */
 328
 329	if (server->ops->queryfs)
 330		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
 331
 332	free_xid(xid);
 333	return rc;
 334}
 335
 336static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
 337{
 338	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
 339	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 340	struct TCP_Server_Info *server = tcon->ses->server;
 341
 342	if (server->ops->fallocate)
 343		return server->ops->fallocate(file, tcon, mode, off, len);
 344
 345	return -EOPNOTSUPP;
 346}
 347
 348static int cifs_permission(struct user_namespace *mnt_userns,
 349			   struct inode *inode, int mask)
 350{
 351	struct cifs_sb_info *cifs_sb;
 352
 353	cifs_sb = CIFS_SB(inode->i_sb);
 354
 355	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
 356		if ((mask & MAY_EXEC) && !execute_ok(inode))
 357			return -EACCES;
 358		else
 359			return 0;
 360	} else /* file mode might have been restricted at mount time
 361		on the client (above and beyond ACL on servers) for
 362		servers which do not support setting and viewing mode bits,
 363		so allowing client to check permissions is useful */
 364		return generic_permission(&init_user_ns, inode, mask);
 365}
 366
 367static struct kmem_cache *cifs_inode_cachep;
 368static struct kmem_cache *cifs_req_cachep;
 369static struct kmem_cache *cifs_mid_cachep;
 370static struct kmem_cache *cifs_sm_req_cachep;
 371mempool_t *cifs_sm_req_poolp;
 372mempool_t *cifs_req_poolp;
 373mempool_t *cifs_mid_poolp;
 374
 375static struct inode *
 376cifs_alloc_inode(struct super_block *sb)
 377{
 378	struct cifsInodeInfo *cifs_inode;
 379	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
 380	if (!cifs_inode)
 381		return NULL;
 382	cifs_inode->cifsAttrs = 0x20;	/* default */
 383	cifs_inode->time = 0;
 384	/*
 385	 * Until the file is open and we have gotten oplock info back from the
 386	 * server, can not assume caching of file data or metadata.
 387	 */
 388	cifs_set_oplock_level(cifs_inode, 0);
 389	cifs_inode->flags = 0;
 390	spin_lock_init(&cifs_inode->writers_lock);
 391	cifs_inode->writers = 0;
 392	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
 393	cifs_inode->server_eof = 0;
 394	cifs_inode->uniqueid = 0;
 395	cifs_inode->createtime = 0;
 396	cifs_inode->epoch = 0;
 397	spin_lock_init(&cifs_inode->open_file_lock);
 398	generate_random_uuid(cifs_inode->lease_key);
 399	cifs_inode->symlink_target = NULL;
 400
 401	/*
 402	 * Can not set i_flags here - they get immediately overwritten to zero
 403	 * by the VFS.
 404	 */
 405	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
 406	INIT_LIST_HEAD(&cifs_inode->openFileList);
 407	INIT_LIST_HEAD(&cifs_inode->llist);
 408	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
 409	spin_lock_init(&cifs_inode->deferred_lock);
 410	return &cifs_inode->netfs.inode;
 411}
 412
 413static void
 414cifs_free_inode(struct inode *inode)
 415{
 416	struct cifsInodeInfo *cinode = CIFS_I(inode);
 417
 418	if (S_ISLNK(inode->i_mode))
 419		kfree(cinode->symlink_target);
 420	kmem_cache_free(cifs_inode_cachep, cinode);
 421}
 422
 423static void
 424cifs_evict_inode(struct inode *inode)
 425{
 426	truncate_inode_pages_final(&inode->i_data);
 427	if (inode->i_state & I_PINNING_FSCACHE_WB)
 428		cifs_fscache_unuse_inode_cookie(inode, true);
 429	cifs_fscache_release_inode_cookie(inode);
 430	clear_inode(inode);
 431}
 432
 433static void
 434cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
 435{
 436	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
 437	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
 438
 439	seq_puts(s, ",addr=");
 440
 441	switch (server->dstaddr.ss_family) {
 442	case AF_INET:
 443		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
 444		break;
 445	case AF_INET6:
 446		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
 447		if (sa6->sin6_scope_id)
 448			seq_printf(s, "%%%u", sa6->sin6_scope_id);
 449		break;
 450	default:
 451		seq_puts(s, "(unknown)");
 452	}
 453	if (server->rdma)
 454		seq_puts(s, ",rdma");
 455}
 456
 457static void
 458cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
 459{
 460	if (ses->sectype == Unspecified) {
 461		if (ses->user_name == NULL)
 462			seq_puts(s, ",sec=none");
 463		return;
 464	}
 465
 466	seq_puts(s, ",sec=");
 467
 468	switch (ses->sectype) {
 469	case NTLMv2:
 470		seq_puts(s, "ntlmv2");
 471		break;
 472	case Kerberos:
 473		seq_puts(s, "krb5");
 474		break;
 475	case RawNTLMSSP:
 476		seq_puts(s, "ntlmssp");
 477		break;
 478	default:
 479		/* shouldn't ever happen */
 480		seq_puts(s, "unknown");
 481		break;
 482	}
 483
 484	if (ses->sign)
 485		seq_puts(s, "i");
 486
 487	if (ses->sectype == Kerberos)
 488		seq_printf(s, ",cruid=%u",
 489			   from_kuid_munged(&init_user_ns, ses->cred_uid));
 490}
 491
 492static void
 493cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
 494{
 495	seq_puts(s, ",cache=");
 496
 497	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
 498		seq_puts(s, "strict");
 499	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
 500		seq_puts(s, "none");
 501	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
 502		seq_puts(s, "singleclient"); /* assume only one client access */
 503	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
 504		seq_puts(s, "ro"); /* read only caching assumed */
 505	else
 506		seq_puts(s, "loose");
 507}
 508
 509/*
 510 * cifs_show_devname() is used so we show the mount device name with correct
 511 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
 512 */
 513static int cifs_show_devname(struct seq_file *m, struct dentry *root)
 514{
 515	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
 516	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
 517
 518	if (devname == NULL)
 519		seq_puts(m, "none");
 520	else {
 521		convert_delimiter(devname, '/');
 522		/* escape all spaces in share names */
 523		seq_escape(m, devname, " \t");
 524		kfree(devname);
 525	}
 526	return 0;
 527}
 528
 529/*
 530 * cifs_show_options() is for displaying mount options in /proc/mounts.
 531 * Not all settable options are displayed but most of the important
 532 * ones are.
 533 */
 534static int
 535cifs_show_options(struct seq_file *s, struct dentry *root)
 536{
 537	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
 538	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 539	struct sockaddr *srcaddr;
 540	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
 541
 542	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
 543	cifs_show_security(s, tcon->ses);
 544	cifs_show_cache_flavor(s, cifs_sb);
 545
 546	if (tcon->no_lease)
 547		seq_puts(s, ",nolease");
 548	if (cifs_sb->ctx->multiuser)
 549		seq_puts(s, ",multiuser");
 550	else if (tcon->ses->user_name)
 551		seq_show_option(s, "username", tcon->ses->user_name);
 552
 553	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
 554		seq_show_option(s, "domain", tcon->ses->domainName);
 555
 556	if (srcaddr->sa_family != AF_UNSPEC) {
 557		struct sockaddr_in *saddr4;
 558		struct sockaddr_in6 *saddr6;
 559		saddr4 = (struct sockaddr_in *)srcaddr;
 560		saddr6 = (struct sockaddr_in6 *)srcaddr;
 561		if (srcaddr->sa_family == AF_INET6)
 562			seq_printf(s, ",srcaddr=%pI6c",
 563				   &saddr6->sin6_addr);
 564		else if (srcaddr->sa_family == AF_INET)
 565			seq_printf(s, ",srcaddr=%pI4",
 566				   &saddr4->sin_addr.s_addr);
 567		else
 568			seq_printf(s, ",srcaddr=BAD-AF:%i",
 569				   (int)(srcaddr->sa_family));
 570	}
 571
 572	seq_printf(s, ",uid=%u",
 573		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
 574	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
 575		seq_puts(s, ",forceuid");
 576	else
 577		seq_puts(s, ",noforceuid");
 578
 579	seq_printf(s, ",gid=%u",
 580		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
 581	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
 582		seq_puts(s, ",forcegid");
 583	else
 584		seq_puts(s, ",noforcegid");
 585
 586	cifs_show_address(s, tcon->ses->server);
 587
 588	if (!tcon->unix_ext)
 589		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
 590					   cifs_sb->ctx->file_mode,
 591					   cifs_sb->ctx->dir_mode);
 592	if (cifs_sb->ctx->iocharset)
 593		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
 594	if (tcon->seal)
 595		seq_puts(s, ",seal");
 596	else if (tcon->ses->server->ignore_signature)
 597		seq_puts(s, ",signloosely");
 598	if (tcon->nocase)
 599		seq_puts(s, ",nocase");
 600	if (tcon->nodelete)
 601		seq_puts(s, ",nodelete");
 602	if (cifs_sb->ctx->no_sparse)
 603		seq_puts(s, ",nosparse");
 604	if (tcon->local_lease)
 605		seq_puts(s, ",locallease");
 606	if (tcon->retry)
 607		seq_puts(s, ",hard");
 608	else
 609		seq_puts(s, ",soft");
 610	if (tcon->use_persistent)
 611		seq_puts(s, ",persistenthandles");
 612	else if (tcon->use_resilient)
 613		seq_puts(s, ",resilienthandles");
 614	if (tcon->posix_extensions)
 615		seq_puts(s, ",posix");
 616	else if (tcon->unix_ext)
 617		seq_puts(s, ",unix");
 618	else
 619		seq_puts(s, ",nounix");
 620	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
 621		seq_puts(s, ",nodfs");
 622	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
 623		seq_puts(s, ",posixpaths");
 624	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
 625		seq_puts(s, ",setuids");
 626	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
 627		seq_puts(s, ",idsfromsid");
 628	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
 629		seq_puts(s, ",serverino");
 630	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
 631		seq_puts(s, ",rwpidforward");
 632	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
 633		seq_puts(s, ",forcemand");
 634	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
 635		seq_puts(s, ",nouser_xattr");
 636	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
 637		seq_puts(s, ",mapchars");
 638	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
 639		seq_puts(s, ",mapposix");
 640	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
 641		seq_puts(s, ",sfu");
 642	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
 643		seq_puts(s, ",nobrl");
 644	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
 645		seq_puts(s, ",nohandlecache");
 646	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
 647		seq_puts(s, ",modefromsid");
 648	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
 649		seq_puts(s, ",cifsacl");
 650	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
 651		seq_puts(s, ",dynperm");
 652	if (root->d_sb->s_flags & SB_POSIXACL)
 653		seq_puts(s, ",acl");
 654	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
 655		seq_puts(s, ",mfsymlinks");
 656	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
 657		seq_puts(s, ",fsc");
 658	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
 659		seq_puts(s, ",nostrictsync");
 660	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
 661		seq_puts(s, ",noperm");
 662	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
 663		seq_printf(s, ",backupuid=%u",
 664			   from_kuid_munged(&init_user_ns,
 665					    cifs_sb->ctx->backupuid));
 666	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
 667		seq_printf(s, ",backupgid=%u",
 668			   from_kgid_munged(&init_user_ns,
 669					    cifs_sb->ctx->backupgid));
 670
 671	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
 672	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
 673	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
 674	if (cifs_sb->ctx->rasize)
 675		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
 676	if (tcon->ses->server->min_offload)
 677		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
 678	seq_printf(s, ",echo_interval=%lu",
 679			tcon->ses->server->echo_interval / HZ);
 680
 681	/* Only display the following if overridden on mount */
 682	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
 683		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
 684	if (tcon->ses->server->tcp_nodelay)
 685		seq_puts(s, ",tcpnodelay");
 686	if (tcon->ses->server->noautotune)
 687		seq_puts(s, ",noautotune");
 688	if (tcon->ses->server->noblocksnd)
 689		seq_puts(s, ",noblocksend");
 690
 691	if (tcon->snapshot_time)
 692		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
 693	if (tcon->handle_timeout)
 694		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
 695
 696	/*
  697	 * Display file and directory attribute timeouts in seconds.
  698	 * If the file and directory attribute timeouts are the same then actimeo
  699	 * was likely specified on mount.
 700	 */
 701	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
 702		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
 703	else {
 704		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
 705		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
 706	}
 707	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
 708
 709	if (tcon->ses->chan_max > 1)
 710		seq_printf(s, ",multichannel,max_channels=%zu",
 711			   tcon->ses->chan_max);
 712
 713	if (tcon->use_witness)
 714		seq_puts(s, ",witness");
 715
 716	return 0;
 717}
 718
 719static void cifs_umount_begin(struct super_block *sb)
 720{
 721	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 722	struct cifs_tcon *tcon;
 723
 724	if (cifs_sb == NULL)
 725		return;
 726
 727	tcon = cifs_sb_master_tcon(cifs_sb);
 728
 729	spin_lock(&cifs_tcp_ses_lock);
 730	spin_lock(&tcon->tc_lock);
 731	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
 732		/* we have other mounts to same share or we have
 733		   already tried to force umount this and woken up
 734		   all waiting network requests, nothing to do */
 735		spin_unlock(&tcon->tc_lock);
 736		spin_unlock(&cifs_tcp_ses_lock);
 737		return;
 738	} else if (tcon->tc_count == 1)
 739		tcon->status = TID_EXITING;
 740	spin_unlock(&tcon->tc_lock);
 741	spin_unlock(&cifs_tcp_ses_lock);
 742
 743	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
 744	/* cancel_notify_requests(tcon); */
 745	if (tcon->ses && tcon->ses->server) {
 746		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
 747		wake_up_all(&tcon->ses->server->request_q);
 748		wake_up_all(&tcon->ses->server->response_q);
 749		msleep(1); /* yield */
 750		/* we have to kick the requests once more */
 751		wake_up_all(&tcon->ses->server->response_q);
 752		msleep(1);
 753	}
 754
 755	return;
 756}
 757
 758#ifdef CONFIG_CIFS_STATS2
 759static int cifs_show_stats(struct seq_file *s, struct dentry *root)
 760{
 761	/* BB FIXME */
 762	return 0;
 763}
 764#endif
 765
 766static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
 767{
 768	fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
 769	return 0;
 770}
 771
 772static int cifs_drop_inode(struct inode *inode)
 773{
 774	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 775
 776	/* no serverino => unconditional eviction */
 777	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
 778		generic_drop_inode(inode);
 779}
 780
 781static const struct super_operations cifs_super_ops = {
 782	.statfs = cifs_statfs,
 783	.alloc_inode = cifs_alloc_inode,
 784	.write_inode	= cifs_write_inode,
 785	.free_inode = cifs_free_inode,
 786	.drop_inode	= cifs_drop_inode,
 787	.evict_inode	= cifs_evict_inode,
 788/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
 789	.show_devname   = cifs_show_devname,
 790/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
 791	function unless later we add lazy close of inodes or unless the
 792	kernel forgets to call us with the same number of releases (closes)
 793	as opens */
 794	.show_options = cifs_show_options,
 795	.umount_begin   = cifs_umount_begin,
 796#ifdef CONFIG_CIFS_STATS2
 797	.show_stats = cifs_show_stats,
 798#endif
 799};
 800
 801/*
 802 * Get root dentry from superblock according to prefix path mount option.
 803 * Return dentry with refcount + 1 on success and NULL otherwise.
 804 */
 805static struct dentry *
 806cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
 807{
 808	struct dentry *dentry;
 809	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 810	char *full_path = NULL;
 811	char *s, *p;
 812	char sep;
 813
 814	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
 815		return dget(sb->s_root);
 816
 817	full_path = cifs_build_path_to_root(ctx, cifs_sb,
 818				cifs_sb_master_tcon(cifs_sb), 0);
 819	if (full_path == NULL)
 820		return ERR_PTR(-ENOMEM);
 821
 822	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
 823
 824	sep = CIFS_DIR_SEP(cifs_sb);
 825	dentry = dget(sb->s_root);
 826	s = full_path;
 827
 828	do {
 829		struct inode *dir = d_inode(dentry);
 830		struct dentry *child;
 831
 832		if (!S_ISDIR(dir->i_mode)) {
 833			dput(dentry);
 834			dentry = ERR_PTR(-ENOTDIR);
 835			break;
 836		}
 837
 838		/* skip separators */
 839		while (*s == sep)
 840			s++;
 841		if (!*s)
 842			break;
 843		p = s++;
 844		/* next separator */
 845		while (*s && *s != sep)
 846			s++;
 847
 848		child = lookup_positive_unlocked(p, dentry, s - p);
 849		dput(dentry);
 850		dentry = child;
 851	} while (!IS_ERR(dentry));
 852	kfree(full_path);
 853	return dentry;
 854}
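/*
 * Editor's note (illustrative, not part of the upstream file): for a mount
 * with prefix path "dir1\dir2", cifs_build_path_to_root() returns a path
 * such as "\dir1\dir2"; the loop above skips each separator and calls
 * lookup_positive_unlocked() once per component ("dir1", then "dir2"),
 * leaving dentry pointing at the innermost component or at an ERR_PTR.
 */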
 855
 856static int cifs_set_super(struct super_block *sb, void *data)
 857{
 858	struct cifs_mnt_data *mnt_data = data;
 859	sb->s_fs_info = mnt_data->cifs_sb;
 860	return set_anon_super(sb, NULL);
 861}
 862
 863struct dentry *
 864cifs_smb3_do_mount(struct file_system_type *fs_type,
 865	      int flags, struct smb3_fs_context *old_ctx)
 866{
 867	int rc;
 868	struct super_block *sb = NULL;
 869	struct cifs_sb_info *cifs_sb = NULL;
 870	struct cifs_mnt_data mnt_data;
 871	struct dentry *root;
 872
 873	/*
 874	 * Prints in Kernel / CIFS log the attempted mount operation
 875	 *	If CIFS_DEBUG && cifs_FYI
 876	 */
 877	if (cifsFYI)
 878		cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags);
 879	else
 880		cifs_info("Attempting to mount %s\n", old_ctx->UNC);
 881
 882	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
 883	if (cifs_sb == NULL) {
 884		root = ERR_PTR(-ENOMEM);
 885		goto out;
 886	}
 887
 888	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
 889	if (!cifs_sb->ctx) {
 890		root = ERR_PTR(-ENOMEM);
 891		goto out;
 892	}
 893	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
 894	if (rc) {
 895		root = ERR_PTR(rc);
 896		goto out;
 897	}
 898
 899	rc = cifs_setup_cifs_sb(cifs_sb);
 900	if (rc) {
 901		root = ERR_PTR(rc);
 902		goto out;
 903	}
 904
 905	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
 906	if (rc) {
 907		if (!(flags & SB_SILENT))
 908			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
 909				 rc);
 910		root = ERR_PTR(rc);
 911		goto out;
 912	}
 913
 914	mnt_data.ctx = cifs_sb->ctx;
 915	mnt_data.cifs_sb = cifs_sb;
 916	mnt_data.flags = flags;
 917
 918	/* BB should we make this contingent on mount parm? */
 919	flags |= SB_NODIRATIME | SB_NOATIME;
 920
 921	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
 922	if (IS_ERR(sb)) {
 923		root = ERR_CAST(sb);
 924		cifs_umount(cifs_sb);
 925		cifs_sb = NULL;
 926		goto out;
 927	}
 928
 929	if (sb->s_root) {
 930		cifs_dbg(FYI, "Use existing superblock\n");
 931		cifs_umount(cifs_sb);
 932		cifs_sb = NULL;
 933	} else {
 934		rc = cifs_read_super(sb);
 935		if (rc) {
 936			root = ERR_PTR(rc);
 937			goto out_super;
 938		}
 939
 940		sb->s_flags |= SB_ACTIVE;
 941	}
 942
 943	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
 944	if (IS_ERR(root))
 945		goto out_super;
 946
 947	if (cifs_sb)
 948		cifs_sb->root = dget(root);
 949
 950	cifs_dbg(FYI, "dentry root is: %p\n", root);
 951	return root;
 952
 953out_super:
 954	deactivate_locked_super(sb);
 955	return root;
 956out:
 957	if (cifs_sb) {
 958		if (!sb || IS_ERR(sb)) {  /* otherwise kill_sb will handle */
 959			kfree(cifs_sb->prepath);
 960			smb3_cleanup_fs_context(cifs_sb->ctx);
 961			kfree(cifs_sb);
 962		}
 963	}
 964	return root;
 965}
 966
 967
 968static ssize_t
 969cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 970{
 971	ssize_t rc;
 972	struct inode *inode = file_inode(iocb->ki_filp);
 973
 974	if (iocb->ki_flags & IOCB_DIRECT)
 975		return cifs_user_readv(iocb, iter);
 976
 977	rc = cifs_revalidate_mapping(inode);
 978	if (rc)
 979		return rc;
 980
 981	return generic_file_read_iter(iocb, iter);
 982}
 983
 984static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 985{
 986	struct inode *inode = file_inode(iocb->ki_filp);
 987	struct cifsInodeInfo *cinode = CIFS_I(inode);
 988	ssize_t written;
 989	int rc;
 990
 991	if (iocb->ki_filp->f_flags & O_DIRECT) {
 992		written = cifs_user_writev(iocb, from);
 993		if (written > 0 && CIFS_CACHE_READ(cinode)) {
 994			cifs_zap_mapping(inode);
 995			cifs_dbg(FYI,
 996				 "Set no oplock for inode=%p after a write operation\n",
 997				 inode);
 998			cinode->oplock = 0;
 999		}
1000		return written;
1001	}
1002
1003	written = cifs_get_writer(cinode);
1004	if (written)
1005		return written;
1006
1007	written = generic_file_write_iter(iocb, from);
1008
1009	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1010		goto out;
1011
1012	rc = filemap_fdatawrite(inode->i_mapping);
1013	if (rc)
1014		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1015			 rc, inode);
1016
1017out:
1018	cifs_put_writer(cinode);
1019	return written;
1020}
1021
1022static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1023{
1024	struct cifsFileInfo *cfile = file->private_data;
1025	struct cifs_tcon *tcon;
1026
1027	/*
1028	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1029	 * the cached file length
1030	 */
1031	if (whence != SEEK_SET && whence != SEEK_CUR) {
1032		int rc;
1033		struct inode *inode = file_inode(file);
1034
1035		/*
1036		 * We need to be sure that all dirty pages are written and the
1037		 * server has the newest file length.
1038		 */
1039		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1040		    inode->i_mapping->nrpages != 0) {
1041			rc = filemap_fdatawait(inode->i_mapping);
1042			if (rc) {
1043				mapping_set_error(inode->i_mapping, rc);
1044				return rc;
1045			}
1046		}
1047		/*
1048		 * Some applications poll for the file length in this strange
1049		 * way so we must seek to end on non-oplocked files by
1050		 * setting the revalidate time to zero.
1051		 */
1052		CIFS_I(inode)->time = 0;
1053
1054		rc = cifs_revalidate_file_attr(file);
1055		if (rc < 0)
1056			return (loff_t)rc;
1057	}
1058	if (cfile && cfile->tlink) {
1059		tcon = tlink_tcon(cfile->tlink);
1060		if (tcon->ses->server->ops->llseek)
1061			return tcon->ses->server->ops->llseek(file, tcon,
1062							      offset, whence);
1063	}
1064	return generic_file_llseek(file, offset, whence);
1065}
1066
1067static int
1068cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
1069{
1070	/*
1071	 * Note that this is called by vfs setlease with i_lock held to
1072	 * protect *lease from going away.
1073	 */
1074	struct inode *inode = file_inode(file);
1075	struct cifsFileInfo *cfile = file->private_data;
1076
1077	if (!(S_ISREG(inode->i_mode)))
1078		return -EINVAL;
1079
1080	/* Check if file is oplocked if this is request for new lease */
1081	if (arg == F_UNLCK ||
1082	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1083	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1084		return generic_setlease(file, arg, lease, priv);
1085	else if (tlink_tcon(cfile->tlink)->local_lease &&
1086		 !CIFS_CACHE_READ(CIFS_I(inode)))
1087		/*
1088		 * If the server claims to support oplock on this file, then we
1089		 * still need to check oplock even if the local_lease mount
1090		 * option is set, but there are servers which do not support
1091		 * oplock for which this mount option may be useful if the user
1092		 * knows that the file won't be changed on the server by anyone
1093		 * else.
1094		 */
1095		return generic_setlease(file, arg, lease, priv);
1096	else
1097		return -EAGAIN;
1098}
1099
1100struct file_system_type cifs_fs_type = {
1101	.owner = THIS_MODULE,
1102	.name = "cifs",
1103	.init_fs_context = smb3_init_fs_context,
1104	.parameters = smb3_fs_parameters,
1105	.kill_sb = cifs_kill_sb,
1106	.fs_flags = FS_RENAME_DOES_D_MOVE,
1107};
1108MODULE_ALIAS_FS("cifs");
1109
1110struct file_system_type smb3_fs_type = {
1111	.owner = THIS_MODULE,
1112	.name = "smb3",
1113	.init_fs_context = smb3_init_fs_context,
1114	.parameters = smb3_fs_parameters,
1115	.kill_sb = cifs_kill_sb,
1116	.fs_flags = FS_RENAME_DOES_D_MOVE,
1117};
1118MODULE_ALIAS_FS("smb3");
1119MODULE_ALIAS("smb3");
1120
1121const struct inode_operations cifs_dir_inode_ops = {
1122	.create = cifs_create,
1123	.atomic_open = cifs_atomic_open,
1124	.lookup = cifs_lookup,
1125	.getattr = cifs_getattr,
1126	.unlink = cifs_unlink,
1127	.link = cifs_hardlink,
1128	.mkdir = cifs_mkdir,
1129	.rmdir = cifs_rmdir,
1130	.rename = cifs_rename2,
1131	.permission = cifs_permission,
1132	.setattr = cifs_setattr,
1133	.symlink = cifs_symlink,
1134	.mknod   = cifs_mknod,
1135	.listxattr = cifs_listxattr,
1136	.get_acl = cifs_get_acl,
1137	.set_acl = cifs_set_acl,
1138};
1139
1140const struct inode_operations cifs_file_inode_ops = {
1141	.setattr = cifs_setattr,
1142	.getattr = cifs_getattr,
1143	.permission = cifs_permission,
1144	.listxattr = cifs_listxattr,
1145	.fiemap = cifs_fiemap,
1146	.get_acl = cifs_get_acl,
1147	.set_acl = cifs_set_acl,
1148};
1149
1150const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1151			    struct delayed_call *done)
1152{
1153	char *target_path;
1154
1155	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1156	if (!target_path)
1157		return ERR_PTR(-ENOMEM);
1158
1159	spin_lock(&inode->i_lock);
1160	if (likely(CIFS_I(inode)->symlink_target)) {
1161		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1162	} else {
1163		kfree(target_path);
1164		target_path = ERR_PTR(-EOPNOTSUPP);
1165	}
1166	spin_unlock(&inode->i_lock);
1167
1168	if (!IS_ERR(target_path))
1169		set_delayed_call(done, kfree_link, target_path);
1170
1171	return target_path;
1172}
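/*
 * Editor's note (not part of the upstream file): set_delayed_call() with
 * kfree_link makes the VFS free target_path only after it has finished
 * consuming the link string, which is why the buffer is not freed here on
 * the success path.
 */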
1173
1174const struct inode_operations cifs_symlink_inode_ops = {
1175	.get_link = cifs_get_link,
1176	.permission = cifs_permission,
1177	.listxattr = cifs_listxattr,
1178};
1179
1180static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1181		struct file *dst_file, loff_t destoff, loff_t len,
1182		unsigned int remap_flags)
1183{
1184	struct inode *src_inode = file_inode(src_file);
1185	struct inode *target_inode = file_inode(dst_file);
1186	struct cifsFileInfo *smb_file_src = src_file->private_data;
1187	struct cifsFileInfo *smb_file_target;
1188	struct cifs_tcon *target_tcon;
1189	unsigned int xid;
1190	int rc;
1191
1192	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1193		return -EINVAL;
1194
1195	cifs_dbg(FYI, "clone range\n");
1196
1197	xid = get_xid();
1198
1199	if (!src_file->private_data || !dst_file->private_data) {
1200		rc = -EBADF;
1201		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1202		goto out;
1203	}
1204
1205	smb_file_target = dst_file->private_data;
1206	target_tcon = tlink_tcon(smb_file_target->tlink);
1207
1208	/*
1209	 * Note: cifs case is easier than btrfs since server responsible for
1210	 * checks for proper open modes and file type and if it wants
1211	 * server could even support copy of range where source = target
1212	 */
1213	lock_two_nondirectories(target_inode, src_inode);
1214
1215	if (len == 0)
1216		len = src_inode->i_size - off;
1217
1218	cifs_dbg(FYI, "about to flush pages\n");
1219	/* should we flush first and last page first */
1220	truncate_inode_pages_range(&target_inode->i_data, destoff,
1221				   PAGE_ALIGN(destoff + len)-1);
1222
1223	if (target_tcon->ses->server->ops->duplicate_extents)
1224		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1225			smb_file_src, smb_file_target, off, len, destoff);
1226	else
1227		rc = -EOPNOTSUPP;
1228
1229	/* force revalidate of size and timestamps of target file now
1230	   that target is updated on the server */
1231	CIFS_I(target_inode)->time = 0;
1232	/* although unlocking in the reverse order from locking is not
1233	   strictly necessary here it is a little cleaner to be consistent */
1234	unlock_two_nondirectories(src_inode, target_inode);
1235out:
1236	free_xid(xid);
1237	return rc < 0 ? rc : len;
1238}
1239
1240ssize_t cifs_file_copychunk_range(unsigned int xid,
1241				struct file *src_file, loff_t off,
1242				struct file *dst_file, loff_t destoff,
1243				size_t len, unsigned int flags)
1244{
1245	struct inode *src_inode = file_inode(src_file);
1246	struct inode *target_inode = file_inode(dst_file);
1247	struct cifsFileInfo *smb_file_src;
1248	struct cifsFileInfo *smb_file_target;
1249	struct cifs_tcon *src_tcon;
1250	struct cifs_tcon *target_tcon;
1251	ssize_t rc;
1252
1253	cifs_dbg(FYI, "copychunk range\n");
1254
1255	if (!src_file->private_data || !dst_file->private_data) {
1256		rc = -EBADF;
1257		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1258		goto out;
1259	}
1260
1261	rc = -EXDEV;
1262	smb_file_target = dst_file->private_data;
1263	smb_file_src = src_file->private_data;
1264	src_tcon = tlink_tcon(smb_file_src->tlink);
1265	target_tcon = tlink_tcon(smb_file_target->tlink);
1266
1267	if (src_tcon->ses != target_tcon->ses) {
1268		cifs_dbg(VFS, "source and target of copy not on same server\n");
1269		goto out;
1270	}
1271
1272	rc = -EOPNOTSUPP;
1273	if (!target_tcon->ses->server->ops->copychunk_range)
1274		goto out;
1275
1276	/*
1277	 * Note: cifs case is easier than btrfs since server responsible for
1278	 * checks for proper open modes and file type and if it wants
1279	 * server could even support copy of range where source = target
1280	 */
1281	lock_two_nondirectories(target_inode, src_inode);
1282
1283	cifs_dbg(FYI, "about to flush pages\n");
1284
1285	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1286					  off + len - 1);
1287	if (rc)
1288		goto unlock;
1289
1290	/* should we flush first and last page first */
1291	truncate_inode_pages(&target_inode->i_data, 0);
1292
1293	rc = file_modified(dst_file);
1294	if (!rc)
1295		rc = target_tcon->ses->server->ops->copychunk_range(xid,
1296			smb_file_src, smb_file_target, off, len, destoff);
1297
1298	file_accessed(src_file);
1299
1300	/* force revalidate of size and timestamps of target file now
1301	 * that target is updated on the server
1302	 */
1303	CIFS_I(target_inode)->time = 0;
1304
1305unlock:
1306	/* although unlocking in the reverse order from locking is not
1307	 * strictly necessary here it is a little cleaner to be consistent
1308	 */
1309	unlock_two_nondirectories(src_inode, target_inode);
1310
1311out:
1312	return rc;
1313}
1314
1315/*
1316 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1317 * is a dummy operation.
1318 */
1319static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1320{
1321	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1322		 file, datasync);
1323
1324	return 0;
1325}
1326
1327static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1328				struct file *dst_file, loff_t destoff,
1329				size_t len, unsigned int flags)
1330{
1331	unsigned int xid = get_xid();
1332	ssize_t rc;
1333	struct cifsFileInfo *cfile = dst_file->private_data;
1334
1335	if (cfile->swapfile) {
1336		rc = -EOPNOTSUPP;
1337		free_xid(xid);
1338		return rc;
1339	}
1340
1341	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1342					len, flags);
1343	free_xid(xid);
1344
1345	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1346		rc = generic_copy_file_range(src_file, off, dst_file,
1347					     destoff, len, flags);
1348	return rc;
1349}
1350
1351const struct file_operations cifs_file_ops = {
1352	.read_iter = cifs_loose_read_iter,
1353	.write_iter = cifs_file_write_iter,
1354	.open = cifs_open,
1355	.release = cifs_close,
1356	.lock = cifs_lock,
1357	.flock = cifs_flock,
1358	.fsync = cifs_fsync,
1359	.flush = cifs_flush,
1360	.mmap  = cifs_file_mmap,
1361	.splice_read = generic_file_splice_read,
1362	.splice_write = iter_file_splice_write,
1363	.llseek = cifs_llseek,
1364	.unlocked_ioctl	= cifs_ioctl,
1365	.copy_file_range = cifs_copy_file_range,
1366	.remap_file_range = cifs_remap_file_range,
1367	.setlease = cifs_setlease,
1368	.fallocate = cifs_fallocate,
1369};
1370
1371const struct file_operations cifs_file_strict_ops = {
1372	.read_iter = cifs_strict_readv,
1373	.write_iter = cifs_strict_writev,
1374	.open = cifs_open,
1375	.release = cifs_close,
1376	.lock = cifs_lock,
1377	.flock = cifs_flock,
1378	.fsync = cifs_strict_fsync,
1379	.flush = cifs_flush,
1380	.mmap = cifs_file_strict_mmap,
1381	.splice_read = generic_file_splice_read,
1382	.splice_write = iter_file_splice_write,
1383	.llseek = cifs_llseek,
1384	.unlocked_ioctl	= cifs_ioctl,
1385	.copy_file_range = cifs_copy_file_range,
1386	.remap_file_range = cifs_remap_file_range,
1387	.setlease = cifs_setlease,
1388	.fallocate = cifs_fallocate,
1389};
1390
1391const struct file_operations cifs_file_direct_ops = {
1392	.read_iter = cifs_direct_readv,
1393	.write_iter = cifs_direct_writev,
1394	.open = cifs_open,
1395	.release = cifs_close,
1396	.lock = cifs_lock,
1397	.flock = cifs_flock,
1398	.fsync = cifs_fsync,
1399	.flush = cifs_flush,
1400	.mmap = cifs_file_mmap,
1401	.splice_read = generic_file_splice_read,
1402	.splice_write = iter_file_splice_write,
1403	.unlocked_ioctl  = cifs_ioctl,
1404	.copy_file_range = cifs_copy_file_range,
1405	.remap_file_range = cifs_remap_file_range,
1406	.llseek = cifs_llseek,
1407	.setlease = cifs_setlease,
1408	.fallocate = cifs_fallocate,
1409};
1410
1411const struct file_operations cifs_file_nobrl_ops = {
1412	.read_iter = cifs_loose_read_iter,
1413	.write_iter = cifs_file_write_iter,
1414	.open = cifs_open,
1415	.release = cifs_close,
1416	.fsync = cifs_fsync,
1417	.flush = cifs_flush,
1418	.mmap  = cifs_file_mmap,
1419	.splice_read = generic_file_splice_read,
1420	.splice_write = iter_file_splice_write,
1421	.llseek = cifs_llseek,
1422	.unlocked_ioctl	= cifs_ioctl,
1423	.copy_file_range = cifs_copy_file_range,
1424	.remap_file_range = cifs_remap_file_range,
1425	.setlease = cifs_setlease,
1426	.fallocate = cifs_fallocate,
1427};
1428
1429const struct file_operations cifs_file_strict_nobrl_ops = {
1430	.read_iter = cifs_strict_readv,
1431	.write_iter = cifs_strict_writev,
1432	.open = cifs_open,
1433	.release = cifs_close,
1434	.fsync = cifs_strict_fsync,
1435	.flush = cifs_flush,
1436	.mmap = cifs_file_strict_mmap,
1437	.splice_read = generic_file_splice_read,
1438	.splice_write = iter_file_splice_write,
1439	.llseek = cifs_llseek,
1440	.unlocked_ioctl	= cifs_ioctl,
1441	.copy_file_range = cifs_copy_file_range,
1442	.remap_file_range = cifs_remap_file_range,
1443	.setlease = cifs_setlease,
1444	.fallocate = cifs_fallocate,
1445};
1446
1447const struct file_operations cifs_file_direct_nobrl_ops = {
1448	.read_iter = cifs_direct_readv,
1449	.write_iter = cifs_direct_writev,
1450	.open = cifs_open,
1451	.release = cifs_close,
1452	.fsync = cifs_fsync,
1453	.flush = cifs_flush,
1454	.mmap = cifs_file_mmap,
1455	.splice_read = generic_file_splice_read,
1456	.splice_write = iter_file_splice_write,
1457	.unlocked_ioctl  = cifs_ioctl,
1458	.copy_file_range = cifs_copy_file_range,
1459	.remap_file_range = cifs_remap_file_range,
1460	.llseek = cifs_llseek,
1461	.setlease = cifs_setlease,
1462	.fallocate = cifs_fallocate,
1463};
1464
1465const struct file_operations cifs_dir_ops = {
1466	.iterate_shared = cifs_readdir,
1467	.release = cifs_closedir,
1468	.read    = generic_read_dir,
1469	.unlocked_ioctl  = cifs_ioctl,
1470	.copy_file_range = cifs_copy_file_range,
1471	.remap_file_range = cifs_remap_file_range,
1472	.llseek = generic_file_llseek,
1473	.fsync = cifs_dir_fsync,
1474};
1475
1476static void
1477cifs_init_once(void *inode)
1478{
1479	struct cifsInodeInfo *cifsi = inode;
1480
1481	inode_init_once(&cifsi->netfs.inode);
1482	init_rwsem(&cifsi->lock_sem);
1483}
1484
1485static int __init
1486cifs_init_inodecache(void)
1487{
1488	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1489					      sizeof(struct cifsInodeInfo),
1490					      0, (SLAB_RECLAIM_ACCOUNT|
1491						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1492					      cifs_init_once);
1493	if (cifs_inode_cachep == NULL)
1494		return -ENOMEM;
1495
1496	return 0;
1497}
1498
1499static void
1500cifs_destroy_inodecache(void)
1501{
1502	/*
1503	 * Make sure all delayed rcu free inodes are flushed before we
1504	 * destroy cache.
1505	 */
1506	rcu_barrier();
1507	kmem_cache_destroy(cifs_inode_cachep);
1508}
1509
1510static int
1511cifs_init_request_bufs(void)
1512{
1513	/*
1514	 * SMB2 maximum header size is bigger than CIFS one - no problems to
1515	 * allocate some more bytes for CIFS.
1516	 */
1517	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1518
1519	if (CIFSMaxBufSize < 8192) {
1520	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1521	Unicode path name has to fit in any SMB/CIFS path based frames */
1522		CIFSMaxBufSize = 8192;
1523	} else if (CIFSMaxBufSize > 1024*127) {
1524		CIFSMaxBufSize = 1024 * 127;
1525	} else {
1526		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1527	}
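	/*
	 * Editor's note (not part of the upstream file): the mask 0x1FE00
	 * keeps bits 9..16, i.e. it rounds the value down to a multiple of
	 * 512 within the allowed 8192..130048 window; for example 65537
	 * becomes 65536.
	 */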
1528/*
1529	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1530		 CIFSMaxBufSize, CIFSMaxBufSize);
1531*/
1532	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1533					    CIFSMaxBufSize + max_hdr_size, 0,
1534					    SLAB_HWCACHE_ALIGN, 0,
1535					    CIFSMaxBufSize + max_hdr_size,
1536					    NULL);
1537	if (cifs_req_cachep == NULL)
1538		return -ENOMEM;
1539
1540	if (cifs_min_rcv < 1)
1541		cifs_min_rcv = 1;
1542	else if (cifs_min_rcv > 64) {
1543		cifs_min_rcv = 64;
1544		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1545	}
1546
1547	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1548						  cifs_req_cachep);
1549
1550	if (cifs_req_poolp == NULL) {
1551		kmem_cache_destroy(cifs_req_cachep);
1552		return -ENOMEM;
1553	}
1554	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1555	almost all handle based requests (but not write response, nor is it
1556	sufficient for path based requests).  A smaller size would have
1557	been more efficient (compacting multiple slab items on one 4k page)
1558	for the case in which debug was on, but this larger size allows
1559	more SMBs to use small buffer alloc and is still much more
1560	efficient to alloc 1 per page off the slab compared to 17K (5page)
1561	alloc of large cifs buffers even when page debugging is on */
1562	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1563			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1564			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1565	if (cifs_sm_req_cachep == NULL) {
1566		mempool_destroy(cifs_req_poolp);
1567		kmem_cache_destroy(cifs_req_cachep);
1568		return -ENOMEM;
1569	}
1570
1571	if (cifs_min_small < 2)
1572		cifs_min_small = 2;
1573	else if (cifs_min_small > 256) {
1574		cifs_min_small = 256;
1575		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1576	}
1577
1578	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1579						     cifs_sm_req_cachep);
1580
1581	if (cifs_sm_req_poolp == NULL) {
1582		mempool_destroy(cifs_req_poolp);
1583		kmem_cache_destroy(cifs_req_cachep);
1584		kmem_cache_destroy(cifs_sm_req_cachep);
1585		return -ENOMEM;
1586	}
1587
1588	return 0;
1589}
1590
1591static void
1592cifs_destroy_request_bufs(void)
1593{
1594	mempool_destroy(cifs_req_poolp);
1595	kmem_cache_destroy(cifs_req_cachep);
1596	mempool_destroy(cifs_sm_req_poolp);
1597	kmem_cache_destroy(cifs_sm_req_cachep);
1598}
1599
1600static int init_mids(void)
1601{
1602	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1603					    sizeof(struct mid_q_entry), 0,
1604					    SLAB_HWCACHE_ALIGN, NULL);
1605	if (cifs_mid_cachep == NULL)
1606		return -ENOMEM;
1607
1608	/* 3 is a reasonable minimum number of simultaneous operations */
1609	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1610	if (cifs_mid_poolp == NULL) {
1611		kmem_cache_destroy(cifs_mid_cachep);
1612		return -ENOMEM;
1613	}
1614
1615	return 0;
1616}
1617
1618static void destroy_mids(void)
1619{
1620	mempool_destroy(cifs_mid_poolp);
1621	kmem_cache_destroy(cifs_mid_cachep);
1622}
1623
1624static int __init
1625init_cifs(void)
1626{
1627	int rc = 0;
1628	cifs_proc_init();
1629	INIT_LIST_HEAD(&cifs_tcp_ses_list);
1630/*
1631 *  Initialize Global counters
1632 */
1633	atomic_set(&sesInfoAllocCount, 0);
1634	atomic_set(&tconInfoAllocCount, 0);
1635	atomic_set(&tcpSesNextId, 0);
1636	atomic_set(&tcpSesAllocCount, 0);
1637	atomic_set(&tcpSesReconnectCount, 0);
1638	atomic_set(&tconInfoReconnectCount, 0);
1639
1640	atomic_set(&buf_alloc_count, 0);
1641	atomic_set(&small_buf_alloc_count, 0);
1642#ifdef CONFIG_CIFS_STATS2
1643	atomic_set(&total_buf_alloc_count, 0);
1644	atomic_set(&total_small_buf_alloc_count, 0);
1645	if (slow_rsp_threshold < 1)
1646		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1647	else if (slow_rsp_threshold > 32767)
1648		cifs_dbg(VFS,
1649		       "slow response threshold set higher than recommended (0 to 32767)\n");
1650#endif /* CONFIG_CIFS_STATS2 */
1651
1652	atomic_set(&mid_count, 0);
1653	GlobalCurrentXid = 0;
1654	GlobalTotalActiveXid = 0;
1655	GlobalMaxActiveXid = 0;
1656	spin_lock_init(&cifs_tcp_ses_lock);
1657	spin_lock_init(&GlobalMid_Lock);
1658
1659	cifs_lock_secret = get_random_u32();
1660
1661	if (cifs_max_pending < 2) {
1662		cifs_max_pending = 2;
1663		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1664	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1665		cifs_max_pending = CIFS_MAX_REQ;
1666		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1667			 CIFS_MAX_REQ);
1668	}
1669
1670	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1671	if (!cifsiod_wq) {
1672		rc = -ENOMEM;
1673		goto out_clean_proc;
1674	}
1675
1676	/*
1677	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1678	 * so that we don't launch too many worker threads but
1679	 * Documentation/core-api/workqueue.rst recommends setting it to 0
1680	 */
1681
1682	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1683	decrypt_wq = alloc_workqueue("smb3decryptd",
1684				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1685	if (!decrypt_wq) {
1686		rc = -ENOMEM;
1687		goto out_destroy_cifsiod_wq;
1688	}
1689
1690	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1691				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1692	if (!fileinfo_put_wq) {
1693		rc = -ENOMEM;
1694		goto out_destroy_decrypt_wq;
1695	}
1696
1697	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1698					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1699	if (!cifsoplockd_wq) {
1700		rc = -ENOMEM;
1701		goto out_destroy_fileinfo_put_wq;
1702	}
1703
1704	deferredclose_wq = alloc_workqueue("deferredclose",
1705					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1706	if (!deferredclose_wq) {
1707		rc = -ENOMEM;
1708		goto out_destroy_cifsoplockd_wq;
1709	}
1710
1711	rc = cifs_init_inodecache();
1712	if (rc)
1713		goto out_destroy_deferredclose_wq;
1714
1715	rc = init_mids();
1716	if (rc)
1717		goto out_destroy_inodecache;
1718
1719	rc = cifs_init_request_bufs();
1720	if (rc)
1721		goto out_destroy_mids;
1722
1723#ifdef CONFIG_CIFS_DFS_UPCALL
1724	rc = dfs_cache_init();
1725	if (rc)
1726		goto out_destroy_request_bufs;
1727#endif /* CONFIG_CIFS_DFS_UPCALL */
1728#ifdef CONFIG_CIFS_UPCALL
1729	rc = init_cifs_spnego();
1730	if (rc)
1731		goto out_destroy_dfs_cache;
1732#endif /* CONFIG_CIFS_UPCALL */
1733#ifdef CONFIG_CIFS_SWN_UPCALL
1734	rc = cifs_genl_init();
1735	if (rc)
1736		goto out_register_key_type;
1737#endif /* CONFIG_CIFS_SWN_UPCALL */
1738
1739	rc = init_cifs_idmap();
1740	if (rc)
1741		goto out_cifs_swn_init;
1742
1743	rc = register_filesystem(&cifs_fs_type);
1744	if (rc)
1745		goto out_init_cifs_idmap;
1746
1747	rc = register_filesystem(&smb3_fs_type);
1748	if (rc) {
1749		unregister_filesystem(&cifs_fs_type);
1750		goto out_init_cifs_idmap;
1751	}
1752
1753	return 0;
1754
1755out_init_cifs_idmap:
1756	exit_cifs_idmap();
1757out_cifs_swn_init:
1758#ifdef CONFIG_CIFS_SWN_UPCALL
1759	cifs_genl_exit();
1760out_register_key_type:
1761#endif
1762#ifdef CONFIG_CIFS_UPCALL
1763	exit_cifs_spnego();
1764out_destroy_dfs_cache:
1765#endif
1766#ifdef CONFIG_CIFS_DFS_UPCALL
1767	dfs_cache_destroy();
1768out_destroy_request_bufs:
1769#endif
1770	cifs_destroy_request_bufs();
1771out_destroy_mids:
1772	destroy_mids();
1773out_destroy_inodecache:
1774	cifs_destroy_inodecache();
1775out_destroy_deferredclose_wq:
1776	destroy_workqueue(deferredclose_wq);
1777out_destroy_cifsoplockd_wq:
1778	destroy_workqueue(cifsoplockd_wq);
1779out_destroy_fileinfo_put_wq:
1780	destroy_workqueue(fileinfo_put_wq);
1781out_destroy_decrypt_wq:
1782	destroy_workqueue(decrypt_wq);
1783out_destroy_cifsiod_wq:
1784	destroy_workqueue(cifsiod_wq);
1785out_clean_proc:
1786	cifs_proc_clean();
1787	return rc;
1788}
1789
1790static void __exit
1791exit_cifs(void)
1792{
1793	cifs_dbg(NOISY, "exit_smb3\n");
1794	unregister_filesystem(&cifs_fs_type);
1795	unregister_filesystem(&smb3_fs_type);
1796	cifs_dfs_release_automount_timer();
1797	exit_cifs_idmap();
1798#ifdef CONFIG_CIFS_SWN_UPCALL
1799	cifs_genl_exit();
1800#endif
1801#ifdef CONFIG_CIFS_UPCALL
1802	exit_cifs_spnego();
1803#endif
1804#ifdef CONFIG_CIFS_DFS_UPCALL
1805	dfs_cache_destroy();
1806#endif
1807	cifs_destroy_request_bufs();
1808	destroy_mids();
1809	cifs_destroy_inodecache();
1810	destroy_workqueue(deferredclose_wq);
1811	destroy_workqueue(cifsoplockd_wq);
1812	destroy_workqueue(decrypt_wq);
1813	destroy_workqueue(fileinfo_put_wq);
1814	destroy_workqueue(cifsiod_wq);
1815	cifs_proc_clean();
1816}
1817
1818MODULE_AUTHOR("Steve French");
1819MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
1820MODULE_DESCRIPTION
1821	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1822	"also older servers complying with the SNIA CIFS Specification)");
1823MODULE_VERSION(CIFS_VERSION);
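/*
 * The soft dependencies that follow ask modprobe to load the listed
 * crypto and NLS modules together with cifs.ko, so the hash, CMAC and
 * AES-CCM/GCM algorithms used for SMB signing and encryption are already
 * available when a mount needs them.
 */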
1824MODULE_SOFTDEP("ecb");
1825MODULE_SOFTDEP("hmac");
1826MODULE_SOFTDEP("md5");
1827MODULE_SOFTDEP("nls");
1828MODULE_SOFTDEP("aes");
1829MODULE_SOFTDEP("cmac");
1830MODULE_SOFTDEP("sha256");
1831MODULE_SOFTDEP("sha512");
1832MODULE_SOFTDEP("aead2");
1833MODULE_SOFTDEP("ccm");
1834MODULE_SOFTDEP("gcm");
1835module_init(init_cifs)
1836module_exit(exit_cifs)
v4.17
 
   1/*
   2 *   fs/cifs/cifsfs.c
   3 *
   4 *   Copyright (C) International Business Machines  Corp., 2002,2008
   5 *   Author(s): Steve French (sfrench@us.ibm.com)
   6 *
   7 *   Common Internet FileSystem (CIFS) client
   8 *
   9 *   This library is free software; you can redistribute it and/or modify
  10 *   it under the terms of the GNU Lesser General Public License as published
  11 *   by the Free Software Foundation; either version 2.1 of the License, or
  12 *   (at your option) any later version.
  13 *
  14 *   This library is distributed in the hope that it will be useful,
  15 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
  17 *   the GNU Lesser General Public License for more details.
  18 *
  19 *   You should have received a copy of the GNU Lesser General Public License
  20 *   along with this library; if not, write to the Free Software
  21 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  22 */
  23
  24/* Note that BB means BUGBUG (ie something to fix eventually) */
  25
  26#include <linux/module.h>
  27#include <linux/fs.h>
  28#include <linux/mount.h>
  29#include <linux/slab.h>
  30#include <linux/init.h>
  31#include <linux/list.h>
  32#include <linux/seq_file.h>
  33#include <linux/vfs.h>
  34#include <linux/mempool.h>
  35#include <linux/delay.h>
  36#include <linux/kthread.h>
  37#include <linux/freezer.h>
  38#include <linux/namei.h>
  39#include <linux/random.h>
  40#include <linux/uuid.h>
  41#include <linux/xattr.h>
 
  42#include <net/ipv6.h>
  43#include "cifsfs.h"
  44#include "cifspdu.h"
  45#define DECLARE_GLOBALS_HERE
  46#include "cifsglob.h"
  47#include "cifsproto.h"
  48#include "cifs_debug.h"
  49#include "cifs_fs_sb.h"
  50#include <linux/mm.h>
  51#include <linux/key-type.h>
  52#include "cifs_spnego.h"
  53#include "fscache.h"
  54#include "smb2pdu.h"
  55
  56int cifsFYI = 0;
  57bool traceSMB;
  58bool enable_oplocks = true;
  59bool linuxExtEnabled = true;
  60bool lookupCacheEnabled = true;
  61unsigned int global_secflags = CIFSSEC_DEF;
  62/* unsigned int ntlmv2_support = 0; */
  63unsigned int sign_CIFS_PDUs = 1;
  64static const struct super_operations cifs_super_ops;
  65unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
  66module_param(CIFSMaxBufSize, uint, 0444);
  67MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
  68				 "Default: 16384 Range: 8192 to 130048");
  69unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
  70module_param(cifs_min_rcv, uint, 0444);
  71MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
  72				"1 to 64");
  73unsigned int cifs_min_small = 30;
  74module_param(cifs_min_small, uint, 0444);
  75MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
  76				 "Range: 2 to 256");
  77unsigned int cifs_max_pending = CIFS_MAX_REQ;
  78module_param(cifs_max_pending, uint, 0444);
  79MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
  80				   "Default: 32767 Range: 2 to 32767.");
  81module_param(enable_oplocks, bool, 0644);
  82MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
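/*
 * Illustrative example (not part of the original source): the module
 * parameters above are normally set at load time, e.g.
 *
 *	modprobe cifs CIFSMaxBufSize=130048 cifs_max_pending=256 enable_oplocks=0
 *
 * enable_oplocks (mode 0644) can also be toggled at runtime through
 * /sys/module/cifs/parameters/enable_oplocks.
 */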
  83
  84extern mempool_t *cifs_sm_req_poolp;
  85extern mempool_t *cifs_req_poolp;
  86extern mempool_t *cifs_mid_poolp;
  87
  88struct workqueue_struct	*cifsiod_wq;
 
 
  89struct workqueue_struct	*cifsoplockd_wq;
 
  90__u32 cifs_lock_secret;
  91
  92/*
  93 * Bumps refcount for cifs super block.
  94	 * Note that it should only be called if a reference to the VFS super block is
  95 * already held, e.g. in open-type syscalls context. Otherwise it can race with
  96 * atomic_dec_and_test in deactivate_locked_super.
  97 */
  98void
  99cifs_sb_active(struct super_block *sb)
 100{
 101	struct cifs_sb_info *server = CIFS_SB(sb);
 102
 103	if (atomic_inc_return(&server->active) == 1)
 104		atomic_inc(&sb->s_active);
 105}
 106
 107void
 108cifs_sb_deactive(struct super_block *sb)
 109{
 110	struct cifs_sb_info *server = CIFS_SB(sb);
 111
 112	if (atomic_dec_and_test(&server->active))
 113		deactivate_super(sb);
 114}
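/*
 * Illustrative pairing (not part of the original source):
 *
 *	cifs_sb_active(sb);
 *	... do work that may outlive the syscall and dereferences sb ...
 *	cifs_sb_deactive(sb);
 *
 * The VFS s_active reference is taken only on the 0 -> 1 transition above,
 * and deactivate_super() is only called once the last user drops the count
 * back to zero.
 */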
 115
 116static int
 117cifs_read_super(struct super_block *sb)
 118{
 119	struct inode *inode;
 120	struct cifs_sb_info *cifs_sb;
 121	struct cifs_tcon *tcon;
 
 122	int rc = 0;
 123
 124	cifs_sb = CIFS_SB(sb);
 125	tcon = cifs_sb_master_tcon(cifs_sb);
 126
 127	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
 128		sb->s_flags |= SB_POSIXACL;
 129
 130	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
 131		sb->s_maxbytes = MAX_LFS_FILESIZE;
 132	else
 133		sb->s_maxbytes = MAX_NON_LFS;
 134
 135	/* BB FIXME fix time_gran to be larger for LANMAN sessions */
 136	sb->s_time_gran = 100;
 137
 138	sb->s_magic = CIFS_MAGIC_NUMBER;
 139	sb->s_op = &cifs_super_ops;
 140	sb->s_xattr = cifs_xattr_handlers;
 141	rc = super_setup_bdi(sb);
 142	if (rc)
 143		goto out_no_root;
 144	/* tune readahead according to rsize */
 145	sb->s_bdi->ra_pages = cifs_sb->rsize / PAGE_SIZE;
 146
 147	sb->s_blocksize = CIFS_MAX_MSGSIZE;
 148	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
 149	inode = cifs_root_iget(sb);
 150
 151	if (IS_ERR(inode)) {
 152		rc = PTR_ERR(inode);
 153		goto out_no_root;
 154	}
 155
 156	if (tcon->nocase)
 157		sb->s_d_op = &cifs_ci_dentry_ops;
 158	else
 159		sb->s_d_op = &cifs_dentry_ops;
 160
 161	sb->s_root = d_make_root(inode);
 162	if (!sb->s_root) {
 163		rc = -ENOMEM;
 164		goto out_no_root;
 165	}
 166
 167#ifdef CONFIG_CIFS_NFSD_EXPORT
 168	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
 169		cifs_dbg(FYI, "export ops supported\n");
 170		sb->s_export_op = &cifs_export_ops;
 171	}
 172#endif /* CONFIG_CIFS_NFSD_EXPORT */
 173
 174	return 0;
 175
 176out_no_root:
 177	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
 178	return rc;
 179}
 180
 181static void cifs_kill_sb(struct super_block *sb)
 182{
 183	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 184	kill_anon_super(sb);
 185	cifs_umount(cifs_sb);
 186}
 187
 188static int
 189cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
 190{
 191	struct super_block *sb = dentry->d_sb;
 192	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 193	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 194	struct TCP_Server_Info *server = tcon->ses->server;
 195	unsigned int xid;
 196	int rc = 0;
 197
 198	xid = get_xid();
 199
 200	/*
 201	 * PATH_MAX may be too long - it would presumably be the total path,
 202	 * but note that some servers (including Samba 3) have a shorter
 203	 * maximum path.
 204	 *
 205	 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
 206	 */
 207	buf->f_namelen = PATH_MAX;
 
 
 208	buf->f_files = 0;	/* undefined */
 209	buf->f_ffree = 0;	/* unlimited */
 210
 211	if (server->ops->queryfs)
 212		rc = server->ops->queryfs(xid, tcon, buf);
 213
 214	free_xid(xid);
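	/* note: the ->queryfs return code is not propagated; statfs()
	   succeeds with the defaults filled in above even if the query fails */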
 215	return 0;
 216}
 217
 218static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
 219{
 220	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
 221	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 222	struct TCP_Server_Info *server = tcon->ses->server;
 223
 224	if (server->ops->fallocate)
 225		return server->ops->fallocate(file, tcon, mode, off, len);
 226
 227	return -EOPNOTSUPP;
 228}
 229
 230static int cifs_permission(struct inode *inode, int mask)
 
 231{
 232	struct cifs_sb_info *cifs_sb;
 233
 234	cifs_sb = CIFS_SB(inode->i_sb);
 235
 236	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
 237		if ((mask & MAY_EXEC) && !execute_ok(inode))
 238			return -EACCES;
 239		else
 240			return 0;
 241	} else /* file mode might have been restricted at mount time
 242		on the client (above and beyond ACL on servers) for
 243		servers which do not support setting and viewing mode bits,
 244		so allowing client to check permissions is useful */
 245		return generic_permission(inode, mask);
 246}
 247
 248static struct kmem_cache *cifs_inode_cachep;
 249static struct kmem_cache *cifs_req_cachep;
 250static struct kmem_cache *cifs_mid_cachep;
 251static struct kmem_cache *cifs_sm_req_cachep;
 252mempool_t *cifs_sm_req_poolp;
 253mempool_t *cifs_req_poolp;
 254mempool_t *cifs_mid_poolp;
 255
 256static struct inode *
 257cifs_alloc_inode(struct super_block *sb)
 258{
 259	struct cifsInodeInfo *cifs_inode;
 260	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
 261	if (!cifs_inode)
 262		return NULL;
 263	cifs_inode->cifsAttrs = 0x20;	/* default */
 264	cifs_inode->time = 0;
 265	/*
 266	 * Until the file is open and we have gotten oplock info back from the
 267	 * server, can not assume caching of file data or metadata.
 268	 */
 269	cifs_set_oplock_level(cifs_inode, 0);
 270	cifs_inode->flags = 0;
 271	spin_lock_init(&cifs_inode->writers_lock);
 272	cifs_inode->writers = 0;
 273	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
 274	cifs_inode->server_eof = 0;
 275	cifs_inode->uniqueid = 0;
 276	cifs_inode->createtime = 0;
 277	cifs_inode->epoch = 0;
 
 278	generate_random_uuid(cifs_inode->lease_key);
 
 279
 280	/*
 281	 * Can not set i_flags here - they get immediately overwritten to zero
 282	 * by the VFS.
 283	 */
 284	/* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
 285	INIT_LIST_HEAD(&cifs_inode->openFileList);
 286	INIT_LIST_HEAD(&cifs_inode->llist);
 287	return &cifs_inode->vfs_inode;
 
 
 288}
 289
 290static void cifs_i_callback(struct rcu_head *head)
 
 291{
 292	struct inode *inode = container_of(head, struct inode, i_rcu);
 293	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
 294}
 295
 296static void
 297cifs_destroy_inode(struct inode *inode)
 298{
 299	call_rcu(&inode->i_rcu, cifs_i_callback);
 300}
 301
 302static void
 303cifs_evict_inode(struct inode *inode)
 304{
 305	truncate_inode_pages_final(&inode->i_data);
 306	clear_inode(inode);
 307	cifs_fscache_release_inode_cookie(inode);
 308}
 309
 310static void
 311cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
 312{
 313	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
 314	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
 315
 316	seq_puts(s, ",addr=");
 317
 318	switch (server->dstaddr.ss_family) {
 319	case AF_INET:
 320		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
 321		break;
 322	case AF_INET6:
 323		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
 324		if (sa6->sin6_scope_id)
 325			seq_printf(s, "%%%u", sa6->sin6_scope_id);
 326		break;
 327	default:
 328		seq_puts(s, "(unknown)");
 329	}
 330	if (server->rdma)
 331		seq_puts(s, ",rdma");
 332}
 333
 334static void
 335cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
 336{
 337	if (ses->sectype == Unspecified) {
 338		if (ses->user_name == NULL)
 339			seq_puts(s, ",sec=none");
 340		return;
 341	}
 342
 343	seq_puts(s, ",sec=");
 344
 345	switch (ses->sectype) {
 346	case LANMAN:
 347		seq_puts(s, "lanman");
 348		break;
 349	case NTLMv2:
 350		seq_puts(s, "ntlmv2");
 351		break;
 352	case NTLM:
 353		seq_puts(s, "ntlm");
 354		break;
 355	case Kerberos:
 356		seq_puts(s, "krb5");
 357		break;
 358	case RawNTLMSSP:
 359		seq_puts(s, "ntlmssp");
 360		break;
 361	default:
 362		/* shouldn't ever happen */
 363		seq_puts(s, "unknown");
 364		break;
 365	}
 366
 367	if (ses->sign)
 368		seq_puts(s, "i");
 369}
 370
 371static void
 372cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
 373{
 374	seq_puts(s, ",cache=");
 375
 376	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
 377		seq_puts(s, "strict");
 378	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
 379		seq_puts(s, "none");
 380	else
 381		seq_puts(s, "loose");
 382}
 383
 384static void
 385cifs_show_nls(struct seq_file *s, struct nls_table *cur)
 386{
 387	struct nls_table *def;
 
 388
 389	/* Display iocharset= option if it's not default charset */
 390	def = load_nls_default();
 391	if (def != cur)
 392		seq_printf(s, ",iocharset=%s", cur->charset);
 393	unload_nls(def);
 394}
 395
 396/*
 397 * cifs_show_options() is for displaying mount options in /proc/mounts.
 398 * Not all settable options are displayed but most of the important
 399 * ones are.
 400 */
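/*
 * Illustrative example (not part of the original source) of a resulting
 * /proc/mounts line for an SMB3 mount:
 *
 *   //srv/share /mnt cifs rw,vers=3.0,sec=ntlmssp,cache=strict,username=u,
 *   uid=0,noforceuid,gid=0,noforcegid,addr=192.0.2.1,file_mode=0755,
 *   dir_mode=0755,soft,nounix,serverino,mapposix,rsize=1048576,
 *   wsize=1048576,echo_interval=60,actimeo=1 0 0
 */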
 401static int
 402cifs_show_options(struct seq_file *s, struct dentry *root)
 403{
 404	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
 405	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 406	struct sockaddr *srcaddr;
 407	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
 408
 409	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
 410	cifs_show_security(s, tcon->ses);
 411	cifs_show_cache_flavor(s, cifs_sb);
 412
 413	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
 414		seq_puts(s, ",multiuser");
 415	else if (tcon->ses->user_name)
 416		seq_show_option(s, "username", tcon->ses->user_name);
 417
 418	if (tcon->ses->domainName)
 419		seq_show_option(s, "domain", tcon->ses->domainName);
 420
 421	if (srcaddr->sa_family != AF_UNSPEC) {
 422		struct sockaddr_in *saddr4;
 423		struct sockaddr_in6 *saddr6;
 424		saddr4 = (struct sockaddr_in *)srcaddr;
 425		saddr6 = (struct sockaddr_in6 *)srcaddr;
 426		if (srcaddr->sa_family == AF_INET6)
 427			seq_printf(s, ",srcaddr=%pI6c",
 428				   &saddr6->sin6_addr);
 429		else if (srcaddr->sa_family == AF_INET)
 430			seq_printf(s, ",srcaddr=%pI4",
 431				   &saddr4->sin_addr.s_addr);
 432		else
 433			seq_printf(s, ",srcaddr=BAD-AF:%i",
 434				   (int)(srcaddr->sa_family));
 435	}
 436
 437	seq_printf(s, ",uid=%u",
 438		   from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
 439	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
 440		seq_puts(s, ",forceuid");
 441	else
 442		seq_puts(s, ",noforceuid");
 443
 444	seq_printf(s, ",gid=%u",
 445		   from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
 446	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
 447		seq_puts(s, ",forcegid");
 448	else
 449		seq_puts(s, ",noforcegid");
 450
 451	cifs_show_address(s, tcon->ses->server);
 452
 453	if (!tcon->unix_ext)
 454		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
 455					   cifs_sb->mnt_file_mode,
 456					   cifs_sb->mnt_dir_mode);
 457
 458	cifs_show_nls(s, cifs_sb->local_nls);
 459
 460	if (tcon->seal)
 461		seq_puts(s, ",seal");
 
 
 462	if (tcon->nocase)
 463		seq_puts(s, ",nocase");
 464	if (tcon->retry)
 465		seq_puts(s, ",hard");
 466	else
 467		seq_puts(s, ",soft");
 468	if (tcon->use_persistent)
 469		seq_puts(s, ",persistenthandles");
 470	else if (tcon->use_resilient)
 471		seq_puts(s, ",resilienthandles");
 472	if (tcon->unix_ext)
 473		seq_puts(s, ",unix");
 474	else
 475		seq_puts(s, ",nounix");
 
 
 476	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
 477		seq_puts(s, ",posixpaths");
 478	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
 479		seq_puts(s, ",setuids");
 480	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
 481		seq_puts(s, ",idsfromsid");
 482	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
 483		seq_puts(s, ",serverino");
 484	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
 485		seq_puts(s, ",rwpidforward");
 486	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
 487		seq_puts(s, ",forcemand");
 488	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
 489		seq_puts(s, ",nouser_xattr");
 490	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
 491		seq_puts(s, ",mapchars");
 492	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
 493		seq_puts(s, ",mapposix");
 494	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
 495		seq_puts(s, ",sfu");
 496	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
 497		seq_puts(s, ",nobrl");
 498	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
 499		seq_puts(s, ",cifsacl");
 500	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
 501		seq_puts(s, ",dynperm");
 502	if (root->d_sb->s_flags & SB_POSIXACL)
 503		seq_puts(s, ",acl");
 504	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
 505		seq_puts(s, ",mfsymlinks");
 506	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
 507		seq_puts(s, ",fsc");
 508	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
 509		seq_puts(s, ",nostrictsync");
 510	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
 511		seq_puts(s, ",noperm");
 512	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
 513		seq_printf(s, ",backupuid=%u",
 514			   from_kuid_munged(&init_user_ns,
 515					    cifs_sb->mnt_backupuid));
 516	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
 517		seq_printf(s, ",backupgid=%u",
 518			   from_kgid_munged(&init_user_ns,
 519					    cifs_sb->mnt_backupgid));
 520
 521	seq_printf(s, ",rsize=%u", cifs_sb->rsize);
 522	seq_printf(s, ",wsize=%u", cifs_sb->wsize);
 523	seq_printf(s, ",echo_interval=%lu",
 524			tcon->ses->server->echo_interval / HZ);
 525	/* convert actimeo and display it in seconds */
 526	seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
 527
 528	return 0;
 529}
 530
 531static void cifs_umount_begin(struct super_block *sb)
 532{
 533	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 534	struct cifs_tcon *tcon;
 535
 536	if (cifs_sb == NULL)
 537		return;
 538
 539	tcon = cifs_sb_master_tcon(cifs_sb);
 540
 541	spin_lock(&cifs_tcp_ses_lock);
 542	if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
 
 543		/* we have other mounts to same share or we have
 544		   already tried to force umount this and woken up
 545		   all waiting network requests, nothing to do */
 
 546		spin_unlock(&cifs_tcp_ses_lock);
 547		return;
 548	} else if (tcon->tc_count == 1)
 549		tcon->tidStatus = CifsExiting;
 
 550	spin_unlock(&cifs_tcp_ses_lock);
 551
 552	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
 553	/* cancel_notify_requests(tcon); */
 554	if (tcon->ses && tcon->ses->server) {
 555		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
 556		wake_up_all(&tcon->ses->server->request_q);
 557		wake_up_all(&tcon->ses->server->response_q);
 558		msleep(1); /* yield */
 559		/* we have to kick the requests once more */
 560		wake_up_all(&tcon->ses->server->response_q);
 561		msleep(1);
 562	}
 563
 564	return;
 565}
 566
 567#ifdef CONFIG_CIFS_STATS2
 568static int cifs_show_stats(struct seq_file *s, struct dentry *root)
 569{
 570	/* BB FIXME */
 571	return 0;
 572}
 573#endif
 574
 575static int cifs_remount(struct super_block *sb, int *flags, char *data)
 576{
 577	sync_filesystem(sb);
 578	*flags |= SB_NODIRATIME;
 579	return 0;
 580}
 581
 582static int cifs_drop_inode(struct inode *inode)
 583{
 584	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 585
 586	/* no serverino => unconditional eviction */
 587	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
 588		generic_drop_inode(inode);
 589}
 590
 591static const struct super_operations cifs_super_ops = {
 592	.statfs = cifs_statfs,
 593	.alloc_inode = cifs_alloc_inode,
 594	.destroy_inode = cifs_destroy_inode,
 
 595	.drop_inode	= cifs_drop_inode,
 596	.evict_inode	= cifs_evict_inode,
 
 
 597/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
 598	function unless later we add lazy close of inodes or unless the
 599	kernel forgets to call us with the same number of releases (closes)
 600	as opens */
 601	.show_options = cifs_show_options,
 602	.umount_begin   = cifs_umount_begin,
 603	.remount_fs = cifs_remount,
 604#ifdef CONFIG_CIFS_STATS2
 605	.show_stats = cifs_show_stats,
 606#endif
 607};
 608
 609/*
 610 * Get root dentry from superblock according to prefix path mount option.
 611	 * Return dentry with refcount + 1 on success and an ERR_PTR otherwise.
 612 */
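/*
 * Illustrative example (not part of the original source): for a mount of
 * //server/share/a/b the prefix path "a/b" is walked component by
 * component below, starting from sb->s_root and resolving "a" then "b"
 * with lookup_one_len_unlocked(); an ERR_PTR is returned if a component
 * is missing or is not a directory.
 */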
 613static struct dentry *
 614cifs_get_root(struct smb_vol *vol, struct super_block *sb)
 615{
 616	struct dentry *dentry;
 617	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 618	char *full_path = NULL;
 619	char *s, *p;
 620	char sep;
 621
 622	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
 623		return dget(sb->s_root);
 624
 625	full_path = cifs_build_path_to_root(vol, cifs_sb,
 626				cifs_sb_master_tcon(cifs_sb), 0);
 627	if (full_path == NULL)
 628		return ERR_PTR(-ENOMEM);
 629
 630	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
 631
 632	sep = CIFS_DIR_SEP(cifs_sb);
 633	dentry = dget(sb->s_root);
 634	p = s = full_path;
 635
 636	do {
 637		struct inode *dir = d_inode(dentry);
 638		struct dentry *child;
 639
 640		if (!dir) {
 641			dput(dentry);
 642			dentry = ERR_PTR(-ENOENT);
 643			break;
 644		}
 645		if (!S_ISDIR(dir->i_mode)) {
 646			dput(dentry);
 647			dentry = ERR_PTR(-ENOTDIR);
 648			break;
 649		}
 650
 651		/* skip separators */
 652		while (*s == sep)
 653			s++;
 654		if (!*s)
 655			break;
 656		p = s++;
 657		/* next separator */
 658		while (*s && *s != sep)
 659			s++;
 660
 661		child = lookup_one_len_unlocked(p, dentry, s - p);
 662		dput(dentry);
 663		dentry = child;
 664	} while (!IS_ERR(dentry));
 665	kfree(full_path);
 666	return dentry;
 667}
 668
 669static int cifs_set_super(struct super_block *sb, void *data)
 670{
 671	struct cifs_mnt_data *mnt_data = data;
 672	sb->s_fs_info = mnt_data->cifs_sb;
 673	return set_anon_super(sb, NULL);
 674}
 675
 676static struct dentry *
 677cifs_do_mount(struct file_system_type *fs_type,
 678	      int flags, const char *dev_name, void *data)
 679{
 680	int rc;
 681	struct super_block *sb;
 682	struct cifs_sb_info *cifs_sb;
 683	struct smb_vol *volume_info;
 684	struct cifs_mnt_data mnt_data;
 685	struct dentry *root;
 686
 687	cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
 688
 689	volume_info = cifs_get_volume_info((char *)data, dev_name);
 690	if (IS_ERR(volume_info))
 691		return ERR_CAST(volume_info);
 692
 693	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
 694	if (cifs_sb == NULL) {
 695		root = ERR_PTR(-ENOMEM);
 696		goto out_nls;
 697	}
 698
 699	cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
 700	if (cifs_sb->mountdata == NULL) {
 701		root = ERR_PTR(-ENOMEM);
 702		goto out_free;
 703	}
 704
 705	rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
 706	if (rc) {
 707		root = ERR_PTR(rc);
 708		goto out_free;
 709	}
 710
 711	rc = cifs_mount(cifs_sb, volume_info);
 712	if (rc) {
 713		if (!(flags & SB_SILENT))
 714			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
 715				 rc);
 716		root = ERR_PTR(rc);
 717		goto out_free;
 718	}
 719
 720	mnt_data.vol = volume_info;
 721	mnt_data.cifs_sb = cifs_sb;
 722	mnt_data.flags = flags;
 723
 724	/* BB should we make this contingent on mount parm? */
 725	flags |= SB_NODIRATIME | SB_NOATIME;
 726
 727	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
 728	if (IS_ERR(sb)) {
 729		root = ERR_CAST(sb);
 730		cifs_umount(cifs_sb);
 
 731		goto out;
 732	}
 733
 734	if (sb->s_root) {
 735		cifs_dbg(FYI, "Use existing superblock\n");
 736		cifs_umount(cifs_sb);
 
 737	} else {
 738		rc = cifs_read_super(sb);
 739		if (rc) {
 740			root = ERR_PTR(rc);
 741			goto out_super;
 742		}
 743
 744		sb->s_flags |= SB_ACTIVE;
 745	}
 746
 747	root = cifs_get_root(volume_info, sb);
 748	if (IS_ERR(root))
 749		goto out_super;
 750
 751	cifs_dbg(FYI, "dentry root is: %p\n", root);
 752	goto out;
 753
 754out_super:
 755	deactivate_locked_super(sb);
 
 756out:
 757	cifs_cleanup_volume_info(volume_info);
 758	return root;
 
 759
 760out_free:
 761	kfree(cifs_sb->prepath);
 762	kfree(cifs_sb->mountdata);
 763	kfree(cifs_sb);
 764out_nls:
 765	unload_nls(volume_info->local_nls);
 766	goto out;
 767}
 768
 769static ssize_t
 770cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 771{
 772	ssize_t rc;
 773	struct inode *inode = file_inode(iocb->ki_filp);
 774
 775	if (iocb->ki_filp->f_flags & O_DIRECT)
 776		return cifs_user_readv(iocb, iter);
 777
 778	rc = cifs_revalidate_mapping(inode);
 779	if (rc)
 780		return rc;
 781
 782	return generic_file_read_iter(iocb, iter);
 783}
 784
 785static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 786{
 787	struct inode *inode = file_inode(iocb->ki_filp);
 788	struct cifsInodeInfo *cinode = CIFS_I(inode);
 789	ssize_t written;
 790	int rc;
 791
 792	if (iocb->ki_filp->f_flags & O_DIRECT) {
 793		written = cifs_user_writev(iocb, from);
 794		if (written > 0 && CIFS_CACHE_READ(cinode)) {
 795			cifs_zap_mapping(inode);
 796			cifs_dbg(FYI,
 797				 "Set no oplock for inode=%p after a write operation\n",
 798				 inode);
 799			cinode->oplock = 0;
 800		}
 801		return written;
 802	}
 803
 804	written = cifs_get_writer(cinode);
 805	if (written)
 806		return written;
 807
 808	written = generic_file_write_iter(iocb, from);
 809
 810	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
 811		goto out;
 812
 813	rc = filemap_fdatawrite(inode->i_mapping);
 814	if (rc)
 815		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
 816			 rc, inode);
 817
 818out:
 819	cifs_put_writer(cinode);
 820	return written;
 821}
 822
 823static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
 824{
 825	/*
 826	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
 827	 * the cached file length
 828	 */
 829	if (whence != SEEK_SET && whence != SEEK_CUR) {
 830		int rc;
 831		struct inode *inode = file_inode(file);
 832
 833		/*
 834		 * We need to be sure that all dirty pages are written and the
 835		 * server has the newest file length.
 836		 */
 837		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
 838		    inode->i_mapping->nrpages != 0) {
 839			rc = filemap_fdatawait(inode->i_mapping);
 840			if (rc) {
 841				mapping_set_error(inode->i_mapping, rc);
 842				return rc;
 843			}
 844		}
 845		/*
 846		 * Some applications poll for the file length in this strange
 847		 * way so we must seek to end on non-oplocked files by
 848		 * setting the revalidate time to zero.
 849		 */
 850		CIFS_I(inode)->time = 0;
 851
 852		rc = cifs_revalidate_file_attr(file);
 853		if (rc < 0)
 854			return (loff_t)rc;
 855	}
 856	return generic_file_llseek(file, offset, whence);
 857}
 858
 859static int
 860cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
 861{
 862	/*
 863	 * Note that this is called by vfs setlease with i_lock held to
 864	 * protect *lease from going away.
 865	 */
 866	struct inode *inode = file_inode(file);
 867	struct cifsFileInfo *cfile = file->private_data;
 868
 869	if (!(S_ISREG(inode->i_mode)))
 870		return -EINVAL;
 871
 872	/* Check if file is oplocked if this is request for new lease */
 873	if (arg == F_UNLCK ||
 874	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
 875	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
 876		return generic_setlease(file, arg, lease, priv);
 877	else if (tlink_tcon(cfile->tlink)->local_lease &&
 878		 !CIFS_CACHE_READ(CIFS_I(inode)))
 879		/*
 880		 * If the server claims to support oplock on this file, then we
 881		 * still need to check oplock even if the local_lease mount
 882		 * option is set, but there are servers which do not support
 883		 * oplock for which this mount option may be useful if the user
 884		 * knows that the file won't be changed on the server by anyone
 885		 * else.
 886		 */
 887		return generic_setlease(file, arg, lease, priv);
 888	else
 889		return -EAGAIN;
 890}
 891
 892struct file_system_type cifs_fs_type = {
 893	.owner = THIS_MODULE,
 894	.name = "cifs",
 895	.mount = cifs_do_mount,
 
 896	.kill_sb = cifs_kill_sb,
 897	/*  .fs_flags */
 898};
 899MODULE_ALIAS_FS("cifs");
 900const struct inode_operations cifs_dir_inode_ops = {
 901	.create = cifs_create,
 902	.atomic_open = cifs_atomic_open,
 903	.lookup = cifs_lookup,
 904	.getattr = cifs_getattr,
 905	.unlink = cifs_unlink,
 906	.link = cifs_hardlink,
 907	.mkdir = cifs_mkdir,
 908	.rmdir = cifs_rmdir,
 909	.rename = cifs_rename2,
 910	.permission = cifs_permission,
 911	.setattr = cifs_setattr,
 912	.symlink = cifs_symlink,
 913	.mknod   = cifs_mknod,
 914	.listxattr = cifs_listxattr,
 
 
 915};
 916
 917const struct inode_operations cifs_file_inode_ops = {
 918	.setattr = cifs_setattr,
 919	.getattr = cifs_getattr,
 920	.permission = cifs_permission,
 921	.listxattr = cifs_listxattr,
 922};
 923
 924const struct inode_operations cifs_symlink_inode_ops = {
 925	.get_link = cifs_get_link,
 926	.permission = cifs_permission,
 927	.listxattr = cifs_listxattr,
 928};
 929
 930static int cifs_clone_file_range(struct file *src_file, loff_t off,
 931		struct file *dst_file, loff_t destoff, u64 len)
 
 932{
 933	struct inode *src_inode = file_inode(src_file);
 934	struct inode *target_inode = file_inode(dst_file);
 935	struct cifsFileInfo *smb_file_src = src_file->private_data;
 936	struct cifsFileInfo *smb_file_target = dst_file->private_data;
 937	struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
 938	unsigned int xid;
 939	int rc;
 940
 941	cifs_dbg(FYI, "clone range\n");
 942
 943	xid = get_xid();
 944
 945	if (!src_file->private_data || !dst_file->private_data) {
 946		rc = -EBADF;
 947		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
 948		goto out;
 949	}
 950
 951	/*
 952	 * Note: the cifs case is easier than btrfs since the server is
 953	 * responsible for checking open modes and file type, and if it wants,
 954	 * the server could even support a copy range where source = target
 955	 */
 956	lock_two_nondirectories(target_inode, src_inode);
 957
 958	if (len == 0)
 959		len = src_inode->i_size - off;
 960
 961	cifs_dbg(FYI, "about to flush pages\n");
 962	/* should we flush first and last page first */
 963	truncate_inode_pages_range(&target_inode->i_data, destoff,
 964				   PAGE_ALIGN(destoff + len)-1);
 965
 966	if (target_tcon->ses->server->ops->duplicate_extents)
 967		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
 968			smb_file_src, smb_file_target, off, len, destoff);
 969	else
 970		rc = -EOPNOTSUPP;
 971
 972	/* force revalidate of size and timestamps of target file now
 973	   that target is updated on the server */
 974	CIFS_I(target_inode)->time = 0;
 975	/* although unlocking in the reverse order from locking is not
 976	   strictly necessary here it is a little cleaner to be consistent */
 977	unlock_two_nondirectories(src_inode, target_inode);
 978out:
 979	free_xid(xid);
 980	return rc;
 981}
 982
 983ssize_t cifs_file_copychunk_range(unsigned int xid,
 984				struct file *src_file, loff_t off,
 985				struct file *dst_file, loff_t destoff,
 986				size_t len, unsigned int flags)
 987{
 988	struct inode *src_inode = file_inode(src_file);
 989	struct inode *target_inode = file_inode(dst_file);
 990	struct cifsFileInfo *smb_file_src;
 991	struct cifsFileInfo *smb_file_target;
 992	struct cifs_tcon *src_tcon;
 993	struct cifs_tcon *target_tcon;
 994	ssize_t rc;
 995
 996	cifs_dbg(FYI, "copychunk range\n");
 997
 998	if (src_inode == target_inode) {
 999		rc = -EINVAL;
1000		goto out;
1001	}
1002
1003	if (!src_file->private_data || !dst_file->private_data) {
1004		rc = -EBADF;
1005		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1006		goto out;
1007	}
1008
1009	rc = -EXDEV;
1010	smb_file_target = dst_file->private_data;
1011	smb_file_src = src_file->private_data;
1012	src_tcon = tlink_tcon(smb_file_src->tlink);
1013	target_tcon = tlink_tcon(smb_file_target->tlink);
1014
1015	if (src_tcon->ses != target_tcon->ses) {
1016		cifs_dbg(VFS, "source and target of copy not on same server\n");
1017		goto out;
1018	}
1019
1020	/*
1021	 * Note: the cifs case is easier than btrfs since the server is
1022	 * responsible for checking open modes and file type, and if it wants,
1023	 * the server could even support a copy range where source = target
1024	 */
1025	lock_two_nondirectories(target_inode, src_inode);
1026
1027	cifs_dbg(FYI, "about to flush pages\n");
1028	/* should we flush first and last page first */
1029	truncate_inode_pages(&target_inode->i_data, 0);
1030
1031	if (target_tcon->ses->server->ops->copychunk_range)
 
1032		rc = target_tcon->ses->server->ops->copychunk_range(xid,
1033			smb_file_src, smb_file_target, off, len, destoff);
1034	else
1035		rc = -EOPNOTSUPP;
1036
1037	/* force revalidate of size and timestamps of target file now
1038	 * that target is updated on the server
1039	 */
1040	CIFS_I(target_inode)->time = 0;
 
 
1041	/* although unlocking in the reverse order from locking is not
1042	 * strictly necessary here it is a little cleaner to be consistent
1043	 */
1044	unlock_two_nondirectories(src_inode, target_inode);
1045
1046out:
1047	return rc;
1048}
1049
1050/*
1051 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1052 * is a dummy operation.
1053 */
1054static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1055{
1056	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1057		 file, datasync);
1058
1059	return 0;
1060}
1061
1062static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1063				struct file *dst_file, loff_t destoff,
1064				size_t len, unsigned int flags)
1065{
1066	unsigned int xid = get_xid();
1067	ssize_t rc;
1068
1069	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1070					len, flags);
1071	free_xid(xid);
1072	return rc;
1073}
1074
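/*
 * The file_operations tables below come in three caching flavours, each
 * with a "nobrl" variant: the plain ops use cached reads/writes
 * (cifs_loose_read_iter/cifs_file_write_iter), the "strict" ops enforce
 * strict cache semantics (cifs_strict_readv/writev, cifs_strict_fsync and
 * cifs_file_strict_mmap), and the "direct" ops bypass the page cache
 * (cifs_user_readv/writev). The nobrl variants simply omit .lock for
 * mounts where byte-range locks are disabled.
 */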
1075const struct file_operations cifs_file_ops = {
1076	.read_iter = cifs_loose_read_iter,
1077	.write_iter = cifs_file_write_iter,
1078	.open = cifs_open,
1079	.release = cifs_close,
1080	.lock = cifs_lock,
 
1081	.fsync = cifs_fsync,
1082	.flush = cifs_flush,
1083	.mmap  = cifs_file_mmap,
1084	.splice_read = generic_file_splice_read,
1085	.splice_write = iter_file_splice_write,
1086	.llseek = cifs_llseek,
1087	.unlocked_ioctl	= cifs_ioctl,
1088	.copy_file_range = cifs_copy_file_range,
1089	.clone_file_range = cifs_clone_file_range,
1090	.setlease = cifs_setlease,
1091	.fallocate = cifs_fallocate,
1092};
1093
1094const struct file_operations cifs_file_strict_ops = {
1095	.read_iter = cifs_strict_readv,
1096	.write_iter = cifs_strict_writev,
1097	.open = cifs_open,
1098	.release = cifs_close,
1099	.lock = cifs_lock,
 
1100	.fsync = cifs_strict_fsync,
1101	.flush = cifs_flush,
1102	.mmap = cifs_file_strict_mmap,
1103	.splice_read = generic_file_splice_read,
1104	.splice_write = iter_file_splice_write,
1105	.llseek = cifs_llseek,
1106	.unlocked_ioctl	= cifs_ioctl,
1107	.copy_file_range = cifs_copy_file_range,
1108	.clone_file_range = cifs_clone_file_range,
1109	.setlease = cifs_setlease,
1110	.fallocate = cifs_fallocate,
1111};
1112
1113const struct file_operations cifs_file_direct_ops = {
1114	/* BB reevaluate whether they can be done with directio, no cache */
1115	.read_iter = cifs_user_readv,
1116	.write_iter = cifs_user_writev,
1117	.open = cifs_open,
1118	.release = cifs_close,
1119	.lock = cifs_lock,
 
1120	.fsync = cifs_fsync,
1121	.flush = cifs_flush,
1122	.mmap = cifs_file_mmap,
1123	.splice_read = generic_file_splice_read,
1124	.splice_write = iter_file_splice_write,
1125	.unlocked_ioctl  = cifs_ioctl,
1126	.copy_file_range = cifs_copy_file_range,
1127	.clone_file_range = cifs_clone_file_range,
1128	.llseek = cifs_llseek,
1129	.setlease = cifs_setlease,
1130	.fallocate = cifs_fallocate,
1131};
1132
1133const struct file_operations cifs_file_nobrl_ops = {
1134	.read_iter = cifs_loose_read_iter,
1135	.write_iter = cifs_file_write_iter,
1136	.open = cifs_open,
1137	.release = cifs_close,
1138	.fsync = cifs_fsync,
1139	.flush = cifs_flush,
1140	.mmap  = cifs_file_mmap,
1141	.splice_read = generic_file_splice_read,
1142	.splice_write = iter_file_splice_write,
1143	.llseek = cifs_llseek,
1144	.unlocked_ioctl	= cifs_ioctl,
1145	.copy_file_range = cifs_copy_file_range,
1146	.clone_file_range = cifs_clone_file_range,
1147	.setlease = cifs_setlease,
1148	.fallocate = cifs_fallocate,
1149};
1150
1151const struct file_operations cifs_file_strict_nobrl_ops = {
1152	.read_iter = cifs_strict_readv,
1153	.write_iter = cifs_strict_writev,
1154	.open = cifs_open,
1155	.release = cifs_close,
1156	.fsync = cifs_strict_fsync,
1157	.flush = cifs_flush,
1158	.mmap = cifs_file_strict_mmap,
1159	.splice_read = generic_file_splice_read,
1160	.splice_write = iter_file_splice_write,
1161	.llseek = cifs_llseek,
1162	.unlocked_ioctl	= cifs_ioctl,
1163	.copy_file_range = cifs_copy_file_range,
1164	.clone_file_range = cifs_clone_file_range,
1165	.setlease = cifs_setlease,
1166	.fallocate = cifs_fallocate,
1167};
1168
1169const struct file_operations cifs_file_direct_nobrl_ops = {
1170	/* BB reevaluate whether they can be done with directio, no cache */
1171	.read_iter = cifs_user_readv,
1172	.write_iter = cifs_user_writev,
1173	.open = cifs_open,
1174	.release = cifs_close,
1175	.fsync = cifs_fsync,
1176	.flush = cifs_flush,
1177	.mmap = cifs_file_mmap,
1178	.splice_read = generic_file_splice_read,
1179	.splice_write = iter_file_splice_write,
1180	.unlocked_ioctl  = cifs_ioctl,
1181	.copy_file_range = cifs_copy_file_range,
1182	.clone_file_range = cifs_clone_file_range,
1183	.llseek = cifs_llseek,
1184	.setlease = cifs_setlease,
1185	.fallocate = cifs_fallocate,
1186};
1187
1188const struct file_operations cifs_dir_ops = {
1189	.iterate_shared = cifs_readdir,
1190	.release = cifs_closedir,
1191	.read    = generic_read_dir,
1192	.unlocked_ioctl  = cifs_ioctl,
1193	.copy_file_range = cifs_copy_file_range,
1194	.clone_file_range = cifs_clone_file_range,
1195	.llseek = generic_file_llseek,
1196	.fsync = cifs_dir_fsync,
1197};
1198
1199static void
1200cifs_init_once(void *inode)
1201{
1202	struct cifsInodeInfo *cifsi = inode;
1203
1204	inode_init_once(&cifsi->vfs_inode);
1205	init_rwsem(&cifsi->lock_sem);
1206}
1207
1208static int __init
1209cifs_init_inodecache(void)
1210{
1211	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1212					      sizeof(struct cifsInodeInfo),
1213					      0, (SLAB_RECLAIM_ACCOUNT|
1214						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1215					      cifs_init_once);
1216	if (cifs_inode_cachep == NULL)
1217		return -ENOMEM;
1218
1219	return 0;
1220}
1221
1222static void
1223cifs_destroy_inodecache(void)
1224{
1225	/*
1226	 * Make sure all delayed rcu free inodes are flushed before we
1227	 * destroy cache.
1228	 */
1229	rcu_barrier();
1230	kmem_cache_destroy(cifs_inode_cachep);
1231}
1232
1233static int
1234cifs_init_request_bufs(void)
1235{
1236	/*
1237	 * SMB2 maximum header size is bigger than CIFS one - no problems to
1238	 * allocate some more bytes for CIFS.
1239	 */
1240	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1241
1242	if (CIFSMaxBufSize < 8192) {
1243	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1244	Unicode path name has to fit in any SMB/CIFS path based frames */
1245		CIFSMaxBufSize = 8192;
1246	} else if (CIFSMaxBufSize > 1024*127) {
1247		CIFSMaxBufSize = 1024 * 127;
1248	} else {
1249		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1250	}
1251/*
1252	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1253		 CIFSMaxBufSize, CIFSMaxBufSize);
1254*/
1255	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1256					    CIFSMaxBufSize + max_hdr_size, 0,
1257					    SLAB_HWCACHE_ALIGN, 0,
1258					    CIFSMaxBufSize + max_hdr_size,
1259					    NULL);
1260	if (cifs_req_cachep == NULL)
1261		return -ENOMEM;
1262
1263	if (cifs_min_rcv < 1)
1264		cifs_min_rcv = 1;
1265	else if (cifs_min_rcv > 64) {
1266		cifs_min_rcv = 64;
1267		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1268	}
1269
1270	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1271						  cifs_req_cachep);
1272
1273	if (cifs_req_poolp == NULL) {
1274		kmem_cache_destroy(cifs_req_cachep);
1275		return -ENOMEM;
1276	}
1277	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1278	almost all handle based requests (but not write response, nor is it
1279	sufficient for path based requests).  A smaller size would have
1280	been more efficient (compacting multiple slab items on one 4k page)
1281	for the case in which debug was on, but this larger size allows
1282	more SMBs to use small buffer alloc and is still much more
1283	efficient to alloc 1 per page off the slab compared to 17K (5page)
1284	alloc of large cifs buffers even when page debugging is on */
1285	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1286			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1287			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1288	if (cifs_sm_req_cachep == NULL) {
1289		mempool_destroy(cifs_req_poolp);
1290		kmem_cache_destroy(cifs_req_cachep);
1291		return -ENOMEM;
1292	}
1293
1294	if (cifs_min_small < 2)
1295		cifs_min_small = 2;
1296	else if (cifs_min_small > 256) {
1297		cifs_min_small = 256;
1298		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1299	}
1300
1301	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1302						     cifs_sm_req_cachep);
1303
1304	if (cifs_sm_req_poolp == NULL) {
1305		mempool_destroy(cifs_req_poolp);
1306		kmem_cache_destroy(cifs_req_cachep);
1307		kmem_cache_destroy(cifs_sm_req_cachep);
1308		return -ENOMEM;
1309	}
1310
1311	return 0;
1312}
1313
1314static void
1315cifs_destroy_request_bufs(void)
1316{
1317	mempool_destroy(cifs_req_poolp);
1318	kmem_cache_destroy(cifs_req_cachep);
1319	mempool_destroy(cifs_sm_req_poolp);
1320	kmem_cache_destroy(cifs_sm_req_cachep);
1321}
1322
1323static int
1324cifs_init_mids(void)
1325{
1326	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1327					    sizeof(struct mid_q_entry), 0,
1328					    SLAB_HWCACHE_ALIGN, NULL);
1329	if (cifs_mid_cachep == NULL)
1330		return -ENOMEM;
1331
1332	/* 3 is a reasonable minimum number of simultaneous operations */
1333	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1334	if (cifs_mid_poolp == NULL) {
1335		kmem_cache_destroy(cifs_mid_cachep);
1336		return -ENOMEM;
1337	}
1338
1339	return 0;
1340}
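/*
 * The mempools created above (cifs_req_poolp, cifs_sm_req_poolp and
 * cifs_mid_poolp) keep a minimum number of elements preallocated
 * (cifs_min_rcv, cifs_min_small and 3 respectively), so buffer and mid
 * allocation can fall back to the reserved elements under memory
 * pressure instead of failing outright.
 */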
1341
1342static void
1343cifs_destroy_mids(void)
1344{
1345	mempool_destroy(cifs_mid_poolp);
1346	kmem_cache_destroy(cifs_mid_cachep);
1347}
1348
1349static int __init
1350init_cifs(void)
1351{
1352	int rc = 0;
1353	cifs_proc_init();
1354	INIT_LIST_HEAD(&cifs_tcp_ses_list);
1355#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
1356	INIT_LIST_HEAD(&GlobalDnotifyReqList);
1357	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1358#endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
1359/*
1360 *  Initialize Global counters
1361 */
1362	atomic_set(&sesInfoAllocCount, 0);
1363	atomic_set(&tconInfoAllocCount, 0);
 
1364	atomic_set(&tcpSesAllocCount, 0);
1365	atomic_set(&tcpSesReconnectCount, 0);
1366	atomic_set(&tconInfoReconnectCount, 0);
1367
1368	atomic_set(&bufAllocCount, 0);
1369	atomic_set(&smBufAllocCount, 0);
1370#ifdef CONFIG_CIFS_STATS2
1371	atomic_set(&totBufAllocCount, 0);
1372	atomic_set(&totSmBufAllocCount, 0);
1373#endif /* CONFIG_CIFS_STATS2 */
1374
1375	atomic_set(&midCount, 0);
1376	GlobalCurrentXid = 0;
1377	GlobalTotalActiveXid = 0;
1378	GlobalMaxActiveXid = 0;
1379	spin_lock_init(&cifs_tcp_ses_lock);
1380	spin_lock_init(&GlobalMid_Lock);
1381
1382	cifs_lock_secret = get_random_u32();
1383
1384	if (cifs_max_pending < 2) {
1385		cifs_max_pending = 2;
1386		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1387	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1388		cifs_max_pending = CIFS_MAX_REQ;
1389		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1390			 CIFS_MAX_REQ);
1391	}
1392
1393	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1394	if (!cifsiod_wq) {
1395		rc = -ENOMEM;
1396		goto out_clean_proc;
1397	}
1398
1399	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1400					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1401	if (!cifsoplockd_wq) {
1402		rc = -ENOMEM;
1403		goto out_destroy_cifsiod_wq;
1404	}
1405
1406	rc = cifs_fscache_register();
1407	if (rc)
1408		goto out_destroy_cifsoplockd_wq;
 
1409
1410	rc = cifs_init_inodecache();
1411	if (rc)
1412		goto out_unreg_fscache;
1413
1414	rc = cifs_init_mids();
1415	if (rc)
1416		goto out_destroy_inodecache;
1417
1418	rc = cifs_init_request_bufs();
1419	if (rc)
1420		goto out_destroy_mids;
1421
1422#ifdef CONFIG_CIFS_UPCALL
1423	rc = init_cifs_spnego();
1424	if (rc)
1425		goto out_destroy_request_bufs;
1426#endif /* CONFIG_CIFS_UPCALL */
1427
1428#ifdef CONFIG_CIFS_ACL
1429	rc = init_cifs_idmap();
1430	if (rc)
1431		goto out_register_key_type;
1432#endif /* CONFIG_CIFS_ACL */
1433
1434	rc = register_filesystem(&cifs_fs_type);
1435	if (rc)
1436		goto out_init_cifs_idmap;
1437
1438	return 0;
1439
1440out_init_cifs_idmap:
1441#ifdef CONFIG_CIFS_ACL
1442	exit_cifs_idmap();
1443out_register_key_type:
1444#endif
1445#ifdef CONFIG_CIFS_UPCALL
1446	exit_cifs_spnego();
1447out_destroy_request_bufs:
1448#endif
1449	cifs_destroy_request_bufs();
1450out_destroy_mids:
1451	cifs_destroy_mids();
1452out_destroy_inodecache:
1453	cifs_destroy_inodecache();
1454out_unreg_fscache:
1455	cifs_fscache_unregister();
1456out_destroy_cifsoplockd_wq:
1457	destroy_workqueue(cifsoplockd_wq);
1458out_destroy_cifsiod_wq:
1459	destroy_workqueue(cifsiod_wq);
1460out_clean_proc:
1461	cifs_proc_clean();
1462	return rc;
1463}
1464
1465static void __exit
1466exit_cifs(void)
1467{
1468	cifs_dbg(NOISY, "exit_cifs\n");
1469	unregister_filesystem(&cifs_fs_type);
 
1470	cifs_dfs_release_automount_timer();
1471#ifdef CONFIG_CIFS_ACL
1472	exit_cifs_idmap();
 
 
1473#endif
1474#ifdef CONFIG_CIFS_UPCALL
1475	exit_cifs_spnego();
1476#endif
1477	cifs_destroy_request_bufs();
1478	cifs_destroy_mids();
1479	cifs_destroy_inodecache();
1480	cifs_fscache_unregister();
1481	destroy_workqueue(cifsoplockd_wq);
 
 
1482	destroy_workqueue(cifsiod_wq);
1483	cifs_proc_clean();
1484}
1485
1486MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1487MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
1488MODULE_DESCRIPTION
1489    ("VFS to access servers complying with the SNIA CIFS Specification "
1490     "e.g. Samba and Windows");
1491MODULE_VERSION(CIFS_VERSION);
1492MODULE_SOFTDEP("pre: arc4");
1493MODULE_SOFTDEP("pre: des");
1494MODULE_SOFTDEP("pre: ecb");
1495MODULE_SOFTDEP("pre: hmac");
1496MODULE_SOFTDEP("pre: md4");
1497MODULE_SOFTDEP("pre: md5");
1498MODULE_SOFTDEP("pre: nls");
1499MODULE_SOFTDEP("pre: aes");
1500MODULE_SOFTDEP("pre: cmac");
1501MODULE_SOFTDEP("pre: sha256");
1502MODULE_SOFTDEP("pre: sha512");
1503MODULE_SOFTDEP("pre: aead2");
1504MODULE_SOFTDEP("pre: ccm");
1505module_init(init_cifs)
1506module_exit(exit_cifs)