1// SPDX-License-Identifier: LGPL-2.1
2/*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 */
10
11/* Note that BB means BUGBUG (ie something to fix eventually) */
12
13#include <linux/module.h>
14#include <linux/fs.h>
15#include <linux/filelock.h>
16#include <linux/mount.h>
17#include <linux/slab.h>
18#include <linux/init.h>
19#include <linux/list.h>
20#include <linux/seq_file.h>
21#include <linux/vfs.h>
22#include <linux/mempool.h>
23#include <linux/delay.h>
24#include <linux/kthread.h>
25#include <linux/freezer.h>
26#include <linux/namei.h>
27#include <linux/random.h>
28#include <linux/splice.h>
29#include <linux/uuid.h>
30#include <linux/xattr.h>
31#include <uapi/linux/magic.h>
32#include <net/ipv6.h>
33#include "cifsfs.h"
34#include "cifspdu.h"
35#define DECLARE_GLOBALS_HERE
36#include "cifsglob.h"
37#include "cifsproto.h"
38#include "cifs_debug.h"
39#include "cifs_fs_sb.h"
40#include <linux/mm.h>
41#include <linux/key-type.h>
42#include "cifs_spnego.h"
43#include "fscache.h"
44#ifdef CONFIG_CIFS_DFS_UPCALL
45#include "dfs_cache.h"
46#endif
47#ifdef CONFIG_CIFS_SWN_UPCALL
48#include "netlink.h"
49#endif
50#include "fs_context.h"
51#include "cached_dir.h"
52
/*
 * DOS dates from 1980/1/1 through 2107/12/31
 * Protocol specifications indicate the range should be to 119, which
 * limits maximum year to 2099. But this range has not been checked.
 */
#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)

/* Module-wide behavior knobs; several are also module parameters below */
int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */

/*
 * Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

/* buffer accounting for the request/response buffer pools */
atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
struct list_head cifs_tcp_ses_list;
spinlock_t cifs_tcp_ses_lock;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				    "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				     "before logging that a response is delayed. "
				     "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
				  "helpful to restrict the ability to "
				  "override the default dialects (SMB2.1, "
				  "SMB3 and SMB3.02) on mount with old "
				  "dialects (CIFS/SMB1 and SMB2) since "
				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
				  " and less secure. Default: n/N/0");

/* workqueues for async/deferred work (decryption, oplock breaks, closes) */
struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
struct workqueue_struct *deferredclose_wq;
struct workqueue_struct *serverclose_wq;
struct workqueue_struct *cfid_put_wq;
__u32 cifs_lock_secret;
162
163/*
164 * Bumps refcount for cifs super block.
165 * Note that it should be only called if a reference to VFS super block is
166 * already held, e.g. in open-type syscalls context. Otherwise it can race with
167 * atomic_dec_and_test in deactivate_locked_super.
168 */
169void
170cifs_sb_active(struct super_block *sb)
171{
172 struct cifs_sb_info *server = CIFS_SB(sb);
173
174 if (atomic_inc_return(&server->active) == 1)
175 atomic_inc(&sb->s_active);
176}
177
178void
179cifs_sb_deactive(struct super_block *sb)
180{
181 struct cifs_sb_info *server = CIFS_SB(sb);
182
183 if (atomic_dec_and_test(&server->active))
184 deactivate_super(sb);
185}
186
/*
 * Fill in a freshly created superblock once the session/tcon exist:
 * mount flags, timestamp granularity and range, bdi/readahead tuning,
 * block size, and finally the root inode and dentry.
 * Returns 0 on success or a negative errno.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* mounts of a snapshot are implicitly read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* dentry comparison ops depend on whether share is case-insensitive */
	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
287
/*
 * ->kill_sb: tear the superblock down.  The cached-directory dentries and
 * the root dentry reference taken at mount time (see cifs_smb3_do_mount)
 * must be released before kill_anon_super() runs.
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
307
/*
 * ->statfs: report filesystem statistics.  Fills in fields we can derive
 * locally (f_namelen, f_fsid, f_files/f_ffree) and then, if the dialect
 * provides a queryfs op, asks the server for block counts.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;
	const char *full_path;
	void *page;

	xid = get_xid();
	page = alloc_dentry_path();

	full_path = build_path_from_dentry(dentry, page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto statfs_out;
	}

	/* prefer the server-reported max component length when available */
	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);

statfs_out:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
350
351static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
352{
353 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
354 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
355 struct TCP_Server_Info *server = tcon->ses->server;
356
357 if (server->ops->fallocate)
358 return server->ops->fallocate(file, tcon, mode, off, len);
359
360 return -EOPNOTSUPP;
361}
362
363static int cifs_permission(struct mnt_idmap *idmap,
364 struct inode *inode, int mask)
365{
366 struct cifs_sb_info *cifs_sb;
367
368 cifs_sb = CIFS_SB(inode->i_sb);
369
370 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
371 if ((mask & MAY_EXEC) && !execute_ok(inode))
372 return -EACCES;
373 else
374 return 0;
375 } else /* file mode might have been restricted at mount time
376 on the client (above and beyond ACL on servers) for
377 servers which do not support setting and viewing mode bits,
378 so allowing client to check permissions is useful */
379 return generic_permission(&nop_mnt_idmap, inode, mask);
380}
381
/* slab caches and mempools for frequently allocated cifs objects */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
static struct kmem_cache *cifs_io_request_cachep;
static struct kmem_cache *cifs_io_subrequest_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
mempool_t cifs_io_request_pool;
mempool_t cifs_io_subrequest_pool;
393
/*
 * ->alloc_inode: allocate a cifsInodeInfo from the inode slab cache and
 * initialize its cifs-specific state.  Returns the embedded VFS inode,
 * or NULL on allocation failure.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;

	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = ATTR_ARCHIVE;	/* default */
	cifs_inode->time = 0;	/* attribute cache not yet valid */
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->lease_granted = false;
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
432
433static void
434cifs_free_inode(struct inode *inode)
435{
436 struct cifsInodeInfo *cinode = CIFS_I(inode);
437
438 if (S_ISLNK(inode->i_mode))
439 kfree(cinode->symlink_target);
440 kmem_cache_free(cifs_inode_cachep, cinode);
441}
442
/*
 * ->evict_inode: final inode teardown.  Waits for in-flight netfs I/O,
 * drops the page cache, releases the fscache cookie, then clears the
 * inode.  The ordering here matters: I/O must drain before truncation
 * and cookie release.
 */
static void
cifs_evict_inode(struct inode *inode)
{
	netfs_wait_for_outstanding_io(inode);
	truncate_inode_pages_final(&inode->i_data);
	/* if this inode was pinning cache state for writeback, drop that use */
	if (inode->i_state & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
453
454static void
455cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
456{
457 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
458 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
459
460 seq_puts(s, ",addr=");
461
462 switch (server->dstaddr.ss_family) {
463 case AF_INET:
464 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
465 break;
466 case AF_INET6:
467 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
468 if (sa6->sin6_scope_id)
469 seq_printf(s, "%%%u", sa6->sin6_scope_id);
470 break;
471 default:
472 seq_puts(s, "(unknown)");
473 }
474 if (server->rdma)
475 seq_puts(s, ",rdma");
476}
477
478static void
479cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
480{
481 if (ses->sectype == Unspecified) {
482 if (ses->user_name == NULL)
483 seq_puts(s, ",sec=none");
484 return;
485 }
486
487 seq_puts(s, ",sec=");
488
489 switch (ses->sectype) {
490 case NTLMv2:
491 seq_puts(s, "ntlmv2");
492 break;
493 case Kerberos:
494 seq_puts(s, "krb5");
495 break;
496 case RawNTLMSSP:
497 seq_puts(s, "ntlmssp");
498 break;
499 default:
500 /* shouldn't ever happen */
501 seq_puts(s, "unknown");
502 break;
503 }
504
505 if (ses->sign)
506 seq_puts(s, "i");
507
508 if (ses->sectype == Kerberos)
509 seq_printf(s, ",cruid=%u",
510 from_kuid_munged(&init_user_ns, ses->cred_uid));
511}
512
513static void
514cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
515{
516 seq_puts(s, ",cache=");
517
518 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
519 seq_puts(s, "strict");
520 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
521 seq_puts(s, "none");
522 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
523 seq_puts(s, "singleclient"); /* assume only one client access */
524 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
525 seq_puts(s, "ro"); /* read only caching assumed */
526 else
527 seq_puts(s, "loose");
528}
529
530/*
531 * cifs_show_devname() is used so we show the mount device name with correct
532 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
533 */
534static int cifs_show_devname(struct seq_file *m, struct dentry *root)
535{
536 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
537 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
538
539 if (devname == NULL)
540 seq_puts(m, "none");
541 else {
542 convert_delimiter(devname, '/');
543 /* escape all spaces in share names */
544 seq_escape(m, devname, " \t");
545 kfree(devname);
546 }
547 return 0;
548}
549
550static void
551cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
552{
553 if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
554 seq_puts(s, ",upcall_target=app");
555 return;
556 }
557
558 seq_puts(s, ",upcall_target=");
559
560 switch (cifs_sb->ctx->upcall_target) {
561 case UPTARGET_APP:
562 seq_puts(s, "app");
563 break;
564 case UPTARGET_MOUNT:
565 seq_puts(s, "mount");
566 break;
567 default:
568 /* shouldn't ever happen */
569 seq_puts(s, "unknown");
570 break;
571 }
572}
573
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;

	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);
	cifs_show_upcall_target(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* show the client-side source address, if one was bound at mount */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;

		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
			   cifs_sb->ctx->file_mode,
			   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	/* boolean flags stored in mnt_cifs_flags */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));

	/* numeric I/O sizing options */
	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
		   tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
772
/*
 * ->umount_begin (umount -f): if this is the only mount of the share,
 * wake all threads blocked waiting for this server so the forced
 * unmount can make progress.  Lock ordering: cifs_tcp_ses_lock is taken
 * before tcon->tc_lock.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
			    netfs_trace_tcon_ref_see_umount);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
817
/*
 * ->freeze_fs: flush deferred file closes on the master tcon before the
 * filesystem is frozen.  Always succeeds.
 */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	if (cifs_sb)
		cifs_close_all_deferred_files(cifs_sb_master_tcon(cifs_sb));

	return 0;
}
831
#ifdef CONFIG_CIFS_STATS2
/* ->show_stats: per-mount statistics hook; not implemented yet. */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif
839
/* ->write_inode: nothing written directly; let netfs unpin writeback state */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}
844
845static int cifs_drop_inode(struct inode *inode)
846{
847 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
848
849 /* no serverino => unconditional eviction */
850 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
851 generic_drop_inode(inode);
852}
853
/* Superblock operations shared by all cifs/smb3 mounts */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
874
/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and NULL otherwise.
 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	/* with USE_PREFIX_PATH the sb root already points at the mount root */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
					    cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/* walk down the prefix path one component at a time */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		/* lookup transfers our reference: drop parent, keep child */
		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
929
930static int cifs_set_super(struct super_block *sb, void *data)
931{
932 struct cifs_mnt_data *mnt_data = data;
933 sb->s_fs_info = mnt_data->cifs_sb;
934 return set_anon_super(sb, NULL);
935}
936
/*
 * Common mount entry point: duplicate the parsed fs context into a new
 * cifs_sb, perform the protocol-level mount, then find or create a
 * matching superblock via sget() and resolve the root dentry (honoring
 * any prefix path).  Returns the root dentry or an ERR_PTR.
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
		   int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	/* each superblock carries its own private copy of the fs context */
	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		/* an equivalent mount already exists; drop our duplicate */
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* when reusing a superblock our duplicate context is gone: use old */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
1030
/*
 * ->llseek: for SEEK_END/SEEK_DATA/SEEK_HOLE the cached file length may
 * be stale, so flush dirty pages (when not read-cached) and revalidate
 * attributes before seeking; then delegate to the dialect-specific
 * llseek op when one exists.
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* dialect-specific llseek (e.g. server-side SEEK_DATA/SEEK_HOLE) */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1075
1076static int
1077cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
1078{
1079 /*
1080 * Note that this is called by vfs setlease with i_lock held to
1081 * protect *lease from going away.
1082 */
1083 struct inode *inode = file_inode(file);
1084 struct cifsFileInfo *cfile = file->private_data;
1085
1086 /* Check if file is oplocked if this is request for new lease */
1087 if (arg == F_UNLCK ||
1088 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1089 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1090 return generic_setlease(file, arg, lease, priv);
1091 else if (tlink_tcon(cfile->tlink)->local_lease &&
1092 !CIFS_CACHE_READ(CIFS_I(inode)))
1093 /*
1094 * If the server claims to support oplock on this file, then we
1095 * still need to check oplock even if the local_lease mount
1096 * option is set, but there are servers which do not support
1097 * oplock for which this mount option may be useful if the user
1098 * knows that the file won't be changed on the server by anyone
1099 * else.
1100 */
1101 return generic_setlease(file, arg, lease, priv);
1102 else
1103 return -EAGAIN;
1104}
1105
/* Filesystem type registered under the historical "cifs" name. */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");

/* Same implementation, registered a second time under the "smb3" name. */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1126
/* Inode operations for directories. */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};

/* Inode operations for regular files. */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1155
1156const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1157 struct delayed_call *done)
1158{
1159 char *target_path;
1160
1161 if (!dentry)
1162 return ERR_PTR(-ECHILD);
1163
1164 target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1165 if (!target_path)
1166 return ERR_PTR(-ENOMEM);
1167
1168 spin_lock(&inode->i_lock);
1169 if (likely(CIFS_I(inode)->symlink_target)) {
1170 strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1171 } else {
1172 kfree(target_path);
1173 target_path = ERR_PTR(-EOPNOTSUPP);
1174 }
1175 spin_unlock(&inode->i_lock);
1176
1177 if (!IS_ERR(target_path))
1178 set_delayed_call(done, kfree_link, target_path);
1179
1180 return target_path;
1181}
1182
/* Inode operations for symbolic links. */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1189
1190/*
1191 * Advance the EOF marker to after the source range.
1192 */
static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
				struct cifs_tcon *src_tcon,
				unsigned int xid, loff_t src_end)
{
	struct cifsFileInfo *writeable_srcfile;
	int rc = -EINVAL;	/* also the result if no writable handle is found */

	/* Push the EOF out via any writable handle owned by this fsuid. */
	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
	if (writeable_srcfile) {
		if (src_tcon->ses->server->ops->set_file_size)
			rc = src_tcon->ses->server->ops->set_file_size(
				xid, src_tcon, writeable_srcfile,
				src_inode->i_size, true /* no need to set sparse */);
		else
			rc = -ENOSYS;
		cifsFileInfo_put(writeable_srcfile);
		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
	}

	if (rc < 0)
		goto set_failed;

	/* Server accepted the new EOF: mirror it in netfs and fscache state. */
	netfs_resize_file(&src_cifsi->netfs, src_end, true);
	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
	return 0;

set_failed:
	/*
	 * Fall back to flushing the source so the server sees the data the
	 * normal way.  Note the set_file_size error (rc) is deliberately
	 * replaced by the flush result here.
	 */
	return filemap_write_and_wait(src_inode->i_mapping);
}
1222
1223/*
1224 * Flush out either the folio that overlaps the beginning of a range in which
1225 * pos resides or the folio that overlaps the end of a range unless that folio
1226 * is entirely within the range we're going to invalidate. We extend the flush
1227 * bounds to encompass the folio.
1228 */
static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
			    bool first)
{
	struct folio *folio;
	unsigned long long fpos, fend;
	pgoff_t index = pos / PAGE_SIZE;
	size_t size;
	int rc = 0;

	folio = filemap_get_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return 0;	/* nothing cached at pos -> nothing to flush */

	size = folio_size(folio);
	fpos = folio_pos(folio);
	fend = fpos + size - 1;
	/* Widen the caller's flush window to cover this whole folio. */
	*_fstart = min_t(unsigned long long, *_fstart, fpos);
	*_fend = max_t(unsigned long long, *_fend, fend);
	/*
	 * If the range edge coincides with the folio edge, the folio lies
	 * entirely inside the range about to be invalidated - skip the flush.
	 */
	if ((first && pos == fpos) || (!first && pos == fend))
		goto out;

	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
out:
	folio_put(folio);
	return rc;
}
1255
/*
 * ->remap_file_range: clone [off, off+len) of src_file into dst_file at
 * destoff using the server-side duplicate_extents operation.  Deduplication
 * (REMAP_FILE_DEDUP) is not supported.  Returns len on success or a
 * negative errno.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, old_size, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means "to the end of the source file". */
	if (len == 0)
		len = src_inode->i_size - off;

	/* NOTE(review): duplicate of the "clone range" message above */
	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;
	if (fend > target_cifsi->netfs.zero_point)
		target_cifsi->netfs.zero_point = fend + 1;
	old_size = target_cifsi->netfs.remote_i_size;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc == 0 && new_size > old_size) {
			/* The clone grew the target: propagate the new size. */
			truncate_setsize(target_inode, new_size);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		}
		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = new_size;
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
1367
/*
 * Server-side copy of [off, off+len) from src_file into dst_file at destoff
 * via the protocol's copychunk operation.  Both files must share the same
 * SMB session.  Returns the number of bytes copied or a negative errno
 * (-EBADF, -EXDEV, -EOPNOTSUPP, ...).
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/* Copychunk only works within one session (the caller then falls
	 * back to a local copy on -EXDEV). */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(FYI, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	/* Flush and invalidate all the folios in the destination region. If
	 * the copy was successful, then some of the flush is extra overhead,
	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
	 */
	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
	if (rc)
		goto unlock;

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			/* The copy extended the file: publish the new size. */
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1472
1473/*
1474 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1475 * is a dummy operation.
1476 */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	/* Nothing to flush; just log the request for debugging. */
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
		 file, datasync);

	return 0;
}
1484
1485static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1486 struct file *dst_file, loff_t destoff,
1487 size_t len, unsigned int flags)
1488{
1489 unsigned int xid = get_xid();
1490 ssize_t rc;
1491 struct cifsFileInfo *cfile = dst_file->private_data;
1492
1493 if (cfile->swapfile) {
1494 rc = -EOPNOTSUPP;
1495 free_xid(xid);
1496 return rc;
1497 }
1498
1499 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1500 len, flags);
1501 free_xid(xid);
1502
1503 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1504 rc = splice_copy_file_range(src_file, off, dst_file,
1505 destoff, len);
1506 return rc;
1507}
1508
/* Regular files, cached ("loose") reads, byte-range locking supported. */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* Regular files with strict cache coherency (strict read/write/fsync/mmap). */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* Regular files with unbuffered (direct) I/O through netfs. */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* As cifs_file_ops but without byte-range locking (.lock/.flock omitted). */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* Strict cache coherency, no byte-range locking. */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* Unbuffered I/O, no byte-range locking. */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* Directory operations. */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1633
1634static void
1635cifs_init_once(void *inode)
1636{
1637 struct cifsInodeInfo *cifsi = inode;
1638
1639 inode_init_once(&cifsi->netfs.inode);
1640 init_rwsem(&cifsi->lock_sem);
1641}
1642
1643static int __init
1644cifs_init_inodecache(void)
1645{
1646 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1647 sizeof(struct cifsInodeInfo),
1648 0, (SLAB_RECLAIM_ACCOUNT|
1649 SLAB_ACCOUNT),
1650 cifs_init_once);
1651 if (cifs_inode_cachep == NULL)
1652 return -ENOMEM;
1653
1654 return 0;
1655}
1656
/* Tear down the cifsInodeInfo slab cache at module unload. */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1667
1668static int
1669cifs_init_request_bufs(void)
1670{
1671 /*
1672 * SMB2 maximum header size is bigger than CIFS one - no problems to
1673 * allocate some more bytes for CIFS.
1674 */
1675 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1676
1677 if (CIFSMaxBufSize < 8192) {
1678 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1679 Unicode path name has to fit in any SMB/CIFS path based frames */
1680 CIFSMaxBufSize = 8192;
1681 } else if (CIFSMaxBufSize > 1024*127) {
1682 CIFSMaxBufSize = 1024 * 127;
1683 } else {
1684 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1685 }
1686/*
1687 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1688 CIFSMaxBufSize, CIFSMaxBufSize);
1689*/
1690 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1691 CIFSMaxBufSize + max_hdr_size, 0,
1692 SLAB_HWCACHE_ALIGN, 0,
1693 CIFSMaxBufSize + max_hdr_size,
1694 NULL);
1695 if (cifs_req_cachep == NULL)
1696 return -ENOMEM;
1697
1698 if (cifs_min_rcv < 1)
1699 cifs_min_rcv = 1;
1700 else if (cifs_min_rcv > 64) {
1701 cifs_min_rcv = 64;
1702 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1703 }
1704
1705 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1706 cifs_req_cachep);
1707
1708 if (cifs_req_poolp == NULL) {
1709 kmem_cache_destroy(cifs_req_cachep);
1710 return -ENOMEM;
1711 }
1712 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1713 almost all handle based requests (but not write response, nor is it
1714 sufficient for path based requests). A smaller size would have
1715 been more efficient (compacting multiple slab items on one 4k page)
1716 for the case in which debug was on, but this larger size allows
1717 more SMBs to use small buffer alloc and is still much more
1718 efficient to alloc 1 per page off the slab compared to 17K (5page)
1719 alloc of large cifs buffers even when page debugging is on */
1720 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1721 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1722 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1723 if (cifs_sm_req_cachep == NULL) {
1724 mempool_destroy(cifs_req_poolp);
1725 kmem_cache_destroy(cifs_req_cachep);
1726 return -ENOMEM;
1727 }
1728
1729 if (cifs_min_small < 2)
1730 cifs_min_small = 2;
1731 else if (cifs_min_small > 256) {
1732 cifs_min_small = 256;
1733 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1734 }
1735
1736 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1737 cifs_sm_req_cachep);
1738
1739 if (cifs_sm_req_poolp == NULL) {
1740 mempool_destroy(cifs_req_poolp);
1741 kmem_cache_destroy(cifs_req_cachep);
1742 kmem_cache_destroy(cifs_sm_req_cachep);
1743 return -ENOMEM;
1744 }
1745
1746 return 0;
1747}
1748
/* Release the request-buffer mempools and their backing slab caches. */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
1757
1758static int init_mids(void)
1759{
1760 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1761 sizeof(struct mid_q_entry), 0,
1762 SLAB_HWCACHE_ALIGN, NULL);
1763 if (cifs_mid_cachep == NULL)
1764 return -ENOMEM;
1765
1766 /* 3 is a reasonable minimum number of simultaneous operations */
1767 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1768 if (cifs_mid_poolp == NULL) {
1769 kmem_cache_destroy(cifs_mid_cachep);
1770 return -ENOMEM;
1771 }
1772
1773 return 0;
1774}
1775
/* Release the mid_q_entry mempool and its backing slab cache. */
static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
1781
/*
 * Create the slab caches and mempools for netfs I/O request and subrequest
 * structures.  Returns 0 on success, -ENOMEM on failure (with everything
 * already allocated unwound via the labels below).
 */
static int cifs_init_netfs(void)
{
	cifs_io_request_cachep =
		kmem_cache_create("cifs_io_request",
				  sizeof(struct cifs_io_request), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_request_cachep)
		goto nomem_req;

	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
		goto nomem_reqpool;

	cifs_io_subrequest_cachep =
		kmem_cache_create("cifs_io_subrequest",
				  sizeof(struct cifs_io_subrequest), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_subrequest_cachep)
		goto nomem_subreq;

	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
		goto nomem_subreqpool;

	return 0;

nomem_subreqpool:
	kmem_cache_destroy(cifs_io_subrequest_cachep);
nomem_subreq:
	mempool_exit(&cifs_io_request_pool);
nomem_reqpool:
	kmem_cache_destroy(cifs_io_request_cachep);
nomem_req:
	return -ENOMEM;
}
1815
/* Release the netfs request/subrequest mempools and slab caches. */
static void cifs_destroy_netfs(void)
{
	mempool_exit(&cifs_io_subrequest_pool);
	kmem_cache_destroy(cifs_io_subrequest_cachep);
	mempool_exit(&cifs_io_request_pool);
	kmem_cache_destroy(cifs_io_request_cachep);
}
1823
/*
 * Module init: reset global statistics counters, clamp module-parameter
 * tunables, allocate the workqueues and slab caches/mempools, bring up the
 * optional DFS/SPNEGO/SWN upcall subsystems, and finally register the
 * "cifs" and "smb3" filesystem types.  Any failure unwinds everything set
 * up so far via the label ladder at the bottom (in reverse order).
 */
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
/*
 * Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	/* Random secret used to obfuscate lock owner hashes. */
	cifs_lock_secret = get_random_u32();

	/* Clamp the simultaneous-request limit to [2, CIFS_MAX_REQ]. */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	serverclose_wq = alloc_workqueue("serverclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!serverclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_deferredclose_wq;
	}

	cfid_put_wq = alloc_workqueue("cfid_put_wq",
				      WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cfid_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_serverclose_wq;
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_cfid_put_wq;

	rc = cifs_init_netfs();
	if (rc)
		goto out_destroy_inodecache;

	rc = init_mids();
	if (rc)
		goto out_destroy_netfs;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

/*
 * Error unwind: each label undoes the step above it; the #ifdef'd labels
 * mirror the conditional init calls so the ladder stays consistent for
 * every config combination.
 */
out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_netfs:
	cifs_destroy_netfs();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_cfid_put_wq:
	destroy_workqueue(cfid_put_wq);
out_destroy_serverclose_wq:
	destroy_workqueue(serverclose_wq);
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
2019
/*
 * Module exit: unregister both filesystem types, then tear down the upcall
 * subsystems, caches and workqueues created by init_cifs() in roughly
 * reverse order.
 */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_netfs();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cfid_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
2050
2051MODULE_AUTHOR("Steve French");
2052MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
2053MODULE_DESCRIPTION
2054 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
2055 "also older servers complying with the SNIA CIFS Specification)");
2056MODULE_VERSION(CIFS_VERSION);
2057MODULE_SOFTDEP("ecb");
2058MODULE_SOFTDEP("hmac");
2059MODULE_SOFTDEP("md5");
2060MODULE_SOFTDEP("nls");
2061MODULE_SOFTDEP("aes");
2062MODULE_SOFTDEP("cmac");
2063MODULE_SOFTDEP("sha256");
2064MODULE_SOFTDEP("sha512");
2065MODULE_SOFTDEP("aead2");
2066MODULE_SOFTDEP("ccm");
2067MODULE_SOFTDEP("gcm");
2068module_init(init_cifs)
2069module_exit(exit_cifs)
1// SPDX-License-Identifier: LGPL-2.1
2/*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 */
10
11/* Note that BB means BUGBUG (ie something to fix eventually) */
12
13#include <linux/module.h>
14#include <linux/fs.h>
15#include <linux/filelock.h>
16#include <linux/mount.h>
17#include <linux/slab.h>
18#include <linux/init.h>
19#include <linux/list.h>
20#include <linux/seq_file.h>
21#include <linux/vfs.h>
22#include <linux/mempool.h>
23#include <linux/delay.h>
24#include <linux/kthread.h>
25#include <linux/freezer.h>
26#include <linux/namei.h>
27#include <linux/random.h>
28#include <linux/splice.h>
29#include <linux/uuid.h>
30#include <linux/xattr.h>
31#include <uapi/linux/magic.h>
32#include <net/ipv6.h>
33#include "cifsfs.h"
34#include "cifspdu.h"
35#define DECLARE_GLOBALS_HERE
36#include "cifsglob.h"
37#include "cifsproto.h"
38#include "cifs_debug.h"
39#include "cifs_fs_sb.h"
40#include <linux/mm.h>
41#include <linux/key-type.h>
42#include "cifs_spnego.h"
43#include "fscache.h"
44#ifdef CONFIG_CIFS_DFS_UPCALL
45#include "dfs_cache.h"
46#endif
47#ifdef CONFIG_CIFS_SWN_UPCALL
48#include "netlink.h"
49#endif
50#include "fs_context.h"
51#include "cached_dir.h"
52
53/*
54 * DOS dates from 1980/1/1 through 2107/12/31
55 * Protocol specifications indicate the range should be to 119, which
56 * limits maximum year to 2099. But this range has not been checked.
57 */
58#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
61
/* Module-wide tunables; several are also exposed as module parameters below */
int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */

/*
 * Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
struct list_head cifs_tcp_ses_list;
spinlock_t cifs_tcp_ses_lock;
/* forward declaration; the initializer is further down in this file */
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				    "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				     "before logging that a response is delayed. "
				     "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
					  "helpful to restrict the ability to "
					  "override the default dialects (SMB2.1, "
					  "SMB3 and SMB3.02) on mount with old "
					  "dialects (CIFS/SMB1 and SMB2) since "
					  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
					  " and less secure. Default: n/N/0");

/* redundant forward declarations; the definitions appear later in this file */
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

/* workqueues allocated during module init (see the init_cifs error labels) */
struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
struct workqueue_struct *deferredclose_wq;
__u32 cifs_lock_secret;
164
165/*
166 * Bumps refcount for cifs super block.
167 * Note that it should be only called if a referece to VFS super block is
168 * already held, e.g. in open-type syscalls context. Otherwise it can race with
169 * atomic_dec_and_test in deactivate_locked_super.
170 */
171void
172cifs_sb_active(struct super_block *sb)
173{
174 struct cifs_sb_info *server = CIFS_SB(sb);
175
176 if (atomic_inc_return(&server->active) == 1)
177 atomic_inc(&sb->s_active);
178}
179
180void
181cifs_sb_deactive(struct super_block *sb)
182{
183 struct cifs_sb_info *server = CIFS_SB(sb);
184
185 if (atomic_dec_and_test(&server->active))
186 deactivate_super(sb);
187}
188
/*
 * Fill in a superblock after the mount has established the session and tree
 * connection: flags, max file size, timestamp range/granularity, readahead,
 * blocksize, root inode and dentry ops.  Returns 0 or a negative errno.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* mounts of a snapshot are read-only views of the share */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* case-insensitive dentry ops for nocase mounts */
	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
289
/*
 * Tear down a cifs superblock: drop cached directory dentries and the root
 * dentry before the generic kill, then release the mount state.
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
309
/*
 * .statfs callback: fill @buf from the tcon's cached filesystem attribute
 * info, then let the dialect's queryfs op (if any) fill in live block counts.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	/* prefer the server-reported max name length when it was queried */
	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);

	free_xid(xid);
	return rc;
}
341
342static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
343{
344 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
345 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
346 struct TCP_Server_Info *server = tcon->ses->server;
347
348 if (server->ops->fallocate)
349 return server->ops->fallocate(file, tcon, mode, off, len);
350
351 return -EOPNOTSUPP;
352}
353
354static int cifs_permission(struct mnt_idmap *idmap,
355 struct inode *inode, int mask)
356{
357 struct cifs_sb_info *cifs_sb;
358
359 cifs_sb = CIFS_SB(inode->i_sb);
360
361 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
362 if ((mask & MAY_EXEC) && !execute_ok(inode))
363 return -EACCES;
364 else
365 return 0;
366 } else /* file mode might have been restricted at mount time
367 on the client (above and beyond ACL on servers) for
368 servers which do not support setting and viewing mode bits,
369 so allowing client to check permissions is useful */
370 return generic_permission(&nop_mnt_idmap, inode, mask);
371}
372
/* slab caches and mempools for frequently allocated request/mid structures */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
380
/*
 * .alloc_inode callback: allocate a cifsInodeInfo from the inode slab and
 * initialize the CIFS-private fields to "nothing cached yet" defaults.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	/* fresh random lease key for this inode */
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
418
419static void
420cifs_free_inode(struct inode *inode)
421{
422 struct cifsInodeInfo *cinode = CIFS_I(inode);
423
424 if (S_ISLNK(inode->i_mode))
425 kfree(cinode->symlink_target);
426 kmem_cache_free(cifs_inode_cachep, cinode);
427}
428
/*
 * .evict_inode callback: drop the pagecache and fscache cookie before the
 * inode is destroyed.
 */
static void
cifs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	/* release a writeback pin held on the cache cookie, if any */
	if (inode->i_state & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
438
/*
 * Append ",addr=<server ip>" (plus ",rdma" when applicable) to the mount
 * options shown in /proc/mounts.
 */
static void
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
{
	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;

	seq_puts(s, ",addr=");

	switch (server->dstaddr.ss_family) {
	case AF_INET:
		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
		break;
	case AF_INET6:
		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
		/* include the scope id for link-local addresses */
		if (sa6->sin6_scope_id)
			seq_printf(s, "%%%u", sa6->sin6_scope_id);
		break;
	default:
		seq_puts(s, "(unknown)");
	}
	if (server->rdma)
		seq_puts(s, ",rdma");
}
462
/*
 * Append the ",sec=" mount option matching the session's authentication
 * type; an "i" suffix denotes signing (e.g. "ntlmsspi").
 */
static void
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
{
	if (ses->sectype == Unspecified) {
		/* anonymous/guest sessions show sec=none, otherwise nothing */
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
		return;
	}

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
	case NTLMv2:
		seq_puts(s, "ntlmv2");
		break;
	case Kerberos:
		seq_puts(s, "krb5");
		break;
	case RawNTLMSSP:
		seq_puts(s, "ntlmssp");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
		break;
	}

	/* signing enabled */
	if (ses->sign)
		seq_puts(s, "i");

	if (ses->sectype == Kerberos)
		seq_printf(s, ",cruid=%u",
			   from_kuid_munged(&init_user_ns, ses->cred_uid));
}
497
498static void
499cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
500{
501 seq_puts(s, ",cache=");
502
503 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
504 seq_puts(s, "strict");
505 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
506 seq_puts(s, "none");
507 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
508 seq_puts(s, "singleclient"); /* assume only one client access */
509 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
510 seq_puts(s, "ro"); /* read only caching assumed */
511 else
512 seq_puts(s, "loose");
513}
514
515/*
516 * cifs_show_devname() is used so we show the mount device name with correct
517 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
518 */
519static int cifs_show_devname(struct seq_file *m, struct dentry *root)
520{
521 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
522 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
523
524 if (devname == NULL)
525 seq_puts(m, "none");
526 else {
527 convert_delimiter(devname, '/');
528 /* escape all spaces in share names */
529 seq_escape(m, devname, " \t");
530 kfree(devname);
531 }
532 return 0;
533}
534
535/*
536 * cifs_show_options() is for displaying mount options in /proc/mounts.
537 * Not all settable options are displayed but most of the important
538 * ones are.
539 */
540static int
541cifs_show_options(struct seq_file *s, struct dentry *root)
542{
543 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
544 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
545 struct sockaddr *srcaddr;
546 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
547
548 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
549 cifs_show_security(s, tcon->ses);
550 cifs_show_cache_flavor(s, cifs_sb);
551
552 if (tcon->no_lease)
553 seq_puts(s, ",nolease");
554 if (cifs_sb->ctx->multiuser)
555 seq_puts(s, ",multiuser");
556 else if (tcon->ses->user_name)
557 seq_show_option(s, "username", tcon->ses->user_name);
558
559 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
560 seq_show_option(s, "domain", tcon->ses->domainName);
561
562 if (srcaddr->sa_family != AF_UNSPEC) {
563 struct sockaddr_in *saddr4;
564 struct sockaddr_in6 *saddr6;
565 saddr4 = (struct sockaddr_in *)srcaddr;
566 saddr6 = (struct sockaddr_in6 *)srcaddr;
567 if (srcaddr->sa_family == AF_INET6)
568 seq_printf(s, ",srcaddr=%pI6c",
569 &saddr6->sin6_addr);
570 else if (srcaddr->sa_family == AF_INET)
571 seq_printf(s, ",srcaddr=%pI4",
572 &saddr4->sin_addr.s_addr);
573 else
574 seq_printf(s, ",srcaddr=BAD-AF:%i",
575 (int)(srcaddr->sa_family));
576 }
577
578 seq_printf(s, ",uid=%u",
579 from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
580 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
581 seq_puts(s, ",forceuid");
582 else
583 seq_puts(s, ",noforceuid");
584
585 seq_printf(s, ",gid=%u",
586 from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
587 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
588 seq_puts(s, ",forcegid");
589 else
590 seq_puts(s, ",noforcegid");
591
592 cifs_show_address(s, tcon->ses->server);
593
594 if (!tcon->unix_ext)
595 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
596 cifs_sb->ctx->file_mode,
597 cifs_sb->ctx->dir_mode);
598 if (cifs_sb->ctx->iocharset)
599 seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
600 if (tcon->seal)
601 seq_puts(s, ",seal");
602 else if (tcon->ses->server->ignore_signature)
603 seq_puts(s, ",signloosely");
604 if (tcon->nocase)
605 seq_puts(s, ",nocase");
606 if (tcon->nodelete)
607 seq_puts(s, ",nodelete");
608 if (cifs_sb->ctx->no_sparse)
609 seq_puts(s, ",nosparse");
610 if (tcon->local_lease)
611 seq_puts(s, ",locallease");
612 if (tcon->retry)
613 seq_puts(s, ",hard");
614 else
615 seq_puts(s, ",soft");
616 if (tcon->use_persistent)
617 seq_puts(s, ",persistenthandles");
618 else if (tcon->use_resilient)
619 seq_puts(s, ",resilienthandles");
620 if (tcon->posix_extensions)
621 seq_puts(s, ",posix");
622 else if (tcon->unix_ext)
623 seq_puts(s, ",unix");
624 else
625 seq_puts(s, ",nounix");
626 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
627 seq_puts(s, ",nodfs");
628 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
629 seq_puts(s, ",posixpaths");
630 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
631 seq_puts(s, ",setuids");
632 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
633 seq_puts(s, ",idsfromsid");
634 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
635 seq_puts(s, ",serverino");
636 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
637 seq_puts(s, ",rwpidforward");
638 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
639 seq_puts(s, ",forcemand");
640 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
641 seq_puts(s, ",nouser_xattr");
642 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
643 seq_puts(s, ",mapchars");
644 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
645 seq_puts(s, ",mapposix");
646 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
647 seq_puts(s, ",sfu");
648 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
649 seq_puts(s, ",nobrl");
650 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
651 seq_puts(s, ",nohandlecache");
652 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
653 seq_puts(s, ",modefromsid");
654 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
655 seq_puts(s, ",cifsacl");
656 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
657 seq_puts(s, ",dynperm");
658 if (root->d_sb->s_flags & SB_POSIXACL)
659 seq_puts(s, ",acl");
660 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
661 seq_puts(s, ",mfsymlinks");
662 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
663 seq_puts(s, ",fsc");
664 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
665 seq_puts(s, ",nostrictsync");
666 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
667 seq_puts(s, ",noperm");
668 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
669 seq_printf(s, ",backupuid=%u",
670 from_kuid_munged(&init_user_ns,
671 cifs_sb->ctx->backupuid));
672 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
673 seq_printf(s, ",backupgid=%u",
674 from_kgid_munged(&init_user_ns,
675 cifs_sb->ctx->backupgid));
676
677 seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
678 seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
679 seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
680 if (cifs_sb->ctx->rasize)
681 seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
682 if (tcon->ses->server->min_offload)
683 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
684 if (tcon->ses->server->retrans)
685 seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
686 seq_printf(s, ",echo_interval=%lu",
687 tcon->ses->server->echo_interval / HZ);
688
689 /* Only display the following if overridden on mount */
690 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
691 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
692 if (tcon->ses->server->tcp_nodelay)
693 seq_puts(s, ",tcpnodelay");
694 if (tcon->ses->server->noautotune)
695 seq_puts(s, ",noautotune");
696 if (tcon->ses->server->noblocksnd)
697 seq_puts(s, ",noblocksend");
698 if (tcon->ses->server->nosharesock)
699 seq_puts(s, ",nosharesock");
700
701 if (tcon->snapshot_time)
702 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
703 if (tcon->handle_timeout)
704 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
705 if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
706 seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
707
708 /*
709 * Display file and directory attribute timeout in seconds.
710 * If file and directory attribute timeout the same then actimeo
711 * was likely specified on mount
712 */
713 if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
714 seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
715 else {
716 seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
717 seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
718 }
719 seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
720
721 if (tcon->ses->chan_max > 1)
722 seq_printf(s, ",multichannel,max_channels=%zu",
723 tcon->ses->chan_max);
724
725 if (tcon->use_witness)
726 seq_puts(s, ",witness");
727
728 return 0;
729}
730
/*
 * .umount_begin callback (umount -f): if this is the only mount of the
 * share, wake all threads blocked on pending requests so the forced
 * unmount can make progress.  Lock order: cifs_tcp_ses_lock then tc_lock.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
773
/*
 * .freeze_fs callback: flush deferred (lazily closed) file handles before
 * the filesystem is frozen.  Always reports success.
 */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	if (!cifs_sb)
		return 0;

	cifs_close_all_deferred_files(cifs_sb_master_tcon(cifs_sb));
	return 0;
}
787
#ifdef CONFIG_CIFS_STATS2
/* .show_stats callback: per-superblock statistics not implemented yet */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif
795
/* .write_inode callback: let netfs unpin any writeback state it holds */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int rc;

	rc = netfs_unpin_writeback(inode, wbc);
	return rc;
}
800
801static int cifs_drop_inode(struct inode *inode)
802{
803 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
804
805 /* no serverino => unconditional eviction */
806 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
807 generic_drop_inode(inode);
808}
809
/* VFS superblock operations for cifs/smb3 mounts */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
830
831/*
832 * Get root dentry from superblock according to prefix path mount option.
833 * Return dentry with refcount + 1 on success and NULL otherwise.
834 */
835static struct dentry *
836cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
837{
838 struct dentry *dentry;
839 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
840 char *full_path = NULL;
841 char *s, *p;
842 char sep;
843
844 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
845 return dget(sb->s_root);
846
847 full_path = cifs_build_path_to_root(ctx, cifs_sb,
848 cifs_sb_master_tcon(cifs_sb), 0);
849 if (full_path == NULL)
850 return ERR_PTR(-ENOMEM);
851
852 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
853
854 sep = CIFS_DIR_SEP(cifs_sb);
855 dentry = dget(sb->s_root);
856 s = full_path;
857
858 do {
859 struct inode *dir = d_inode(dentry);
860 struct dentry *child;
861
862 if (!S_ISDIR(dir->i_mode)) {
863 dput(dentry);
864 dentry = ERR_PTR(-ENOTDIR);
865 break;
866 }
867
868 /* skip separators */
869 while (*s == sep)
870 s++;
871 if (!*s)
872 break;
873 p = s++;
874 /* next separator */
875 while (*s && *s != sep)
876 s++;
877
878 child = lookup_positive_unlocked(p, dentry, s - p);
879 dput(dentry);
880 dentry = child;
881 } while (!IS_ERR(dentry));
882 kfree(full_path);
883 return dentry;
884}
885
886static int cifs_set_super(struct super_block *sb, void *data)
887{
888 struct cifs_mnt_data *mnt_data = data;
889 sb->s_fs_info = mnt_data->cifs_sb;
890 return set_anon_super(sb, NULL);
891}
892
/*
 * Mount entry point shared by the cifs and smb3 filesystem types.
 * Duplicates the parsed fs context, connects to the server (cifs_mount),
 * then finds or creates a matching superblock via sget() and returns the
 * root dentry (possibly below a prefix path).
 *
 * Ownership note: once sget() succeeds, cifs_sb belongs to the superblock
 * (or is torn down by cifs_umount() when an existing sb is reused); the
 * "out" label frees it only on the pre-sget error paths.
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		/* sget() matched an already-mounted sb; drop our duplicate */
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
986
987
/*
 * read_iter path used for "loose" caching: revalidate the pagecache against
 * the server before falling through to the generic cached read.  O_DIRECT
 * requests bypass the pagecache entirely.
 */
static ssize_t
cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t rc;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_DIRECT)
		return cifs_user_readv(iocb, iter);

	rc = cifs_revalidate_mapping(inode);
	if (rc)
		return rc;

	return generic_file_read_iter(iocb, iter);
}
1003
/*
 * write_iter path: O_DIRECT writes go straight to the server (and drop any
 * read cache); buffered writes go through the generic path and, when no
 * write oplock/lease is held, are flushed immediately afterwards.
 */
static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			/* cached data is now stale relative to the server */
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	/* nonzero means a pending oplock break blocked us; bail with it */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = generic_file_write_iter(iocb, from);

	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		goto out;

	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
			 rc, inode);

out:
	cifs_put_writer(cinode);
	return written;
}
1041
/*
 * llseek: for whence values that depend on the file size (SEEK_END,
 * SEEK_DATA, SEEK_HOLE) the cached length must be revalidated against the
 * server first; dialects may also provide their own llseek op.
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* prefer the dialect-specific llseek (e.g. for SEEK_DATA/SEEK_HOLE) */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1086
1087static int
1088cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv)
1089{
1090 /*
1091 * Note that this is called by vfs setlease with i_lock held to
1092 * protect *lease from going away.
1093 */
1094 struct inode *inode = file_inode(file);
1095 struct cifsFileInfo *cfile = file->private_data;
1096
1097 if (!(S_ISREG(inode->i_mode)))
1098 return -EINVAL;
1099
1100 /* Check if file is oplocked if this is request for new lease */
1101 if (arg == F_UNLCK ||
1102 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1103 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1104 return generic_setlease(file, arg, lease, priv);
1105 else if (tlink_tcon(cfile->tlink)->local_lease &&
1106 !CIFS_CACHE_READ(CIFS_I(inode)))
1107 /*
1108 * If the server claims to support oplock on this file, then we
1109 * still need to check oplock even if the local_lease mount
1110 * option is set, but there are servers which do not support
1111 * oplock for which this mount option may be useful if the user
1112 * knows that the file won't be changed on the server by anyone
1113 * else.
1114 */
1115 return generic_setlease(file, arg, lease, priv);
1116 else
1117 return -EAGAIN;
1118}
1119
/* Filesystem type for the legacy "cifs" mount name */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");

/* Filesystem type for the "smb3" mount name; shares the cifs implementation */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1140
/* Inode operations for directories */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};

/* Inode operations for regular files */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1169
1170const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1171 struct delayed_call *done)
1172{
1173 char *target_path;
1174
1175 if (!dentry)
1176 return ERR_PTR(-ECHILD);
1177
1178 target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1179 if (!target_path)
1180 return ERR_PTR(-ENOMEM);
1181
1182 spin_lock(&inode->i_lock);
1183 if (likely(CIFS_I(inode)->symlink_target)) {
1184 strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1185 } else {
1186 kfree(target_path);
1187 target_path = ERR_PTR(-EOPNOTSUPP);
1188 }
1189 spin_unlock(&inode->i_lock);
1190
1191 if (!IS_ERR(target_path))
1192 set_delayed_call(done, kfree_link, target_path);
1193
1194 return target_path;
1195}
1196
/* Inode operations for symbolic links */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1203
1204/*
1205 * Advance the EOF marker to after the source range.
1206 */
1207static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1208 struct cifs_tcon *src_tcon,
1209 unsigned int xid, loff_t src_end)
1210{
1211 struct cifsFileInfo *writeable_srcfile;
1212 int rc = -EINVAL;
1213
1214 writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1215 if (writeable_srcfile) {
1216 if (src_tcon->ses->server->ops->set_file_size)
1217 rc = src_tcon->ses->server->ops->set_file_size(
1218 xid, src_tcon, writeable_srcfile,
1219 src_inode->i_size, true /* no need to set sparse */);
1220 else
1221 rc = -ENOSYS;
1222 cifsFileInfo_put(writeable_srcfile);
1223 cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1224 }
1225
1226 if (rc < 0)
1227 goto set_failed;
1228
1229 netfs_resize_file(&src_cifsi->netfs, src_end, true);
1230 fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1231 return 0;
1232
1233set_failed:
1234 return filemap_write_and_wait(src_inode->i_mapping);
1235}
1236
1237/*
1238 * Flush out either the folio that overlaps the beginning of a range in which
1239 * pos resides or the folio that overlaps the end of a range unless that folio
1240 * is entirely within the range we're going to invalidate. We extend the flush
1241 * bounds to encompass the folio.
1242 */
1243static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1244 bool first)
1245{
1246 struct folio *folio;
1247 unsigned long long fpos, fend;
1248 pgoff_t index = pos / PAGE_SIZE;
1249 size_t size;
1250 int rc = 0;
1251
1252 folio = filemap_get_folio(inode->i_mapping, index);
1253 if (IS_ERR(folio))
1254 return 0;
1255
1256 size = folio_size(folio);
1257 fpos = folio_pos(folio);
1258 fend = fpos + size - 1;
1259 *_fstart = min_t(unsigned long long, *_fstart, fpos);
1260 *_fend = max_t(unsigned long long, *_fend, fend);
1261 if ((first && pos == fpos) || (!first && pos == fend))
1262 goto out;
1263
1264 rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1265out:
1266 folio_put(folio);
1267 return rc;
1268}
1269
/*
 * Clone a range of src_file into dst_file using the server-side
 * duplicate-extents operation.  Returns the number of bytes cloned on
 * success, or a negative errno.  REMAP_FILE_DEDUP is not supported.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means "clone from off to the end of the source file" */
	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	/* Stays -EOPNOTSUPP when the server has no duplicate_extents op */
	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* Grow the target if the clone extended past its old EOF */
		if (rc == 0 && new_size > i_size_read(target_inode)) {
			truncate_setsize(target_inode, new_size);
			netfs_resize_file(&target_cifsi->netfs, new_size, true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		}
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
1377
/*
 * Server-side copy of a byte range between two files on the same session
 * via the protocol's copychunk operation.  Returns the number of bytes
 * copied on success, or a negative errno (-EXDEV when the files live on
 * different servers, -EOPNOTSUPP when the protocol has no copychunk op).
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	unsigned long long destend, fstart, fend;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/* Server-side copy only works within a single SMB session */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	/* Push source dirty data so the server copies current contents */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	/* file_modified() updates timestamps and strips setuid/setgid bits */
	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* Grow local size/cache state if the copy extended the file */
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1493
1494/*
1495 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1496 * is a dummy operation.
1497 */
1498static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1499{
1500 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1501 file, datasync);
1502
1503 return 0;
1504}
1505
1506static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1507 struct file *dst_file, loff_t destoff,
1508 size_t len, unsigned int flags)
1509{
1510 unsigned int xid = get_xid();
1511 ssize_t rc;
1512 struct cifsFileInfo *cfile = dst_file->private_data;
1513
1514 if (cfile->swapfile) {
1515 rc = -EOPNOTSUPP;
1516 free_xid(xid);
1517 return rc;
1518 }
1519
1520 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1521 len, flags);
1522 free_xid(xid);
1523
1524 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1525 rc = splice_copy_file_range(src_file, off, dst_file,
1526 destoff, len);
1527 return rc;
1528}
1529
/* Default file ops: loose caching, byte-range locks supported */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* Strict cache semantics (cache=strict): strict read/write/fsync/mmap */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* Uncached I/O (cache=none): direct read/write, copy-based splice_read */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* As cifs_file_ops but without .lock/.flock (nobrl mount option) */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* As cifs_file_strict_ops but without byte-range locking */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* As cifs_file_direct_ops but without byte-range locking */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* File operations for directories */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1654
1655static void
1656cifs_init_once(void *inode)
1657{
1658 struct cifsInodeInfo *cifsi = inode;
1659
1660 inode_init_once(&cifsi->netfs.inode);
1661 init_rwsem(&cifsi->lock_sem);
1662}
1663
1664static int __init
1665cifs_init_inodecache(void)
1666{
1667 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1668 sizeof(struct cifsInodeInfo),
1669 0, (SLAB_RECLAIM_ACCOUNT|
1670 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1671 cifs_init_once);
1672 if (cifs_inode_cachep == NULL)
1673 return -ENOMEM;
1674
1675 return 0;
1676}
1677
/* Tear down the cifs inode slab cache at module unload */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1688
1689static int
1690cifs_init_request_bufs(void)
1691{
1692 /*
1693 * SMB2 maximum header size is bigger than CIFS one - no problems to
1694 * allocate some more bytes for CIFS.
1695 */
1696 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1697
1698 if (CIFSMaxBufSize < 8192) {
1699 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1700 Unicode path name has to fit in any SMB/CIFS path based frames */
1701 CIFSMaxBufSize = 8192;
1702 } else if (CIFSMaxBufSize > 1024*127) {
1703 CIFSMaxBufSize = 1024 * 127;
1704 } else {
1705 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1706 }
1707/*
1708 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1709 CIFSMaxBufSize, CIFSMaxBufSize);
1710*/
1711 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1712 CIFSMaxBufSize + max_hdr_size, 0,
1713 SLAB_HWCACHE_ALIGN, 0,
1714 CIFSMaxBufSize + max_hdr_size,
1715 NULL);
1716 if (cifs_req_cachep == NULL)
1717 return -ENOMEM;
1718
1719 if (cifs_min_rcv < 1)
1720 cifs_min_rcv = 1;
1721 else if (cifs_min_rcv > 64) {
1722 cifs_min_rcv = 64;
1723 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1724 }
1725
1726 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1727 cifs_req_cachep);
1728
1729 if (cifs_req_poolp == NULL) {
1730 kmem_cache_destroy(cifs_req_cachep);
1731 return -ENOMEM;
1732 }
1733 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1734 almost all handle based requests (but not write response, nor is it
1735 sufficient for path based requests). A smaller size would have
1736 been more efficient (compacting multiple slab items on one 4k page)
1737 for the case in which debug was on, but this larger size allows
1738 more SMBs to use small buffer alloc and is still much more
1739 efficient to alloc 1 per page off the slab compared to 17K (5page)
1740 alloc of large cifs buffers even when page debugging is on */
1741 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1742 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1743 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1744 if (cifs_sm_req_cachep == NULL) {
1745 mempool_destroy(cifs_req_poolp);
1746 kmem_cache_destroy(cifs_req_cachep);
1747 return -ENOMEM;
1748 }
1749
1750 if (cifs_min_small < 2)
1751 cifs_min_small = 2;
1752 else if (cifs_min_small > 256) {
1753 cifs_min_small = 256;
1754 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1755 }
1756
1757 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1758 cifs_sm_req_cachep);
1759
1760 if (cifs_sm_req_poolp == NULL) {
1761 mempool_destroy(cifs_req_poolp);
1762 kmem_cache_destroy(cifs_req_cachep);
1763 kmem_cache_destroy(cifs_sm_req_cachep);
1764 return -ENOMEM;
1765 }
1766
1767 return 0;
1768}
1769
/* Destroy the request-buffer mempools and slab caches (reverse of init) */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
1778
1779static int init_mids(void)
1780{
1781 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1782 sizeof(struct mid_q_entry), 0,
1783 SLAB_HWCACHE_ALIGN, NULL);
1784 if (cifs_mid_cachep == NULL)
1785 return -ENOMEM;
1786
1787 /* 3 is a reasonable minimum number of simultaneous operations */
1788 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1789 if (cifs_mid_poolp == NULL) {
1790 kmem_cache_destroy(cifs_mid_cachep);
1791 return -ENOMEM;
1792 }
1793
1794 return 0;
1795}
1796
/* Tear down the mid mempool and slab cache (reverse of init_mids) */
static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
1802
/*
 * Module init: reset global counters, clamp module parameters, create the
 * workqueues and caches, set up the upcall/netlink helpers and finally
 * register the "cifs" and "smb3" filesystem types.  On any failure the
 * goto ladder below unwinds everything set up so far, in reverse order.
 */
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	/* Random secret used to obfuscate lock owner hashes */
	cifs_lock_secret = get_random_u32();

	/* Clamp the max in-flight request count to [2, CIFS_MAX_REQ] */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_deferredclose_wq;

	rc = init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

	/*
	 * Error unwind: each label undoes the step that succeeded just
	 * before the failing one; the #ifdef'd labels only exist when the
	 * corresponding feature was compiled in.
	 */
out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
1974
/*
 * Module exit: unregister both filesystem types, then tear down the
 * upcall helpers, caches and workqueues roughly in reverse order of
 * init_cifs().
 */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
2002
2003MODULE_AUTHOR("Steve French");
2004MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
2005MODULE_DESCRIPTION
2006 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
2007 "also older servers complying with the SNIA CIFS Specification)");
2008MODULE_VERSION(CIFS_VERSION);
2009MODULE_SOFTDEP("ecb");
2010MODULE_SOFTDEP("hmac");
2011MODULE_SOFTDEP("md5");
2012MODULE_SOFTDEP("nls");
2013MODULE_SOFTDEP("aes");
2014MODULE_SOFTDEP("cmac");
2015MODULE_SOFTDEP("sha256");
2016MODULE_SOFTDEP("sha512");
2017MODULE_SOFTDEP("aead2");
2018MODULE_SOFTDEP("ccm");
2019MODULE_SOFTDEP("gcm");
2020module_init(init_cifs)
2021module_exit(exit_cifs)