1// SPDX-License-Identifier: LGPL-2.1
2/*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 */
10
11/* Note that BB means BUGBUG (ie something to fix eventually) */
12
13#include <linux/module.h>
14#include <linux/fs.h>
15#include <linux/mount.h>
16#include <linux/slab.h>
17#include <linux/init.h>
18#include <linux/list.h>
19#include <linux/seq_file.h>
20#include <linux/vfs.h>
21#include <linux/mempool.h>
22#include <linux/delay.h>
23#include <linux/kthread.h>
24#include <linux/freezer.h>
25#include <linux/namei.h>
26#include <linux/random.h>
27#include <linux/uuid.h>
28#include <linux/xattr.h>
29#include <uapi/linux/magic.h>
30#include <net/ipv6.h>
31#include "cifsfs.h"
32#include "cifspdu.h"
33#define DECLARE_GLOBALS_HERE
34#include "cifsglob.h"
35#include "cifsproto.h"
36#include "cifs_debug.h"
37#include "cifs_fs_sb.h"
38#include <linux/mm.h>
39#include <linux/key-type.h>
40#include "cifs_spnego.h"
41#include "fscache.h"
42#ifdef CONFIG_CIFS_DFS_UPCALL
43#include "dfs_cache.h"
44#endif
45#ifdef CONFIG_CIFS_SWN_UPCALL
46#include "netlink.h"
47#endif
48#include "fs_context.h"
49#include "cached_dir.h"
50
51/*
52 * DOS dates from 1980/1/1 through 2107/12/31
53 * Protocol specifications indicate the range should be to 119, which
54 * limits maximum year to 2099. But this range has not been checked.
55 */
56#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
57#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
58#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
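/*
 * For reference (illustrative, not used by the code): the DOS date/time
 * packing assumed by the limits above is
 *   date: bits 15-9 = years since 1980, bits 8-5 = month, bits 4-0 = day
 *   time: bits 15-11 = hours, bits 10-5 = minutes, bits 4-0 = seconds / 2
 * so an arbitrary example such as 2024-06-15 13:45:30 would encode as
 *   date = (2024 - 1980) << 9 | 6 << 5 | 15,
 *   time = 13 << 11 | 45 << 5 | (30 / 2)
 */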
59
60int cifsFYI = 0;
61bool traceSMB;
62bool enable_oplocks = true;
63bool linuxExtEnabled = true;
64bool lookupCacheEnabled = true;
65bool disable_legacy_dialects; /* false by default */
66bool enable_gcm_256 = true;
67bool require_gcm_256; /* false by default */
68bool enable_negotiate_signing; /* false by default */
69unsigned int global_secflags = CIFSSEC_DEF;
70/* unsigned int ntlmv2_support = 0; */
71unsigned int sign_CIFS_PDUs = 1;
72
73/*
74 * Global transaction id (XID) information
75 */
76unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
77unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
78unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
79spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
80
81/*
82 * Global counters, updated atomically
83 */
84atomic_t sesInfoAllocCount;
85atomic_t tconInfoAllocCount;
86atomic_t tcpSesNextId;
87atomic_t tcpSesAllocCount;
88atomic_t tcpSesReconnectCount;
89atomic_t tconInfoReconnectCount;
90
91atomic_t mid_count;
92atomic_t buf_alloc_count;
93atomic_t small_buf_alloc_count;
94#ifdef CONFIG_CIFS_STATS2
95atomic_t total_buf_alloc_count;
96atomic_t total_small_buf_alloc_count;
97#endif /* STATS2 */
98struct list_head cifs_tcp_ses_list;
99spinlock_t cifs_tcp_ses_lock;
100static const struct super_operations cifs_super_ops;
101unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
102module_param(CIFSMaxBufSize, uint, 0444);
103MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
104 "for CIFS requests. "
105 "Default: 16384 Range: 8192 to 130048");
106unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
107module_param(cifs_min_rcv, uint, 0444);
108MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
109 "1 to 64");
110unsigned int cifs_min_small = 30;
111module_param(cifs_min_small, uint, 0444);
112MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
113 "Range: 2 to 256");
114unsigned int cifs_max_pending = CIFS_MAX_REQ;
115module_param(cifs_max_pending, uint, 0444);
116MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
117 "CIFS/SMB1 dialect (N/A for SMB3) "
118 "Default: 32767 Range: 2 to 32767.");
119#ifdef CONFIG_CIFS_STATS2
120unsigned int slow_rsp_threshold = 1;
121module_param(slow_rsp_threshold, uint, 0644);
122MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
123 "before logging that a response is delayed. "
124 "Default: 1 (if set to 0 disables msg).");
125#endif /* STATS2 */
126
127module_param(enable_oplocks, bool, 0644);
128MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
129
130module_param(enable_gcm_256, bool, 0644);
131MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
132
133module_param(require_gcm_256, bool, 0644);
134MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
135
136module_param(enable_negotiate_signing, bool, 0644);
137MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
138
139module_param(disable_legacy_dialects, bool, 0644);
140MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
141 "helpful to restrict the ability to "
142 "override the default dialects (SMB2.1, "
143 "SMB3 and SMB3.02) on mount with old "
144 "dialects (CIFS/SMB1 and SMB2) since "
145 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
146 " and less secure. Default: n/N/0");
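/*
 * For illustration only (not part of the driver): parameters declared with
 * mode 0444 above can only be set at module load time, e.g.
 *     modprobe cifs CIFSMaxBufSize=130048 cifs_max_pending=256
 * while those declared 0644 (enable_oplocks, require_gcm_256, ...) can also
 * be changed at runtime through /sys/module/cifs/parameters/, e.g.
 *     echo 0 > /sys/module/cifs/parameters/enable_oplocks
 */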
147
148extern mempool_t *cifs_sm_req_poolp;
149extern mempool_t *cifs_req_poolp;
150extern mempool_t *cifs_mid_poolp;
151
152struct workqueue_struct *cifsiod_wq;
153struct workqueue_struct *decrypt_wq;
154struct workqueue_struct *fileinfo_put_wq;
155struct workqueue_struct *cifsoplockd_wq;
156struct workqueue_struct *deferredclose_wq;
157__u32 cifs_lock_secret;
158
159/*
160 * Bumps refcount for cifs super block.
161 * Note that it should only be called if a reference to the VFS super block is
162 * already held, e.g. in open-type syscalls context. Otherwise it can race with
163 * atomic_dec_and_test in deactivate_locked_super.
164 */
165void
166cifs_sb_active(struct super_block *sb)
167{
168 struct cifs_sb_info *server = CIFS_SB(sb);
169
170 if (atomic_inc_return(&server->active) == 1)
171 atomic_inc(&sb->s_active);
172}
173
174void
175cifs_sb_deactive(struct super_block *sb)
176{
177 struct cifs_sb_info *server = CIFS_SB(sb);
178
179 if (atomic_dec_and_test(&server->active))
180 deactivate_super(sb);
181}
182
183static int
184cifs_read_super(struct super_block *sb)
185{
186 struct inode *inode;
187 struct cifs_sb_info *cifs_sb;
188 struct cifs_tcon *tcon;
189 struct timespec64 ts;
190 int rc = 0;
191
192 cifs_sb = CIFS_SB(sb);
193 tcon = cifs_sb_master_tcon(cifs_sb);
194
195 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
196 sb->s_flags |= SB_POSIXACL;
197
198 if (tcon->snapshot_time)
199 sb->s_flags |= SB_RDONLY;
200
201 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
202 sb->s_maxbytes = MAX_LFS_FILESIZE;
203 else
204 sb->s_maxbytes = MAX_NON_LFS;
205
206 /*
207 * Some very old servers like DOS and OS/2 used 2 second granularity
208 * (while all current servers use 100ns granularity - see MS-DTYP)
209 * but 1 second is the maximum allowed granularity for the VFS
210 * so for old servers set time granularity to 1 second while for
211 * everything else (current servers) set it to 100ns.
212 */
213 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
214 ((tcon->ses->capabilities &
215 tcon->ses->server->vals->cap_nt_find) == 0) &&
216 !tcon->unix_ext) {
217 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
218 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
219 sb->s_time_min = ts.tv_sec;
220 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
221 cpu_to_le16(SMB_TIME_MAX), 0);
222 sb->s_time_max = ts.tv_sec;
223 } else {
224 /*
225 * Almost every server, including all SMB2+, uses DCE TIME
226 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
227 */
228 sb->s_time_gran = 100;
229 ts = cifs_NTtimeToUnix(0);
230 sb->s_time_min = ts.tv_sec;
231 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
232 sb->s_time_max = ts.tv_sec;
233 }
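 /*
  * Back-of-the-envelope check (for illustration): S64_MAX 100ns ticks is
  * roughly 9.2e18 / 1e7 ~= 9.2e11 seconds ~= 29,000 years past 1601, i.e.
  * a maximum timestamp around the year 30828, whereas the DOS range in the
  * branch above only covers 1980 through 2107.
  */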
234
235 sb->s_magic = CIFS_SUPER_MAGIC;
236 sb->s_op = &cifs_super_ops;
237 sb->s_xattr = cifs_xattr_handlers;
238 rc = super_setup_bdi(sb);
239 if (rc)
240 goto out_no_root;
241 /* tune readahead according to rsize if readahead size not set on mount */
242 if (cifs_sb->ctx->rsize == 0)
243 cifs_sb->ctx->rsize =
244 tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
245 if (cifs_sb->ctx->rasize)
246 sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
247 else
248 sb->s_bdi->ra_pages = cifs_sb->ctx->rsize / PAGE_SIZE;
249
250 sb->s_blocksize = CIFS_MAX_MSGSIZE;
251 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
252 inode = cifs_root_iget(sb);
253
254 if (IS_ERR(inode)) {
255 rc = PTR_ERR(inode);
256 goto out_no_root;
257 }
258
259 if (tcon->nocase)
260 sb->s_d_op = &cifs_ci_dentry_ops;
261 else
262 sb->s_d_op = &cifs_dentry_ops;
263
264 sb->s_root = d_make_root(inode);
265 if (!sb->s_root) {
266 rc = -ENOMEM;
267 goto out_no_root;
268 }
269
270#ifdef CONFIG_CIFS_NFSD_EXPORT
271 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
272 cifs_dbg(FYI, "export ops supported\n");
273 sb->s_export_op = &cifs_export_ops;
274 }
275#endif /* CONFIG_CIFS_NFSD_EXPORT */
276
277 return 0;
278
279out_no_root:
280 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
281 return rc;
282}
283
284static void cifs_kill_sb(struct super_block *sb)
285{
286 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
287
288 /*
289 * We need to release all dentries for the cached directories
290 * before we kill the sb.
291 */
292 if (cifs_sb->root) {
293 close_all_cached_dirs(cifs_sb);
294
295 /* finally release root dentry */
296 dput(cifs_sb->root);
297 cifs_sb->root = NULL;
298 }
299
300 kill_anon_super(sb);
301 cifs_umount(cifs_sb);
302}
303
304static int
305cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
306{
307 struct super_block *sb = dentry->d_sb;
308 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
309 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
310 struct TCP_Server_Info *server = tcon->ses->server;
311 unsigned int xid;
312 int rc = 0;
313
314 xid = get_xid();
315
316 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
317 buf->f_namelen =
318 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
319 else
320 buf->f_namelen = PATH_MAX;
321
322 buf->f_fsid.val[0] = tcon->vol_serial_number;
323 /* we use part of the create time for more randomness, see man statfs */
324 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
325
326 buf->f_files = 0; /* undefined */
327 buf->f_ffree = 0; /* unlimited */
328
329 if (server->ops->queryfs)
330 rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
331
332 free_xid(xid);
333 return rc;
334}
335
336static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
337{
338 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
339 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
340 struct TCP_Server_Info *server = tcon->ses->server;
341
342 if (server->ops->fallocate)
343 return server->ops->fallocate(file, tcon, mode, off, len);
344
345 return -EOPNOTSUPP;
346}
347
348static int cifs_permission(struct user_namespace *mnt_userns,
349 struct inode *inode, int mask)
350{
351 struct cifs_sb_info *cifs_sb;
352
353 cifs_sb = CIFS_SB(inode->i_sb);
354
355 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
356 if ((mask & MAY_EXEC) && !execute_ok(inode))
357 return -EACCES;
358 else
359 return 0;
360 } else /* file mode might have been restricted at mount time
361 on the client (above and beyond ACL on servers) for
362 servers which do not support setting and viewing mode bits,
363 so allowing client to check permissions is useful */
364 return generic_permission(&init_user_ns, inode, mask);
365}
366
367static struct kmem_cache *cifs_inode_cachep;
368static struct kmem_cache *cifs_req_cachep;
369static struct kmem_cache *cifs_mid_cachep;
370static struct kmem_cache *cifs_sm_req_cachep;
371mempool_t *cifs_sm_req_poolp;
372mempool_t *cifs_req_poolp;
373mempool_t *cifs_mid_poolp;
374
375static struct inode *
376cifs_alloc_inode(struct super_block *sb)
377{
378 struct cifsInodeInfo *cifs_inode;
379 cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
380 if (!cifs_inode)
381 return NULL;
382 cifs_inode->cifsAttrs = 0x20; /* default */
383 cifs_inode->time = 0;
384 /*
385 * Until the file is open and we have gotten oplock info back from the
386 * server, can not assume caching of file data or metadata.
387 */
388 cifs_set_oplock_level(cifs_inode, 0);
389 cifs_inode->flags = 0;
390 spin_lock_init(&cifs_inode->writers_lock);
391 cifs_inode->writers = 0;
392 cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
393 cifs_inode->server_eof = 0;
394 cifs_inode->uniqueid = 0;
395 cifs_inode->createtime = 0;
396 cifs_inode->epoch = 0;
397 spin_lock_init(&cifs_inode->open_file_lock);
398 generate_random_uuid(cifs_inode->lease_key);
399 cifs_inode->symlink_target = NULL;
400
401 /*
402 * Can not set i_flags here - they get immediately overwritten to zero
403 * by the VFS.
404 */
405 /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
406 INIT_LIST_HEAD(&cifs_inode->openFileList);
407 INIT_LIST_HEAD(&cifs_inode->llist);
408 INIT_LIST_HEAD(&cifs_inode->deferred_closes);
409 spin_lock_init(&cifs_inode->deferred_lock);
410 return &cifs_inode->netfs.inode;
411}
412
413static void
414cifs_free_inode(struct inode *inode)
415{
416 struct cifsInodeInfo *cinode = CIFS_I(inode);
417
418 if (S_ISLNK(inode->i_mode))
419 kfree(cinode->symlink_target);
420 kmem_cache_free(cifs_inode_cachep, cinode);
421}
422
423static void
424cifs_evict_inode(struct inode *inode)
425{
426 truncate_inode_pages_final(&inode->i_data);
427 if (inode->i_state & I_PINNING_FSCACHE_WB)
428 cifs_fscache_unuse_inode_cookie(inode, true);
429 cifs_fscache_release_inode_cookie(inode);
430 clear_inode(inode);
431}
432
433static void
434cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
435{
436 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
437 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
438
439 seq_puts(s, ",addr=");
440
441 switch (server->dstaddr.ss_family) {
442 case AF_INET:
443 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
444 break;
445 case AF_INET6:
446 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
447 if (sa6->sin6_scope_id)
448 seq_printf(s, "%%%u", sa6->sin6_scope_id);
449 break;
450 default:
451 seq_puts(s, "(unknown)");
452 }
453 if (server->rdma)
454 seq_puts(s, ",rdma");
455}
456
457static void
458cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
459{
460 if (ses->sectype == Unspecified) {
461 if (ses->user_name == NULL)
462 seq_puts(s, ",sec=none");
463 return;
464 }
465
466 seq_puts(s, ",sec=");
467
468 switch (ses->sectype) {
469 case NTLMv2:
470 seq_puts(s, "ntlmv2");
471 break;
472 case Kerberos:
473 seq_puts(s, "krb5");
474 break;
475 case RawNTLMSSP:
476 seq_puts(s, "ntlmssp");
477 break;
478 default:
479 /* shouldn't ever happen */
480 seq_puts(s, "unknown");
481 break;
482 }
483
484 if (ses->sign)
485 seq_puts(s, "i");
486
487 if (ses->sectype == Kerberos)
488 seq_printf(s, ",cruid=%u",
489 from_kuid_munged(&init_user_ns, ses->cred_uid));
490}
491
492static void
493cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
494{
495 seq_puts(s, ",cache=");
496
497 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
498 seq_puts(s, "strict");
499 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
500 seq_puts(s, "none");
501 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
502 seq_puts(s, "singleclient"); /* assume only one client access */
503 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
504 seq_puts(s, "ro"); /* read only caching assumed */
505 else
506 seq_puts(s, "loose");
507}
508
509/*
510 * cifs_show_devname() is used so we show the mount device name with correct
511 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
512 */
513static int cifs_show_devname(struct seq_file *m, struct dentry *root)
514{
515 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
516 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
517
518 if (devname == NULL)
519 seq_puts(m, "none");
520 else {
521 convert_delimiter(devname, '/');
522 /* escape all spaces in share names */
523 seq_escape(m, devname, " \t");
524 kfree(devname);
525 }
526 return 0;
527}
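/*
 * For example, a ctx->source of "\\srv\share name" is shown in /proc/mounts
 * as "//srv/share\040name": the backslashes are converted by
 * convert_delimiter() and the space is octal-escaped by seq_escape().
 */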
528
529/*
530 * cifs_show_options() is for displaying mount options in /proc/mounts.
531 * Not all settable options are displayed but most of the important
532 * ones are.
533 */
534static int
535cifs_show_options(struct seq_file *s, struct dentry *root)
536{
537 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
538 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
539 struct sockaddr *srcaddr;
540 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
541
542 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
543 cifs_show_security(s, tcon->ses);
544 cifs_show_cache_flavor(s, cifs_sb);
545
546 if (tcon->no_lease)
547 seq_puts(s, ",nolease");
548 if (cifs_sb->ctx->multiuser)
549 seq_puts(s, ",multiuser");
550 else if (tcon->ses->user_name)
551 seq_show_option(s, "username", tcon->ses->user_name);
552
553 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
554 seq_show_option(s, "domain", tcon->ses->domainName);
555
556 if (srcaddr->sa_family != AF_UNSPEC) {
557 struct sockaddr_in *saddr4;
558 struct sockaddr_in6 *saddr6;
559 saddr4 = (struct sockaddr_in *)srcaddr;
560 saddr6 = (struct sockaddr_in6 *)srcaddr;
561 if (srcaddr->sa_family == AF_INET6)
562 seq_printf(s, ",srcaddr=%pI6c",
563 &saddr6->sin6_addr);
564 else if (srcaddr->sa_family == AF_INET)
565 seq_printf(s, ",srcaddr=%pI4",
566 &saddr4->sin_addr.s_addr);
567 else
568 seq_printf(s, ",srcaddr=BAD-AF:%i",
569 (int)(srcaddr->sa_family));
570 }
571
572 seq_printf(s, ",uid=%u",
573 from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
574 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
575 seq_puts(s, ",forceuid");
576 else
577 seq_puts(s, ",noforceuid");
578
579 seq_printf(s, ",gid=%u",
580 from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
581 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
582 seq_puts(s, ",forcegid");
583 else
584 seq_puts(s, ",noforcegid");
585
586 cifs_show_address(s, tcon->ses->server);
587
588 if (!tcon->unix_ext)
589 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
590 cifs_sb->ctx->file_mode,
591 cifs_sb->ctx->dir_mode);
592 if (cifs_sb->ctx->iocharset)
593 seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
594 if (tcon->seal)
595 seq_puts(s, ",seal");
596 else if (tcon->ses->server->ignore_signature)
597 seq_puts(s, ",signloosely");
598 if (tcon->nocase)
599 seq_puts(s, ",nocase");
600 if (tcon->nodelete)
601 seq_puts(s, ",nodelete");
602 if (cifs_sb->ctx->no_sparse)
603 seq_puts(s, ",nosparse");
604 if (tcon->local_lease)
605 seq_puts(s, ",locallease");
606 if (tcon->retry)
607 seq_puts(s, ",hard");
608 else
609 seq_puts(s, ",soft");
610 if (tcon->use_persistent)
611 seq_puts(s, ",persistenthandles");
612 else if (tcon->use_resilient)
613 seq_puts(s, ",resilienthandles");
614 if (tcon->posix_extensions)
615 seq_puts(s, ",posix");
616 else if (tcon->unix_ext)
617 seq_puts(s, ",unix");
618 else
619 seq_puts(s, ",nounix");
620 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
621 seq_puts(s, ",nodfs");
622 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
623 seq_puts(s, ",posixpaths");
624 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
625 seq_puts(s, ",setuids");
626 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
627 seq_puts(s, ",idsfromsid");
628 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
629 seq_puts(s, ",serverino");
630 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
631 seq_puts(s, ",rwpidforward");
632 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
633 seq_puts(s, ",forcemand");
634 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
635 seq_puts(s, ",nouser_xattr");
636 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
637 seq_puts(s, ",mapchars");
638 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
639 seq_puts(s, ",mapposix");
640 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
641 seq_puts(s, ",sfu");
642 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
643 seq_puts(s, ",nobrl");
644 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
645 seq_puts(s, ",nohandlecache");
646 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
647 seq_puts(s, ",modefromsid");
648 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
649 seq_puts(s, ",cifsacl");
650 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
651 seq_puts(s, ",dynperm");
652 if (root->d_sb->s_flags & SB_POSIXACL)
653 seq_puts(s, ",acl");
654 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
655 seq_puts(s, ",mfsymlinks");
656 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
657 seq_puts(s, ",fsc");
658 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
659 seq_puts(s, ",nostrictsync");
660 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
661 seq_puts(s, ",noperm");
662 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
663 seq_printf(s, ",backupuid=%u",
664 from_kuid_munged(&init_user_ns,
665 cifs_sb->ctx->backupuid));
666 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
667 seq_printf(s, ",backupgid=%u",
668 from_kgid_munged(&init_user_ns,
669 cifs_sb->ctx->backupgid));
670
671 seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
672 seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
673 seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
674 if (cifs_sb->ctx->rasize)
675 seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
676 if (tcon->ses->server->min_offload)
677 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
678 seq_printf(s, ",echo_interval=%lu",
679 tcon->ses->server->echo_interval / HZ);
680
681 /* Only display the following if overridden on mount */
682 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
683 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
684 if (tcon->ses->server->tcp_nodelay)
685 seq_puts(s, ",tcpnodelay");
686 if (tcon->ses->server->noautotune)
687 seq_puts(s, ",noautotune");
688 if (tcon->ses->server->noblocksnd)
689 seq_puts(s, ",noblocksend");
690
691 if (tcon->snapshot_time)
692 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
693 if (tcon->handle_timeout)
694 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
695
696 /*
697 * Display file and directory attribute timeout in seconds.
698 * If the file and directory attribute timeouts are the same then actimeo
699 * was likely specified on mount
700 */
701 if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
702 seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
703 else {
704 seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
705 seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
706 }
707 seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
708
709 if (tcon->ses->chan_max > 1)
710 seq_printf(s, ",multichannel,max_channels=%zu",
711 tcon->ses->chan_max);
712
713 if (tcon->use_witness)
714 seq_puts(s, ",witness");
715
716 return 0;
717}
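/*
 * For illustration only, a typical /proc/mounts entry built by the function
 * above might look roughly like (wrapped here; the real entry is one line):
 *   //srv/share /mnt cifs rw,vers=3.1.1,sec=ntlmssp,cache=strict,
 *   username=alice,uid=1000,noforceuid,gid=1000,noforcegid,
 *   addr=192.168.1.42,file_mode=0755,dir_mode=0755,soft,nounix,serverino,
 *   rsize=4194304,wsize=4194304,bsize=1048576,echo_interval=60,
 *   actimeo=1,closetimeo=1 0 0
 */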
718
719static void cifs_umount_begin(struct super_block *sb)
720{
721 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
722 struct cifs_tcon *tcon;
723
724 if (cifs_sb == NULL)
725 return;
726
727 tcon = cifs_sb_master_tcon(cifs_sb);
728
729 spin_lock(&cifs_tcp_ses_lock);
730 spin_lock(&tcon->tc_lock);
731 if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
732 /* we have other mounts to the same share, or we have
733 already tried to force umount this and woken up
734 all waiting network requests; nothing to do */
735 spin_unlock(&tcon->tc_lock);
736 spin_unlock(&cifs_tcp_ses_lock);
737 return;
738 } else if (tcon->tc_count == 1)
739 tcon->status = TID_EXITING;
740 spin_unlock(&tcon->tc_lock);
741 spin_unlock(&cifs_tcp_ses_lock);
742
743 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
744 /* cancel_notify_requests(tcon); */
745 if (tcon->ses && tcon->ses->server) {
746 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
747 wake_up_all(&tcon->ses->server->request_q);
748 wake_up_all(&tcon->ses->server->response_q);
749 msleep(1); /* yield */
750 /* we have to kick the requests once more */
751 wake_up_all(&tcon->ses->server->response_q);
752 msleep(1);
753 }
754
755 return;
756}
757
758#ifdef CONFIG_CIFS_STATS2
759static int cifs_show_stats(struct seq_file *s, struct dentry *root)
760{
761 /* BB FIXME */
762 return 0;
763}
764#endif
765
766static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
767{
768 fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
769 return 0;
770}
771
772static int cifs_drop_inode(struct inode *inode)
773{
774 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
775
776 /* no serverino => unconditional eviction */
777 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
778 generic_drop_inode(inode);
779}
780
781static const struct super_operations cifs_super_ops = {
782 .statfs = cifs_statfs,
783 .alloc_inode = cifs_alloc_inode,
784 .write_inode = cifs_write_inode,
785 .free_inode = cifs_free_inode,
786 .drop_inode = cifs_drop_inode,
787 .evict_inode = cifs_evict_inode,
788/* .show_path = cifs_show_path, */ /* Would we ever need show path? */
789 .show_devname = cifs_show_devname,
790/* .delete_inode = cifs_delete_inode, */ /* Do not need above
791 function unless later we add lazy close of inodes or unless the
792 kernel forgets to call us with the same number of releases (closes)
793 as opens */
794 .show_options = cifs_show_options,
795 .umount_begin = cifs_umount_begin,
796#ifdef CONFIG_CIFS_STATS2
797 .show_stats = cifs_show_stats,
798#endif
799};
800
801/*
802 * Get root dentry from superblock according to prefix path mount option.
803 * Return dentry with refcount + 1 on success and NULL otherwise.
804 */
805static struct dentry *
806cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
807{
808 struct dentry *dentry;
809 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
810 char *full_path = NULL;
811 char *s, *p;
812 char sep;
813
814 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
815 return dget(sb->s_root);
816
817 full_path = cifs_build_path_to_root(ctx, cifs_sb,
818 cifs_sb_master_tcon(cifs_sb), 0);
819 if (full_path == NULL)
820 return ERR_PTR(-ENOMEM);
821
822 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
823
824 sep = CIFS_DIR_SEP(cifs_sb);
825 dentry = dget(sb->s_root);
826 s = full_path;
827
828 do {
829 struct inode *dir = d_inode(dentry);
830 struct dentry *child;
831
832 if (!S_ISDIR(dir->i_mode)) {
833 dput(dentry);
834 dentry = ERR_PTR(-ENOTDIR);
835 break;
836 }
837
838 /* skip separators */
839 while (*s == sep)
840 s++;
841 if (!*s)
842 break;
843 p = s++;
844 /* next separator */
845 while (*s && *s != sep)
846 s++;
847
848 child = lookup_positive_unlocked(p, dentry, s - p);
849 dput(dentry);
850 dentry = child;
851 } while (!IS_ERR(dentry));
852 kfree(full_path);
853 return dentry;
854}
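/*
 * For a hypothetical mount of //srv/share/a/b (when the prefix path is not
 * folded into the share path, i.e. USE_PREFIX_PATH is not set), full_path is
 * roughly "\a\b" and the loop above looks up "a" and then "b" starting from
 * sb->s_root, returning the dentry of the final component.
 */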
855
856static int cifs_set_super(struct super_block *sb, void *data)
857{
858 struct cifs_mnt_data *mnt_data = data;
859 sb->s_fs_info = mnt_data->cifs_sb;
860 return set_anon_super(sb, NULL);
861}
862
863struct dentry *
864cifs_smb3_do_mount(struct file_system_type *fs_type,
865 int flags, struct smb3_fs_context *old_ctx)
866{
867 int rc;
868 struct super_block *sb = NULL;
869 struct cifs_sb_info *cifs_sb = NULL;
870 struct cifs_mnt_data mnt_data;
871 struct dentry *root;
872
873 /*
874 * Print the attempted mount operation in the kernel / CIFS log
875 * (as FYI debug output if CIFS_DEBUG && cifsFYI, otherwise via cifs_info)
876 */
877 if (cifsFYI)
878 cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags);
879 else
880 cifs_info("Attempting to mount %s\n", old_ctx->UNC);
881
882 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
883 if (cifs_sb == NULL) {
884 root = ERR_PTR(-ENOMEM);
885 goto out;
886 }
887
888 cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
889 if (!cifs_sb->ctx) {
890 root = ERR_PTR(-ENOMEM);
891 goto out;
892 }
893 rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
894 if (rc) {
895 root = ERR_PTR(rc);
896 goto out;
897 }
898
899 rc = cifs_setup_cifs_sb(cifs_sb);
900 if (rc) {
901 root = ERR_PTR(rc);
902 goto out;
903 }
904
905 rc = cifs_mount(cifs_sb, cifs_sb->ctx);
906 if (rc) {
907 if (!(flags & SB_SILENT))
908 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
909 rc);
910 root = ERR_PTR(rc);
911 goto out;
912 }
913
914 mnt_data.ctx = cifs_sb->ctx;
915 mnt_data.cifs_sb = cifs_sb;
916 mnt_data.flags = flags;
917
918 /* BB should we make this contingent on mount parm? */
919 flags |= SB_NODIRATIME | SB_NOATIME;
920
921 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
922 if (IS_ERR(sb)) {
923 root = ERR_CAST(sb);
924 cifs_umount(cifs_sb);
925 cifs_sb = NULL;
926 goto out;
927 }
928
929 if (sb->s_root) {
930 cifs_dbg(FYI, "Use existing superblock\n");
931 cifs_umount(cifs_sb);
932 cifs_sb = NULL;
933 } else {
934 rc = cifs_read_super(sb);
935 if (rc) {
936 root = ERR_PTR(rc);
937 goto out_super;
938 }
939
940 sb->s_flags |= SB_ACTIVE;
941 }
942
943 root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
944 if (IS_ERR(root))
945 goto out_super;
946
947 if (cifs_sb)
948 cifs_sb->root = dget(root);
949
950 cifs_dbg(FYI, "dentry root is: %p\n", root);
951 return root;
952
953out_super:
954 deactivate_locked_super(sb);
955 return root;
956out:
957 if (cifs_sb) {
958 if (!sb || IS_ERR(sb)) { /* otherwise kill_sb will handle */
959 kfree(cifs_sb->prepath);
960 smb3_cleanup_fs_context(cifs_sb->ctx);
961 kfree(cifs_sb);
962 }
963 }
964 return root;
965}
966
967
968static ssize_t
969cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
970{
971 ssize_t rc;
972 struct inode *inode = file_inode(iocb->ki_filp);
973
974 if (iocb->ki_flags & IOCB_DIRECT)
975 return cifs_user_readv(iocb, iter);
976
977 rc = cifs_revalidate_mapping(inode);
978 if (rc)
979 return rc;
980
981 return generic_file_read_iter(iocb, iter);
982}
983
984static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
985{
986 struct inode *inode = file_inode(iocb->ki_filp);
987 struct cifsInodeInfo *cinode = CIFS_I(inode);
988 ssize_t written;
989 int rc;
990
991 if (iocb->ki_filp->f_flags & O_DIRECT) {
992 written = cifs_user_writev(iocb, from);
993 if (written > 0 && CIFS_CACHE_READ(cinode)) {
994 cifs_zap_mapping(inode);
995 cifs_dbg(FYI,
996 "Set no oplock for inode=%p after a write operation\n",
997 inode);
998 cinode->oplock = 0;
999 }
1000 return written;
1001 }
1002
1003 written = cifs_get_writer(cinode);
1004 if (written)
1005 return written;
1006
1007 written = generic_file_write_iter(iocb, from);
1008
1009 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1010 goto out;
1011
1012 rc = filemap_fdatawrite(inode->i_mapping);
1013 if (rc)
1014 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1015 rc, inode);
1016
1017out:
1018 cifs_put_writer(cinode);
1019 return written;
1020}
1021
1022static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1023{
1024 struct cifsFileInfo *cfile = file->private_data;
1025 struct cifs_tcon *tcon;
1026
1027 /*
1028 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1029 * the cached file length
1030 */
1031 if (whence != SEEK_SET && whence != SEEK_CUR) {
1032 int rc;
1033 struct inode *inode = file_inode(file);
1034
1035 /*
1036 * We need to be sure that all dirty pages are written and the
1037 * server has the newest file length.
1038 */
1039 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1040 inode->i_mapping->nrpages != 0) {
1041 rc = filemap_fdatawait(inode->i_mapping);
1042 if (rc) {
1043 mapping_set_error(inode->i_mapping, rc);
1044 return rc;
1045 }
1046 }
1047 /*
1048 * Some applications poll for the file length in this strange
1049 * way so we must seek to end on non-oplocked files by
1050 * setting the revalidate time to zero.
1051 */
1052 CIFS_I(inode)->time = 0;
1053
1054 rc = cifs_revalidate_file_attr(file);
1055 if (rc < 0)
1056 return (loff_t)rc;
1057 }
1058 if (cfile && cfile->tlink) {
1059 tcon = tlink_tcon(cfile->tlink);
1060 if (tcon->ses->server->ops->llseek)
1061 return tcon->ses->server->ops->llseek(file, tcon,
1062 offset, whence);
1063 }
1064 return generic_file_llseek(file, offset, whence);
1065}
1066
1067static int
1068cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
1069{
1070 /*
1071 * Note that this is called by vfs setlease with i_lock held to
1072 * protect *lease from going away.
1073 */
1074 struct inode *inode = file_inode(file);
1075 struct cifsFileInfo *cfile = file->private_data;
1076
1077 if (!(S_ISREG(inode->i_mode)))
1078 return -EINVAL;
1079
1080 /* Check if file is oplocked if this is request for new lease */
1081 if (arg == F_UNLCK ||
1082 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1083 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1084 return generic_setlease(file, arg, lease, priv);
1085 else if (tlink_tcon(cfile->tlink)->local_lease &&
1086 !CIFS_CACHE_READ(CIFS_I(inode)))
1087 /*
1088 * If the server claims to support oplock on this file, then we
1089 * still need to check oplock even if the local_lease mount
1090 * option is set, but there are servers which do not support
1091 * oplock for which this mount option may be useful if the user
1092 * knows that the file won't be changed on the server by anyone
1093 * else.
1094 */
1095 return generic_setlease(file, arg, lease, priv);
1096 else
1097 return -EAGAIN;
1098}
1099
1100struct file_system_type cifs_fs_type = {
1101 .owner = THIS_MODULE,
1102 .name = "cifs",
1103 .init_fs_context = smb3_init_fs_context,
1104 .parameters = smb3_fs_parameters,
1105 .kill_sb = cifs_kill_sb,
1106 .fs_flags = FS_RENAME_DOES_D_MOVE,
1107};
1108MODULE_ALIAS_FS("cifs");
1109
1110struct file_system_type smb3_fs_type = {
1111 .owner = THIS_MODULE,
1112 .name = "smb3",
1113 .init_fs_context = smb3_init_fs_context,
1114 .parameters = smb3_fs_parameters,
1115 .kill_sb = cifs_kill_sb,
1116 .fs_flags = FS_RENAME_DOES_D_MOVE,
1117};
1118MODULE_ALIAS_FS("smb3");
1119MODULE_ALIAS("smb3");
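/*
 * Illustrative usage (not part of the module): both registered names map to
 * the same fs_context operations, so for example
 *     mount -t smb3 //srv/share /mnt -o username=alice,vers=3.1.1
 * goes through the same code paths as mount -t cifs with the same options.
 */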
1120
1121const struct inode_operations cifs_dir_inode_ops = {
1122 .create = cifs_create,
1123 .atomic_open = cifs_atomic_open,
1124 .lookup = cifs_lookup,
1125 .getattr = cifs_getattr,
1126 .unlink = cifs_unlink,
1127 .link = cifs_hardlink,
1128 .mkdir = cifs_mkdir,
1129 .rmdir = cifs_rmdir,
1130 .rename = cifs_rename2,
1131 .permission = cifs_permission,
1132 .setattr = cifs_setattr,
1133 .symlink = cifs_symlink,
1134 .mknod = cifs_mknod,
1135 .listxattr = cifs_listxattr,
1136 .get_acl = cifs_get_acl,
1137 .set_acl = cifs_set_acl,
1138};
1139
1140const struct inode_operations cifs_file_inode_ops = {
1141 .setattr = cifs_setattr,
1142 .getattr = cifs_getattr,
1143 .permission = cifs_permission,
1144 .listxattr = cifs_listxattr,
1145 .fiemap = cifs_fiemap,
1146 .get_acl = cifs_get_acl,
1147 .set_acl = cifs_set_acl,
1148};
1149
1150const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1151 struct delayed_call *done)
1152{
1153 char *target_path;
1154
1155 target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1156 if (!target_path)
1157 return ERR_PTR(-ENOMEM);
1158
1159 spin_lock(&inode->i_lock);
1160 if (likely(CIFS_I(inode)->symlink_target)) {
1161 strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1162 } else {
1163 kfree(target_path);
1164 target_path = ERR_PTR(-EOPNOTSUPP);
1165 }
1166 spin_unlock(&inode->i_lock);
1167
1168 if (!IS_ERR(target_path))
1169 set_delayed_call(done, kfree_link, target_path);
1170
1171 return target_path;
1172}
1173
1174const struct inode_operations cifs_symlink_inode_ops = {
1175 .get_link = cifs_get_link,
1176 .permission = cifs_permission,
1177 .listxattr = cifs_listxattr,
1178};
1179
1180static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1181 struct file *dst_file, loff_t destoff, loff_t len,
1182 unsigned int remap_flags)
1183{
1184 struct inode *src_inode = file_inode(src_file);
1185 struct inode *target_inode = file_inode(dst_file);
1186 struct cifsFileInfo *smb_file_src = src_file->private_data;
1187 struct cifsFileInfo *smb_file_target;
1188 struct cifs_tcon *target_tcon;
1189 unsigned int xid;
1190 int rc;
1191
1192 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1193 return -EINVAL;
1194
1195 cifs_dbg(FYI, "clone range\n");
1196
1197 xid = get_xid();
1198
1199 if (!src_file->private_data || !dst_file->private_data) {
1200 rc = -EBADF;
1201 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1202 goto out;
1203 }
1204
1205 smb_file_target = dst_file->private_data;
1206 target_tcon = tlink_tcon(smb_file_target->tlink);
1207
1208 /*
1209 * Note: the cifs case is easier than btrfs since the server is responsible
1210 * for checking proper open modes and file type, and if it wants the
1211 * server could even support copying a range where source = target
1212 */
1213 lock_two_nondirectories(target_inode, src_inode);
1214
1215 if (len == 0)
1216 len = src_inode->i_size - off;
1217
1218 cifs_dbg(FYI, "about to flush pages\n");
1219 /* should we flush the first and last pages first? */
1220 truncate_inode_pages_range(&target_inode->i_data, destoff,
1221 PAGE_ALIGN(destoff + len)-1);
1222
1223 if (target_tcon->ses->server->ops->duplicate_extents)
1224 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1225 smb_file_src, smb_file_target, off, len, destoff);
1226 else
1227 rc = -EOPNOTSUPP;
1228
1229 /* force revalidate of size and timestamps of target file now
1230 that target is updated on the server */
1231 CIFS_I(target_inode)->time = 0;
1232 /* although unlocking in the reverse order from locking is not
1233 strictly necessary here it is a little cleaner to be consistent */
1234 unlock_two_nondirectories(src_inode, target_inode);
1235out:
1236 free_xid(xid);
1237 return rc < 0 ? rc : len;
1238}
1239
1240ssize_t cifs_file_copychunk_range(unsigned int xid,
1241 struct file *src_file, loff_t off,
1242 struct file *dst_file, loff_t destoff,
1243 size_t len, unsigned int flags)
1244{
1245 struct inode *src_inode = file_inode(src_file);
1246 struct inode *target_inode = file_inode(dst_file);
1247 struct cifsFileInfo *smb_file_src;
1248 struct cifsFileInfo *smb_file_target;
1249 struct cifs_tcon *src_tcon;
1250 struct cifs_tcon *target_tcon;
1251 ssize_t rc;
1252
1253 cifs_dbg(FYI, "copychunk range\n");
1254
1255 if (!src_file->private_data || !dst_file->private_data) {
1256 rc = -EBADF;
1257 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1258 goto out;
1259 }
1260
1261 rc = -EXDEV;
1262 smb_file_target = dst_file->private_data;
1263 smb_file_src = src_file->private_data;
1264 src_tcon = tlink_tcon(smb_file_src->tlink);
1265 target_tcon = tlink_tcon(smb_file_target->tlink);
1266
1267 if (src_tcon->ses != target_tcon->ses) {
1268 cifs_dbg(VFS, "source and target of copy not on same server\n");
1269 goto out;
1270 }
1271
1272 rc = -EOPNOTSUPP;
1273 if (!target_tcon->ses->server->ops->copychunk_range)
1274 goto out;
1275
1276 /*
1277 * Note: the cifs case is easier than btrfs since the server is responsible
1278 * for checking proper open modes and file type, and if it wants the
1279 * server could even support copying a range where source = target
1280 */
1281 lock_two_nondirectories(target_inode, src_inode);
1282
1283 cifs_dbg(FYI, "about to flush pages\n");
1284
1285 rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1286 off + len - 1);
1287 if (rc)
1288 goto unlock;
1289
1290 /* should we flush the first and last pages first? */
1291 truncate_inode_pages(&target_inode->i_data, 0);
1292
1293 rc = file_modified(dst_file);
1294 if (!rc)
1295 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1296 smb_file_src, smb_file_target, off, len, destoff);
1297
1298 file_accessed(src_file);
1299
1300 /* force revalidate of size and timestamps of target file now
1301 * that target is updated on the server
1302 */
1303 CIFS_I(target_inode)->time = 0;
1304
1305unlock:
1306 /* although unlocking in the reverse order from locking is not
1307 * strictly necessary here it is a little cleaner to be consistent
1308 */
1309 unlock_two_nondirectories(src_inode, target_inode);
1310
1311out:
1312 return rc;
1313}
1314
1315/*
1316 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1317 * is a dummy operation.
1318 */
1319static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1320{
1321 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1322 file, datasync);
1323
1324 return 0;
1325}
1326
1327static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1328 struct file *dst_file, loff_t destoff,
1329 size_t len, unsigned int flags)
1330{
1331 unsigned int xid = get_xid();
1332 ssize_t rc;
1333 struct cifsFileInfo *cfile = dst_file->private_data;
1334
1335 if (cfile->swapfile) {
1336 rc = -EOPNOTSUPP;
1337 free_xid(xid);
1338 return rc;
1339 }
1340
1341 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1342 len, flags);
1343 free_xid(xid);
1344
1345 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1346 rc = generic_copy_file_range(src_file, off, dst_file,
1347 destoff, len, flags);
1348 return rc;
1349}
1350
1351const struct file_operations cifs_file_ops = {
1352 .read_iter = cifs_loose_read_iter,
1353 .write_iter = cifs_file_write_iter,
1354 .open = cifs_open,
1355 .release = cifs_close,
1356 .lock = cifs_lock,
1357 .flock = cifs_flock,
1358 .fsync = cifs_fsync,
1359 .flush = cifs_flush,
1360 .mmap = cifs_file_mmap,
1361 .splice_read = generic_file_splice_read,
1362 .splice_write = iter_file_splice_write,
1363 .llseek = cifs_llseek,
1364 .unlocked_ioctl = cifs_ioctl,
1365 .copy_file_range = cifs_copy_file_range,
1366 .remap_file_range = cifs_remap_file_range,
1367 .setlease = cifs_setlease,
1368 .fallocate = cifs_fallocate,
1369};
1370
1371const struct file_operations cifs_file_strict_ops = {
1372 .read_iter = cifs_strict_readv,
1373 .write_iter = cifs_strict_writev,
1374 .open = cifs_open,
1375 .release = cifs_close,
1376 .lock = cifs_lock,
1377 .flock = cifs_flock,
1378 .fsync = cifs_strict_fsync,
1379 .flush = cifs_flush,
1380 .mmap = cifs_file_strict_mmap,
1381 .splice_read = generic_file_splice_read,
1382 .splice_write = iter_file_splice_write,
1383 .llseek = cifs_llseek,
1384 .unlocked_ioctl = cifs_ioctl,
1385 .copy_file_range = cifs_copy_file_range,
1386 .remap_file_range = cifs_remap_file_range,
1387 .setlease = cifs_setlease,
1388 .fallocate = cifs_fallocate,
1389};
1390
1391const struct file_operations cifs_file_direct_ops = {
1392 .read_iter = cifs_direct_readv,
1393 .write_iter = cifs_direct_writev,
1394 .open = cifs_open,
1395 .release = cifs_close,
1396 .lock = cifs_lock,
1397 .flock = cifs_flock,
1398 .fsync = cifs_fsync,
1399 .flush = cifs_flush,
1400 .mmap = cifs_file_mmap,
1401 .splice_read = generic_file_splice_read,
1402 .splice_write = iter_file_splice_write,
1403 .unlocked_ioctl = cifs_ioctl,
1404 .copy_file_range = cifs_copy_file_range,
1405 .remap_file_range = cifs_remap_file_range,
1406 .llseek = cifs_llseek,
1407 .setlease = cifs_setlease,
1408 .fallocate = cifs_fallocate,
1409};
1410
1411const struct file_operations cifs_file_nobrl_ops = {
1412 .read_iter = cifs_loose_read_iter,
1413 .write_iter = cifs_file_write_iter,
1414 .open = cifs_open,
1415 .release = cifs_close,
1416 .fsync = cifs_fsync,
1417 .flush = cifs_flush,
1418 .mmap = cifs_file_mmap,
1419 .splice_read = generic_file_splice_read,
1420 .splice_write = iter_file_splice_write,
1421 .llseek = cifs_llseek,
1422 .unlocked_ioctl = cifs_ioctl,
1423 .copy_file_range = cifs_copy_file_range,
1424 .remap_file_range = cifs_remap_file_range,
1425 .setlease = cifs_setlease,
1426 .fallocate = cifs_fallocate,
1427};
1428
1429const struct file_operations cifs_file_strict_nobrl_ops = {
1430 .read_iter = cifs_strict_readv,
1431 .write_iter = cifs_strict_writev,
1432 .open = cifs_open,
1433 .release = cifs_close,
1434 .fsync = cifs_strict_fsync,
1435 .flush = cifs_flush,
1436 .mmap = cifs_file_strict_mmap,
1437 .splice_read = generic_file_splice_read,
1438 .splice_write = iter_file_splice_write,
1439 .llseek = cifs_llseek,
1440 .unlocked_ioctl = cifs_ioctl,
1441 .copy_file_range = cifs_copy_file_range,
1442 .remap_file_range = cifs_remap_file_range,
1443 .setlease = cifs_setlease,
1444 .fallocate = cifs_fallocate,
1445};
1446
1447const struct file_operations cifs_file_direct_nobrl_ops = {
1448 .read_iter = cifs_direct_readv,
1449 .write_iter = cifs_direct_writev,
1450 .open = cifs_open,
1451 .release = cifs_close,
1452 .fsync = cifs_fsync,
1453 .flush = cifs_flush,
1454 .mmap = cifs_file_mmap,
1455 .splice_read = generic_file_splice_read,
1456 .splice_write = iter_file_splice_write,
1457 .unlocked_ioctl = cifs_ioctl,
1458 .copy_file_range = cifs_copy_file_range,
1459 .remap_file_range = cifs_remap_file_range,
1460 .llseek = cifs_llseek,
1461 .setlease = cifs_setlease,
1462 .fallocate = cifs_fallocate,
1463};
1464
1465const struct file_operations cifs_dir_ops = {
1466 .iterate_shared = cifs_readdir,
1467 .release = cifs_closedir,
1468 .read = generic_read_dir,
1469 .unlocked_ioctl = cifs_ioctl,
1470 .copy_file_range = cifs_copy_file_range,
1471 .remap_file_range = cifs_remap_file_range,
1472 .llseek = generic_file_llseek,
1473 .fsync = cifs_dir_fsync,
1474};
1475
1476static void
1477cifs_init_once(void *inode)
1478{
1479 struct cifsInodeInfo *cifsi = inode;
1480
1481 inode_init_once(&cifsi->netfs.inode);
1482 init_rwsem(&cifsi->lock_sem);
1483}
1484
1485static int __init
1486cifs_init_inodecache(void)
1487{
1488 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1489 sizeof(struct cifsInodeInfo),
1490 0, (SLAB_RECLAIM_ACCOUNT|
1491 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1492 cifs_init_once);
1493 if (cifs_inode_cachep == NULL)
1494 return -ENOMEM;
1495
1496 return 0;
1497}
1498
1499static void
1500cifs_destroy_inodecache(void)
1501{
1502 /*
1503 * Make sure all delayed rcu free inodes are flushed before we
1504 * destroy cache.
1505 */
1506 rcu_barrier();
1507 kmem_cache_destroy(cifs_inode_cachep);
1508}
1509
1510static int
1511cifs_init_request_bufs(void)
1512{
1513 /*
1514 * SMB2 maximum header size is bigger than CIFS one - no problems to
1515 * allocate some more bytes for CIFS.
1516 */
1517 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1518
1519 if (CIFSMaxBufSize < 8192) {
1520 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1521 Unicode path name has to fit in any SMB/CIFS path based frames */
1522 CIFSMaxBufSize = 8192;
1523 } else if (CIFSMaxBufSize > 1024*127) {
1524 CIFSMaxBufSize = 1024 * 127;
1525 } else {
1526 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1527 }
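 /*
  * Worked example (illustrative): the mask 0x1FE00 clears the low nine
  * bits, rounding down to a multiple of 512, so a module parameter of
  * e.g. 70000 (0x11170) ends up as 0x11000 = 69632 here.
  */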
1528/*
1529 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1530 CIFSMaxBufSize, CIFSMaxBufSize);
1531*/
1532 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1533 CIFSMaxBufSize + max_hdr_size, 0,
1534 SLAB_HWCACHE_ALIGN, 0,
1535 CIFSMaxBufSize + max_hdr_size,
1536 NULL);
1537 if (cifs_req_cachep == NULL)
1538 return -ENOMEM;
1539
1540 if (cifs_min_rcv < 1)
1541 cifs_min_rcv = 1;
1542 else if (cifs_min_rcv > 64) {
1543 cifs_min_rcv = 64;
1544 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1545 }
1546
1547 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1548 cifs_req_cachep);
1549
1550 if (cifs_req_poolp == NULL) {
1551 kmem_cache_destroy(cifs_req_cachep);
1552 return -ENOMEM;
1553 }
1554 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1555 almost all handle based requests (but not write response, nor is it
1556 sufficient for path based requests). A smaller size would have
1557 been more efficient (compacting multiple slab items on one 4k page)
1558 for the case in which debug was on, but this larger size allows
1559 more SMBs to use small buffer alloc and is still much more
1560 efficient to alloc 1 per page off the slab compared to 17K (5page)
1561 alloc of large cifs buffers even when page debugging is on */
1562 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1563 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1564 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1565 if (cifs_sm_req_cachep == NULL) {
1566 mempool_destroy(cifs_req_poolp);
1567 kmem_cache_destroy(cifs_req_cachep);
1568 return -ENOMEM;
1569 }
1570
1571 if (cifs_min_small < 2)
1572 cifs_min_small = 2;
1573 else if (cifs_min_small > 256) {
1574 cifs_min_small = 256;
1575 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1576 }
1577
1578 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1579 cifs_sm_req_cachep);
1580
1581 if (cifs_sm_req_poolp == NULL) {
1582 mempool_destroy(cifs_req_poolp);
1583 kmem_cache_destroy(cifs_req_cachep);
1584 kmem_cache_destroy(cifs_sm_req_cachep);
1585 return -ENOMEM;
1586 }
1587
1588 return 0;
1589}
1590
1591static void
1592cifs_destroy_request_bufs(void)
1593{
1594 mempool_destroy(cifs_req_poolp);
1595 kmem_cache_destroy(cifs_req_cachep);
1596 mempool_destroy(cifs_sm_req_poolp);
1597 kmem_cache_destroy(cifs_sm_req_cachep);
1598}
1599
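/*
 * Each in-flight SMB request is tracked by a mid_q_entry, keyed by its
 * multiplex id (mid); this is how the demultiplex thread matches server
 * responses back to the waiting caller.
 */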
1600static int init_mids(void)
1601{
1602 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1603 sizeof(struct mid_q_entry), 0,
1604 SLAB_HWCACHE_ALIGN, NULL);
1605 if (cifs_mid_cachep == NULL)
1606 return -ENOMEM;
1607
1608 /* 3 is a reasonable minimum number of simultaneous operations */
1609 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1610 if (cifs_mid_poolp == NULL) {
1611 kmem_cache_destroy(cifs_mid_cachep);
1612 return -ENOMEM;
1613 }
1614
1615 return 0;
1616}
1617
1618static void destroy_mids(void)
1619{
1620 mempool_destroy(cifs_mid_poolp);
1621 kmem_cache_destroy(cifs_mid_cachep);
1622}
1623
1624static int __init
1625init_cifs(void)
1626{
1627 int rc = 0;
1628 cifs_proc_init();
1629 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1630/*
1631 * Initialize Global counters
1632 */
1633 atomic_set(&sesInfoAllocCount, 0);
1634 atomic_set(&tconInfoAllocCount, 0);
1635 atomic_set(&tcpSesNextId, 0);
1636 atomic_set(&tcpSesAllocCount, 0);
1637 atomic_set(&tcpSesReconnectCount, 0);
1638 atomic_set(&tconInfoReconnectCount, 0);
1639
1640 atomic_set(&buf_alloc_count, 0);
1641 atomic_set(&small_buf_alloc_count, 0);
1642#ifdef CONFIG_CIFS_STATS2
1643 atomic_set(&total_buf_alloc_count, 0);
1644 atomic_set(&total_small_buf_alloc_count, 0);
1645 if (slow_rsp_threshold < 1)
1646 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1647 else if (slow_rsp_threshold > 32767)
1648 cifs_dbg(VFS,
1649 "slow response threshold set higher than recommended (0 to 32767)\n");
1650#endif /* CONFIG_CIFS_STATS2 */
1651
1652 atomic_set(&mid_count, 0);
1653 GlobalCurrentXid = 0;
1654 GlobalTotalActiveXid = 0;
1655 GlobalMaxActiveXid = 0;
1656 spin_lock_init(&cifs_tcp_ses_lock);
1657 spin_lock_init(&GlobalMid_Lock);
1658
1659 cifs_lock_secret = get_random_u32();
1660
1661 if (cifs_max_pending < 2) {
1662 cifs_max_pending = 2;
1663 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1664 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1665 cifs_max_pending = CIFS_MAX_REQ;
1666 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1667 CIFS_MAX_REQ);
1668 }
1669
1670 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1671 if (!cifsiod_wq) {
1672 rc = -ENOMEM;
1673 goto out_clean_proc;
1674 }
1675
1676 /*
1677 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1678 * so that we don't launch too many worker threads but
1679 * Documentation/core-api/workqueue.rst recommends setting it to 0
1680 */
1681
1682 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1683 decrypt_wq = alloc_workqueue("smb3decryptd",
1684 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1685 if (!decrypt_wq) {
1686 rc = -ENOMEM;
1687 goto out_destroy_cifsiod_wq;
1688 }
1689
1690 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1691 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1692 if (!fileinfo_put_wq) {
1693 rc = -ENOMEM;
1694 goto out_destroy_decrypt_wq;
1695 }
1696
1697 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1698 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1699 if (!cifsoplockd_wq) {
1700 rc = -ENOMEM;
1701 goto out_destroy_fileinfo_put_wq;
1702 }
1703
1704 deferredclose_wq = alloc_workqueue("deferredclose",
1705 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1706 if (!deferredclose_wq) {
1707 rc = -ENOMEM;
1708 goto out_destroy_cifsoplockd_wq;
1709 }
1710
1711 rc = cifs_init_inodecache();
1712 if (rc)
1713 goto out_destroy_deferredclose_wq;
1714
1715 rc = init_mids();
1716 if (rc)
1717 goto out_destroy_inodecache;
1718
1719 rc = cifs_init_request_bufs();
1720 if (rc)
1721 goto out_destroy_mids;
1722
1723#ifdef CONFIG_CIFS_DFS_UPCALL
1724 rc = dfs_cache_init();
1725 if (rc)
1726 goto out_destroy_request_bufs;
1727#endif /* CONFIG_CIFS_DFS_UPCALL */
1728#ifdef CONFIG_CIFS_UPCALL
1729 rc = init_cifs_spnego();
1730 if (rc)
1731 goto out_destroy_dfs_cache;
1732#endif /* CONFIG_CIFS_UPCALL */
1733#ifdef CONFIG_CIFS_SWN_UPCALL
1734 rc = cifs_genl_init();
1735 if (rc)
1736 goto out_register_key_type;
1737#endif /* CONFIG_CIFS_SWN_UPCALL */
1738
1739 rc = init_cifs_idmap();
1740 if (rc)
1741 goto out_cifs_swn_init;
1742
1743 rc = register_filesystem(&cifs_fs_type);
1744 if (rc)
1745 goto out_init_cifs_idmap;
1746
1747 rc = register_filesystem(&smb3_fs_type);
1748 if (rc) {
1749 unregister_filesystem(&cifs_fs_type);
1750 goto out_init_cifs_idmap;
1751 }
1752
1753 return 0;
1754
1755out_init_cifs_idmap:
1756 exit_cifs_idmap();
1757out_cifs_swn_init:
1758#ifdef CONFIG_CIFS_SWN_UPCALL
1759 cifs_genl_exit();
1760out_register_key_type:
1761#endif
1762#ifdef CONFIG_CIFS_UPCALL
1763 exit_cifs_spnego();
1764out_destroy_dfs_cache:
1765#endif
1766#ifdef CONFIG_CIFS_DFS_UPCALL
1767 dfs_cache_destroy();
1768out_destroy_request_bufs:
1769#endif
1770 cifs_destroy_request_bufs();
1771out_destroy_mids:
1772 destroy_mids();
1773out_destroy_inodecache:
1774 cifs_destroy_inodecache();
1775out_destroy_deferredclose_wq:
1776 destroy_workqueue(deferredclose_wq);
1777out_destroy_cifsoplockd_wq:
1778 destroy_workqueue(cifsoplockd_wq);
1779out_destroy_fileinfo_put_wq:
1780 destroy_workqueue(fileinfo_put_wq);
1781out_destroy_decrypt_wq:
1782 destroy_workqueue(decrypt_wq);
1783out_destroy_cifsiod_wq:
1784 destroy_workqueue(cifsiod_wq);
1785out_clean_proc:
1786 cifs_proc_clean();
1787 return rc;
1788}
1789
1790static void __exit
1791exit_cifs(void)
1792{
1793 cifs_dbg(NOISY, "exit_smb3\n");
1794 unregister_filesystem(&cifs_fs_type);
1795 unregister_filesystem(&smb3_fs_type);
1796 cifs_dfs_release_automount_timer();
1797 exit_cifs_idmap();
1798#ifdef CONFIG_CIFS_SWN_UPCALL
1799 cifs_genl_exit();
1800#endif
1801#ifdef CONFIG_CIFS_UPCALL
1802 exit_cifs_spnego();
1803#endif
1804#ifdef CONFIG_CIFS_DFS_UPCALL
1805 dfs_cache_destroy();
1806#endif
1807 cifs_destroy_request_bufs();
1808 destroy_mids();
1809 cifs_destroy_inodecache();
1810 destroy_workqueue(deferredclose_wq);
1811 destroy_workqueue(cifsoplockd_wq);
1812 destroy_workqueue(decrypt_wq);
1813 destroy_workqueue(fileinfo_put_wq);
1814 destroy_workqueue(cifsiod_wq);
1815 cifs_proc_clean();
1816}
1817
1818MODULE_AUTHOR("Steve French");
1819MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1820MODULE_DESCRIPTION
1821 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1822 "also older servers complying with the SNIA CIFS Specification)");
1823MODULE_VERSION(CIFS_VERSION);
1824MODULE_SOFTDEP("ecb");
1825MODULE_SOFTDEP("hmac");
1826MODULE_SOFTDEP("md5");
1827MODULE_SOFTDEP("nls");
1828MODULE_SOFTDEP("aes");
1829MODULE_SOFTDEP("cmac");
1830MODULE_SOFTDEP("sha256");
1831MODULE_SOFTDEP("sha512");
1832MODULE_SOFTDEP("aead2");
1833MODULE_SOFTDEP("ccm");
1834MODULE_SOFTDEP("gcm");
1835module_init(init_cifs)
1836module_exit(exit_cifs)
127 * Bumps refcount for cifs super block.
128 * Note that it should be only called if a referece to VFS super block is
129 * already held, e.g. in open-type syscalls context. Otherwise it can race with
130 * atomic_dec_and_test in deactivate_locked_super.
131 */
132void
133cifs_sb_active(struct super_block *sb)
134{
135 struct cifs_sb_info *server = CIFS_SB(sb);
136
137 if (atomic_inc_return(&server->active) == 1)
138 atomic_inc(&sb->s_active);
139}
140
141void
142cifs_sb_deactive(struct super_block *sb)
143{
144 struct cifs_sb_info *server = CIFS_SB(sb);
145
146 if (atomic_dec_and_test(&server->active))
147 deactivate_super(sb);
148}
149
150static int
151cifs_read_super(struct super_block *sb)
152{
153 struct inode *inode;
154 struct cifs_sb_info *cifs_sb;
155 struct cifs_tcon *tcon;
156 struct timespec64 ts;
157 int rc = 0;
158
159 cifs_sb = CIFS_SB(sb);
160 tcon = cifs_sb_master_tcon(cifs_sb);
161
162 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
163 sb->s_flags |= SB_POSIXACL;
164
165 if (tcon->snapshot_time)
166 sb->s_flags |= SB_RDONLY;
167
168 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
169 sb->s_maxbytes = MAX_LFS_FILESIZE;
170 else
171 sb->s_maxbytes = MAX_NON_LFS;
172
173 /*
174 * Some very old servers like DOS and OS/2 used 2 second granularity
175 * (while all current servers use 100ns granularity - see MS-DTYP)
176 * but 1 second is the maximum allowed granularity for the VFS
177 * so for old servers set time granularity to 1 second while for
178 * everything else (current servers) set it to 100ns.
179 */
180 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
181 ((tcon->ses->capabilities &
182 tcon->ses->server->vals->cap_nt_find) == 0) &&
183 !tcon->unix_ext) {
184 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
185 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
186 sb->s_time_min = ts.tv_sec;
187 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
188 cpu_to_le16(SMB_TIME_MAX), 0);
189 sb->s_time_max = ts.tv_sec;
190 } else {
191 /*
192 * Almost every server, including all SMB2+, uses DCE TIME
193 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
194 */
195 sb->s_time_gran = 100;
196 ts = cifs_NTtimeToUnix(0);
197 sb->s_time_min = ts.tv_sec;
198 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
199 sb->s_time_max = ts.tv_sec;
200 }
201
202 sb->s_magic = CIFS_MAGIC_NUMBER;
203 sb->s_op = &cifs_super_ops;
204 sb->s_xattr = cifs_xattr_handlers;
205 rc = super_setup_bdi(sb);
206 if (rc)
207 goto out_no_root;
208 /* tune readahead according to rsize */
209 sb->s_bdi->ra_pages = cifs_sb->rsize / PAGE_SIZE;
210
211 sb->s_blocksize = CIFS_MAX_MSGSIZE;
212 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
213 inode = cifs_root_iget(sb);
214
215 if (IS_ERR(inode)) {
216 rc = PTR_ERR(inode);
217 goto out_no_root;
218 }
219
220 if (tcon->nocase)
221 sb->s_d_op = &cifs_ci_dentry_ops;
222 else
223 sb->s_d_op = &cifs_dentry_ops;
224
225 sb->s_root = d_make_root(inode);
226 if (!sb->s_root) {
227 rc = -ENOMEM;
228 goto out_no_root;
229 }
230
231#ifdef CONFIG_CIFS_NFSD_EXPORT
232 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
233 cifs_dbg(FYI, "export ops supported\n");
234 sb->s_export_op = &cifs_export_ops;
235 }
236#endif /* CONFIG_CIFS_NFSD_EXPORT */
237
238 return 0;
239
240out_no_root:
241 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
242 return rc;
243}
244
245static void cifs_kill_sb(struct super_block *sb)
246{
247 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
248 kill_anon_super(sb);
249 cifs_umount(cifs_sb);
250}
251
252static int
253cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
254{
255 struct super_block *sb = dentry->d_sb;
256 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
257 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
258 struct TCP_Server_Info *server = tcon->ses->server;
259 unsigned int xid;
260 int rc = 0;
261
262 xid = get_xid();
263
264 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
265 buf->f_namelen =
266 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
267 else
268 buf->f_namelen = PATH_MAX;
269
270 buf->f_fsid.val[0] = tcon->vol_serial_number;
271 /* are using part of create time for more randomness, see man statfs */
272 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
273
274 buf->f_files = 0; /* undefined */
275 buf->f_ffree = 0; /* unlimited */
276
277 if (server->ops->queryfs)
278 rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
279
280 free_xid(xid);
281 return 0;
282}
283
284static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
285{
286 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
287 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
288 struct TCP_Server_Info *server = tcon->ses->server;
289
290 if (server->ops->fallocate)
291 return server->ops->fallocate(file, tcon, mode, off, len);
292
293 return -EOPNOTSUPP;
294}
295
296static int cifs_permission(struct inode *inode, int mask)
297{
298 struct cifs_sb_info *cifs_sb;
299
300 cifs_sb = CIFS_SB(inode->i_sb);
301
302 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
303 if ((mask & MAY_EXEC) && !execute_ok(inode))
304 return -EACCES;
305 else
306 return 0;
307 } else /* file mode might have been restricted at mount time
308 on the client (above and beyond ACL on servers) for
309 servers which do not support setting and viewing mode bits,
310 so allowing client to check permissions is useful */
311 return generic_permission(inode, mask);
312}
313
314static struct kmem_cache *cifs_inode_cachep;
315static struct kmem_cache *cifs_req_cachep;
316static struct kmem_cache *cifs_mid_cachep;
317static struct kmem_cache *cifs_sm_req_cachep;
318mempool_t *cifs_sm_req_poolp;
319mempool_t *cifs_req_poolp;
320mempool_t *cifs_mid_poolp;
321
322static struct inode *
323cifs_alloc_inode(struct super_block *sb)
324{
325 struct cifsInodeInfo *cifs_inode;
326 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
327 if (!cifs_inode)
328 return NULL;
329 cifs_inode->cifsAttrs = 0x20; /* default */
330 cifs_inode->time = 0;
331 /*
332 * Until the file is open and we have gotten oplock info back from the
333 * server, can not assume caching of file data or metadata.
334 */
335 cifs_set_oplock_level(cifs_inode, 0);
336 cifs_inode->flags = 0;
337 spin_lock_init(&cifs_inode->writers_lock);
338 cifs_inode->writers = 0;
339 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
340 cifs_inode->server_eof = 0;
341 cifs_inode->uniqueid = 0;
342 cifs_inode->createtime = 0;
343 cifs_inode->epoch = 0;
344 spin_lock_init(&cifs_inode->open_file_lock);
345 generate_random_uuid(cifs_inode->lease_key);
346
347 /*
348 * Can not set i_flags here - they get immediately overwritten to zero
349 * by the VFS.
350 */
351 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
352 INIT_LIST_HEAD(&cifs_inode->openFileList);
353 INIT_LIST_HEAD(&cifs_inode->llist);
354 return &cifs_inode->vfs_inode;
355}
356
357static void
358cifs_free_inode(struct inode *inode)
359{
360 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
361}
362
363static void
364cifs_evict_inode(struct inode *inode)
365{
366 truncate_inode_pages_final(&inode->i_data);
367 clear_inode(inode);
368 cifs_fscache_release_inode_cookie(inode);
369}
370
371static void
372cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
373{
374 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
375 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
376
377 seq_puts(s, ",addr=");
378
379 switch (server->dstaddr.ss_family) {
380 case AF_INET:
381 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
382 break;
383 case AF_INET6:
384 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
385 if (sa6->sin6_scope_id)
386 seq_printf(s, "%%%u", sa6->sin6_scope_id);
387 break;
388 default:
389 seq_puts(s, "(unknown)");
390 }
391 if (server->rdma)
392 seq_puts(s, ",rdma");
393}
394
395static void
396cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
397{
398 if (ses->sectype == Unspecified) {
399 if (ses->user_name == NULL)
400 seq_puts(s, ",sec=none");
401 return;
402 }
403
404 seq_puts(s, ",sec=");
405
406 switch (ses->sectype) {
407 case LANMAN:
408 seq_puts(s, "lanman");
409 break;
410 case NTLMv2:
411 seq_puts(s, "ntlmv2");
412 break;
413 case NTLM:
414 seq_puts(s, "ntlm");
415 break;
416 case Kerberos:
417 seq_puts(s, "krb5");
418 break;
419 case RawNTLMSSP:
420 seq_puts(s, "ntlmssp");
421 break;
422 default:
423 /* shouldn't ever happen */
424 seq_puts(s, "unknown");
425 break;
426 }
427
428 if (ses->sign)
429 seq_puts(s, "i");
430
431 if (ses->sectype == Kerberos)
432 seq_printf(s, ",cruid=%u",
433 from_kuid_munged(&init_user_ns, ses->cred_uid));
434}
435
436static void
437cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
438{
439 seq_puts(s, ",cache=");
440
441 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
442 seq_puts(s, "strict");
443 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
444 seq_puts(s, "none");
445 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
446 seq_puts(s, "singleclient"); /* assume only one client access */
447 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
448 seq_puts(s, "ro"); /* read only caching assumed */
449 else
450 seq_puts(s, "loose");
451}
452
453static void
454cifs_show_nls(struct seq_file *s, struct nls_table *cur)
455{
456 struct nls_table *def;
457
458 /* Display iocharset= option if it's not default charset */
459 def = load_nls_default();
460 if (def != cur)
461 seq_printf(s, ",iocharset=%s", cur->charset);
462 unload_nls(def);
463}
464
465/*
466 * cifs_show_options() is for displaying mount options in /proc/mounts.
467 * Not all settable options are displayed but most of the important
468 * ones are.
469 */
470static int
471cifs_show_options(struct seq_file *s, struct dentry *root)
472{
473 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
474 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
475 struct sockaddr *srcaddr;
476 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
477
478 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
479 cifs_show_security(s, tcon->ses);
480 cifs_show_cache_flavor(s, cifs_sb);
481
482 if (tcon->no_lease)
483 seq_puts(s, ",nolease");
484 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
485 seq_puts(s, ",multiuser");
486 else if (tcon->ses->user_name)
487 seq_show_option(s, "username", tcon->ses->user_name);
488
489 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
490 seq_show_option(s, "domain", tcon->ses->domainName);
491
492 if (srcaddr->sa_family != AF_UNSPEC) {
493 struct sockaddr_in *saddr4;
494 struct sockaddr_in6 *saddr6;
495 saddr4 = (struct sockaddr_in *)srcaddr;
496 saddr6 = (struct sockaddr_in6 *)srcaddr;
497 if (srcaddr->sa_family == AF_INET6)
498 seq_printf(s, ",srcaddr=%pI6c",
499 &saddr6->sin6_addr);
500 else if (srcaddr->sa_family == AF_INET)
501 seq_printf(s, ",srcaddr=%pI4",
502 &saddr4->sin_addr.s_addr);
503 else
504 seq_printf(s, ",srcaddr=BAD-AF:%i",
505 (int)(srcaddr->sa_family));
506 }
507
508 seq_printf(s, ",uid=%u",
509 from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
510 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
511 seq_puts(s, ",forceuid");
512 else
513 seq_puts(s, ",noforceuid");
514
515 seq_printf(s, ",gid=%u",
516 from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
517 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
518 seq_puts(s, ",forcegid");
519 else
520 seq_puts(s, ",noforcegid");
521
522 cifs_show_address(s, tcon->ses->server);
523
524 if (!tcon->unix_ext)
525 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
526 cifs_sb->mnt_file_mode,
527 cifs_sb->mnt_dir_mode);
528
529 cifs_show_nls(s, cifs_sb->local_nls);
530
531 if (tcon->seal)
532 seq_puts(s, ",seal");
533 else if (tcon->ses->server->ignore_signature)
534 seq_puts(s, ",signloosely");
535 if (tcon->nocase)
536 seq_puts(s, ",nocase");
537 if (tcon->nodelete)
538 seq_puts(s, ",nodelete");
539 if (tcon->local_lease)
540 seq_puts(s, ",locallease");
541 if (tcon->retry)
542 seq_puts(s, ",hard");
543 else
544 seq_puts(s, ",soft");
545 if (tcon->use_persistent)
546 seq_puts(s, ",persistenthandles");
547 else if (tcon->use_resilient)
548 seq_puts(s, ",resilienthandles");
549 if (tcon->posix_extensions)
550 seq_puts(s, ",posix");
551 else if (tcon->unix_ext)
552 seq_puts(s, ",unix");
553 else
554 seq_puts(s, ",nounix");
555 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
556 seq_puts(s, ",nodfs");
557 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
558 seq_puts(s, ",posixpaths");
559 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
560 seq_puts(s, ",setuids");
561 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
562 seq_puts(s, ",idsfromsid");
563 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
564 seq_puts(s, ",serverino");
565 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
566 seq_puts(s, ",rwpidforward");
567 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
568 seq_puts(s, ",forcemand");
569 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
570 seq_puts(s, ",nouser_xattr");
571 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
572 seq_puts(s, ",mapchars");
573 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
574 seq_puts(s, ",mapposix");
575 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
576 seq_puts(s, ",sfu");
577 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
578 seq_puts(s, ",nobrl");
579 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
580 seq_puts(s, ",nohandlecache");
581 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
582 seq_puts(s, ",modefromsid");
583 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
584 seq_puts(s, ",cifsacl");
585 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
586 seq_puts(s, ",dynperm");
587 if (root->d_sb->s_flags & SB_POSIXACL)
588 seq_puts(s, ",acl");
589 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
590 seq_puts(s, ",mfsymlinks");
591 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
592 seq_puts(s, ",fsc");
593 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
594 seq_puts(s, ",nostrictsync");
595 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
596 seq_puts(s, ",noperm");
597 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
598 seq_printf(s, ",backupuid=%u",
599 from_kuid_munged(&init_user_ns,
600 cifs_sb->mnt_backupuid));
601 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
602 seq_printf(s, ",backupgid=%u",
603 from_kgid_munged(&init_user_ns,
604 cifs_sb->mnt_backupgid));
605
606 seq_printf(s, ",rsize=%u", cifs_sb->rsize);
607 seq_printf(s, ",wsize=%u", cifs_sb->wsize);
608 seq_printf(s, ",bsize=%u", cifs_sb->bsize);
609 if (tcon->ses->server->min_offload)
610 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
611 seq_printf(s, ",echo_interval=%lu",
612 tcon->ses->server->echo_interval / HZ);
613
614 /* Only display max_credits if it was overridden on mount */
615 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
616 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
617
618 if (tcon->snapshot_time)
619 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
620 if (tcon->handle_timeout)
621 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
622 /* convert actimeo and display it in seconds */
623 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
624
625 if (tcon->ses->chan_max > 1)
626 seq_printf(s, ",multichannel,max_channels=%zu",
627 tcon->ses->chan_max);
628
629 return 0;
630}
631
632static void cifs_umount_begin(struct super_block *sb)
633{
634 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
635 struct cifs_tcon *tcon;
636
637 if (cifs_sb == NULL)
638 return;
639
640 tcon = cifs_sb_master_tcon(cifs_sb);
641
642 spin_lock(&cifs_tcp_ses_lock);
643 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
644 /* we have other mounts to same share or we have
645 already tried to force umount this and woken up
646 all waiting network requests, nothing to do */
647 spin_unlock(&cifs_tcp_ses_lock);
648 return;
649 } else if (tcon->tc_count == 1)
650 tcon->tidStatus = CifsExiting;
651 spin_unlock(&cifs_tcp_ses_lock);
652
653 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
654 /* cancel_notify_requests(tcon); */
655 if (tcon->ses && tcon->ses->server) {
656 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
657 wake_up_all(&tcon->ses->server->request_q);
658 wake_up_all(&tcon->ses->server->response_q);
659 msleep(1); /* yield */
660 /* we have to kick the requests once more */
661 wake_up_all(&tcon->ses->server->response_q);
662 msleep(1);
663 }
664
665 return;
666}
667
668#ifdef CONFIG_CIFS_STATS2
669static int cifs_show_stats(struct seq_file *s, struct dentry *root)
670{
671 /* BB FIXME */
672 return 0;
673}
674#endif
675
676static int cifs_remount(struct super_block *sb, int *flags, char *data)
677{
678 sync_filesystem(sb);
679 *flags |= SB_NODIRATIME;
680 return 0;
681}
682
683static int cifs_drop_inode(struct inode *inode)
684{
685 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
686
687 /* no serverino => unconditional eviction */
688 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
689 generic_drop_inode(inode);
690}
691
692static const struct super_operations cifs_super_ops = {
693 .statfs = cifs_statfs,
694 .alloc_inode = cifs_alloc_inode,
695 .free_inode = cifs_free_inode,
696 .drop_inode = cifs_drop_inode,
697 .evict_inode = cifs_evict_inode,
698/* .delete_inode = cifs_delete_inode, */ /* Do not need above
699 function unless later we add lazy close of inodes or unless the
700 kernel forgets to call us with the same number of releases (closes)
701 as opens */
702 .show_options = cifs_show_options,
703 .umount_begin = cifs_umount_begin,
704 .remount_fs = cifs_remount,
705#ifdef CONFIG_CIFS_STATS2
706 .show_stats = cifs_show_stats,
707#endif
708};
709
710/*
711 * Get root dentry from superblock according to prefix path mount option.
712 * Return dentry with refcount + 1 on success and NULL otherwise.
713 */
714static struct dentry *
715cifs_get_root(struct smb_vol *vol, struct super_block *sb)
716{
717 struct dentry *dentry;
718 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
719 char *full_path = NULL;
720 char *s, *p;
721 char sep;
722
723 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
724 return dget(sb->s_root);
725
726 full_path = cifs_build_path_to_root(vol, cifs_sb,
727 cifs_sb_master_tcon(cifs_sb), 0);
728 if (full_path == NULL)
729 return ERR_PTR(-ENOMEM);
730
731 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
732
733 sep = CIFS_DIR_SEP(cifs_sb);
734 dentry = dget(sb->s_root);
735 p = s = full_path;
736
737 do {
738 struct inode *dir = d_inode(dentry);
739 struct dentry *child;
740
741 if (!S_ISDIR(dir->i_mode)) {
742 dput(dentry);
743 dentry = ERR_PTR(-ENOTDIR);
744 break;
745 }
746
747 /* skip separators */
748 while (*s == sep)
749 s++;
750 if (!*s)
751 break;
752 p = s++;
753 /* next separator */
754 while (*s && *s != sep)
755 s++;
756
757 child = lookup_positive_unlocked(p, dentry, s - p);
758 dput(dentry);
759 dentry = child;
760 } while (!IS_ERR(dentry));
761 kfree(full_path);
762 return dentry;
763}
764
765static int cifs_set_super(struct super_block *sb, void *data)
766{
767 struct cifs_mnt_data *mnt_data = data;
768 sb->s_fs_info = mnt_data->cifs_sb;
769 return set_anon_super(sb, NULL);
770}
771
772static struct dentry *
773cifs_smb3_do_mount(struct file_system_type *fs_type,
774 int flags, const char *dev_name, void *data, bool is_smb3)
775{
776 int rc;
777 struct super_block *sb;
778 struct cifs_sb_info *cifs_sb;
779 struct smb_vol *volume_info;
780 struct cifs_mnt_data mnt_data;
781 struct dentry *root;
782
783 /*
784 * Prints in Kernel / CIFS log the attempted mount operation
785 * If CIFS_DEBUG && cifs_FYI
786 */
787 if (cifsFYI)
788 cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
789 else
790 cifs_info("Attempting to mount %s\n", dev_name);
791
792 volume_info = cifs_get_volume_info((char *)data, dev_name, is_smb3);
793 if (IS_ERR(volume_info))
794 return ERR_CAST(volume_info);
795
796 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
797 if (cifs_sb == NULL) {
798 root = ERR_PTR(-ENOMEM);
799 goto out_nls;
800 }
801
802 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
803 if (cifs_sb->mountdata == NULL) {
804 root = ERR_PTR(-ENOMEM);
805 goto out_free;
806 }
807
808 rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
809 if (rc) {
810 root = ERR_PTR(rc);
811 goto out_free;
812 }
813
814 rc = cifs_mount(cifs_sb, volume_info);
815 if (rc) {
816 if (!(flags & SB_SILENT))
817 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
818 rc);
819 root = ERR_PTR(rc);
820 goto out_free;
821 }
822
823 mnt_data.vol = volume_info;
824 mnt_data.cifs_sb = cifs_sb;
825 mnt_data.flags = flags;
826
827 /* BB should we make this contingent on mount parm? */
828 flags |= SB_NODIRATIME | SB_NOATIME;
829
830 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
831 if (IS_ERR(sb)) {
832 root = ERR_CAST(sb);
833 cifs_umount(cifs_sb);
834 goto out;
835 }
836
837 if (sb->s_root) {
838 cifs_dbg(FYI, "Use existing superblock\n");
839 cifs_umount(cifs_sb);
840 } else {
841 rc = cifs_read_super(sb);
842 if (rc) {
843 root = ERR_PTR(rc);
844 goto out_super;
845 }
846
847 sb->s_flags |= SB_ACTIVE;
848 }
849
850 root = cifs_get_root(volume_info, sb);
851 if (IS_ERR(root))
852 goto out_super;
853
854 cifs_dbg(FYI, "dentry root is: %p\n", root);
855 goto out;
856
857out_super:
858 deactivate_locked_super(sb);
859out:
860 cifs_cleanup_volume_info(volume_info);
861 return root;
862
863out_free:
864 kfree(cifs_sb->prepath);
865 kfree(cifs_sb->mountdata);
866 kfree(cifs_sb);
867out_nls:
868 unload_nls(volume_info->local_nls);
869 goto out;
870}
871
872static struct dentry *
873smb3_do_mount(struct file_system_type *fs_type,
874 int flags, const char *dev_name, void *data)
875{
876 return cifs_smb3_do_mount(fs_type, flags, dev_name, data, true);
877}
878
879static struct dentry *
880cifs_do_mount(struct file_system_type *fs_type,
881 int flags, const char *dev_name, void *data)
882{
883 return cifs_smb3_do_mount(fs_type, flags, dev_name, data, false);
884}
885
886static ssize_t
887cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
888{
889 ssize_t rc;
890 struct inode *inode = file_inode(iocb->ki_filp);
891
892 if (iocb->ki_filp->f_flags & O_DIRECT)
893 return cifs_user_readv(iocb, iter);
894
895 rc = cifs_revalidate_mapping(inode);
896 if (rc)
897 return rc;
898
899 return generic_file_read_iter(iocb, iter);
900}
901
902static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
903{
904 struct inode *inode = file_inode(iocb->ki_filp);
905 struct cifsInodeInfo *cinode = CIFS_I(inode);
906 ssize_t written;
907 int rc;
908
909 if (iocb->ki_filp->f_flags & O_DIRECT) {
910 written = cifs_user_writev(iocb, from);
911 if (written > 0 && CIFS_CACHE_READ(cinode)) {
912 cifs_zap_mapping(inode);
913 cifs_dbg(FYI,
914 "Set no oplock for inode=%p after a write operation\n",
915 inode);
916 cinode->oplock = 0;
917 }
918 return written;
919 }
920
921 written = cifs_get_writer(cinode);
922 if (written)
923 return written;
924
925 written = generic_file_write_iter(iocb, from);
926
927 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
928 goto out;
929
930 rc = filemap_fdatawrite(inode->i_mapping);
931 if (rc)
932 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
933 rc, inode);
934
935out:
936 cifs_put_writer(cinode);
937 return written;
938}
939
940static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
941{
942 struct cifsFileInfo *cfile = file->private_data;
943 struct cifs_tcon *tcon;
944
945 /*
946 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
947 * the cached file length
948 */
949 if (whence != SEEK_SET && whence != SEEK_CUR) {
950 int rc;
951 struct inode *inode = file_inode(file);
952
953 /*
954 * We need to be sure that all dirty pages are written and the
955 * server has the newest file length.
956 */
957 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
958 inode->i_mapping->nrpages != 0) {
959 rc = filemap_fdatawait(inode->i_mapping);
960 if (rc) {
961 mapping_set_error(inode->i_mapping, rc);
962 return rc;
963 }
964 }
965 /*
966 * Some applications poll for the file length in this strange
967 * way so we must seek to end on non-oplocked files by
968 * setting the revalidate time to zero.
969 */
970 CIFS_I(inode)->time = 0;
971
972 rc = cifs_revalidate_file_attr(file);
973 if (rc < 0)
974 return (loff_t)rc;
975 }
976 if (cfile && cfile->tlink) {
977 tcon = tlink_tcon(cfile->tlink);
978 if (tcon->ses->server->ops->llseek)
979 return tcon->ses->server->ops->llseek(file, tcon,
980 offset, whence);
981 }
982 return generic_file_llseek(file, offset, whence);
983}
984
985static int
986cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
987{
988 /*
989 * Note that this is called by vfs setlease with i_lock held to
990 * protect *lease from going away.
991 */
992 struct inode *inode = file_inode(file);
993 struct cifsFileInfo *cfile = file->private_data;
994
995 if (!(S_ISREG(inode->i_mode)))
996 return -EINVAL;
997
998 /* Check if file is oplocked if this is request for new lease */
999 if (arg == F_UNLCK ||
1000 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1001 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1002 return generic_setlease(file, arg, lease, priv);
1003 else if (tlink_tcon(cfile->tlink)->local_lease &&
1004 !CIFS_CACHE_READ(CIFS_I(inode)))
1005 /*
1006 * If the server claims to support oplock on this file, then we
1007 * still need to check oplock even if the local_lease mount
1008 * option is set, but there are servers which do not support
1009 * oplock for which this mount option may be useful if the user
1010 * knows that the file won't be changed on the server by anyone
1011 * else.
1012 */
1013 return generic_setlease(file, arg, lease, priv);
1014 else
1015 return -EAGAIN;
1016}
1017
1018struct file_system_type cifs_fs_type = {
1019 .owner = THIS_MODULE,
1020 .name = "cifs",
1021 .mount = cifs_do_mount,
1022 .kill_sb = cifs_kill_sb,
1023 .fs_flags = FS_RENAME_DOES_D_MOVE,
1024};
1025MODULE_ALIAS_FS("cifs");
1026
1027static struct file_system_type smb3_fs_type = {
1028 .owner = THIS_MODULE,
1029 .name = "smb3",
1030 .mount = smb3_do_mount,
1031 .kill_sb = cifs_kill_sb,
1032 .fs_flags = FS_RENAME_DOES_D_MOVE,
1033};
1034MODULE_ALIAS_FS("smb3");
1035MODULE_ALIAS("smb3");
1036
1037const struct inode_operations cifs_dir_inode_ops = {
1038 .create = cifs_create,
1039 .atomic_open = cifs_atomic_open,
1040 .lookup = cifs_lookup,
1041 .getattr = cifs_getattr,
1042 .unlink = cifs_unlink,
1043 .link = cifs_hardlink,
1044 .mkdir = cifs_mkdir,
1045 .rmdir = cifs_rmdir,
1046 .rename = cifs_rename2,
1047 .permission = cifs_permission,
1048 .setattr = cifs_setattr,
1049 .symlink = cifs_symlink,
1050 .mknod = cifs_mknod,
1051 .listxattr = cifs_listxattr,
1052};
1053
1054const struct inode_operations cifs_file_inode_ops = {
1055 .setattr = cifs_setattr,
1056 .getattr = cifs_getattr,
1057 .permission = cifs_permission,
1058 .listxattr = cifs_listxattr,
1059 .fiemap = cifs_fiemap,
1060};
1061
1062const struct inode_operations cifs_symlink_inode_ops = {
1063 .get_link = cifs_get_link,
1064 .permission = cifs_permission,
1065 .listxattr = cifs_listxattr,
1066};
1067
1068static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1069 struct file *dst_file, loff_t destoff, loff_t len,
1070 unsigned int remap_flags)
1071{
1072 struct inode *src_inode = file_inode(src_file);
1073 struct inode *target_inode = file_inode(dst_file);
1074 struct cifsFileInfo *smb_file_src = src_file->private_data;
1075 struct cifsFileInfo *smb_file_target;
1076 struct cifs_tcon *target_tcon;
1077 unsigned int xid;
1078 int rc;
1079
1080 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1081 return -EINVAL;
1082
1083 cifs_dbg(FYI, "clone range\n");
1084
1085 xid = get_xid();
1086
1087 if (!src_file->private_data || !dst_file->private_data) {
1088 rc = -EBADF;
1089 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1090 goto out;
1091 }
1092
1093 smb_file_target = dst_file->private_data;
1094 target_tcon = tlink_tcon(smb_file_target->tlink);
1095
1096 /*
1097 * Note: cifs case is easier than btrfs since server responsible for
1098 * checks for proper open modes and file type and if it wants
1099 * server could even support copy of range where source = target
1100 */
1101 lock_two_nondirectories(target_inode, src_inode);
1102
1103 if (len == 0)
1104 len = src_inode->i_size - off;
1105
1106 cifs_dbg(FYI, "about to flush pages\n");
1107 /* should we flush first and last page first */
1108 truncate_inode_pages_range(&target_inode->i_data, destoff,
1109 PAGE_ALIGN(destoff + len)-1);
1110
1111 if (target_tcon->ses->server->ops->duplicate_extents)
1112 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1113 smb_file_src, smb_file_target, off, len, destoff);
1114 else
1115 rc = -EOPNOTSUPP;
1116
1117 /* force revalidate of size and timestamps of target file now
1118 that target is updated on the server */
1119 CIFS_I(target_inode)->time = 0;
1120 /* although unlocking in the reverse order from locking is not
1121 strictly necessary here it is a little cleaner to be consistent */
1122 unlock_two_nondirectories(src_inode, target_inode);
1123out:
1124 free_xid(xid);
1125 return rc < 0 ? rc : len;
1126}
1127
1128ssize_t cifs_file_copychunk_range(unsigned int xid,
1129 struct file *src_file, loff_t off,
1130 struct file *dst_file, loff_t destoff,
1131 size_t len, unsigned int flags)
1132{
1133 struct inode *src_inode = file_inode(src_file);
1134 struct inode *target_inode = file_inode(dst_file);
1135 struct cifsFileInfo *smb_file_src;
1136 struct cifsFileInfo *smb_file_target;
1137 struct cifs_tcon *src_tcon;
1138 struct cifs_tcon *target_tcon;
1139 ssize_t rc;
1140
1141 cifs_dbg(FYI, "copychunk range\n");
1142
1143 if (!src_file->private_data || !dst_file->private_data) {
1144 rc = -EBADF;
1145 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1146 goto out;
1147 }
1148
1149 rc = -EXDEV;
1150 smb_file_target = dst_file->private_data;
1151 smb_file_src = src_file->private_data;
1152 src_tcon = tlink_tcon(smb_file_src->tlink);
1153 target_tcon = tlink_tcon(smb_file_target->tlink);
1154
1155 if (src_tcon->ses != target_tcon->ses) {
1156 cifs_dbg(VFS, "source and target of copy not on same server\n");
1157 goto out;
1158 }
1159
1160 rc = -EOPNOTSUPP;
1161 if (!target_tcon->ses->server->ops->copychunk_range)
1162 goto out;
1163
1164 /*
1165 * Note: cifs case is easier than btrfs since server responsible for
1166 * checks for proper open modes and file type and if it wants
1167 * server could even support copy of range where source = target
1168 */
1169 lock_two_nondirectories(target_inode, src_inode);
1170
1171 cifs_dbg(FYI, "about to flush pages\n");
1172 /* should we flush first and last page first */
1173 truncate_inode_pages(&target_inode->i_data, 0);
1174
1175 rc = file_modified(dst_file);
1176 if (!rc)
1177 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1178 smb_file_src, smb_file_target, off, len, destoff);
1179
1180 file_accessed(src_file);
1181
1182 /* force revalidate of size and timestamps of target file now
1183 * that target is updated on the server
1184 */
1185 CIFS_I(target_inode)->time = 0;
1186 /* although unlocking in the reverse order from locking is not
1187 * strictly necessary here it is a little cleaner to be consistent
1188 */
1189 unlock_two_nondirectories(src_inode, target_inode);
1190
1191out:
1192 return rc;
1193}
1194
1195/*
1196 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1197 * is a dummy operation.
1198 */
1199static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1200{
1201 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1202 file, datasync);
1203
1204 return 0;
1205}
1206
1207static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1208 struct file *dst_file, loff_t destoff,
1209 size_t len, unsigned int flags)
1210{
1211 unsigned int xid = get_xid();
1212 ssize_t rc;
1213 struct cifsFileInfo *cfile = dst_file->private_data;
1214
1215 if (cfile->swapfile)
1216 return -EOPNOTSUPP;
1217
1218 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1219 len, flags);
1220 free_xid(xid);
1221
1222 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1223 rc = generic_copy_file_range(src_file, off, dst_file,
1224 destoff, len, flags);
1225 return rc;
1226}
1227
1228const struct file_operations cifs_file_ops = {
1229 .read_iter = cifs_loose_read_iter,
1230 .write_iter = cifs_file_write_iter,
1231 .open = cifs_open,
1232 .release = cifs_close,
1233 .lock = cifs_lock,
1234 .flock = cifs_flock,
1235 .fsync = cifs_fsync,
1236 .flush = cifs_flush,
1237 .mmap = cifs_file_mmap,
1238 .splice_read = generic_file_splice_read,
1239 .splice_write = iter_file_splice_write,
1240 .llseek = cifs_llseek,
1241 .unlocked_ioctl = cifs_ioctl,
1242 .copy_file_range = cifs_copy_file_range,
1243 .remap_file_range = cifs_remap_file_range,
1244 .setlease = cifs_setlease,
1245 .fallocate = cifs_fallocate,
1246};
1247
1248const struct file_operations cifs_file_strict_ops = {
1249 .read_iter = cifs_strict_readv,
1250 .write_iter = cifs_strict_writev,
1251 .open = cifs_open,
1252 .release = cifs_close,
1253 .lock = cifs_lock,
1254 .flock = cifs_flock,
1255 .fsync = cifs_strict_fsync,
1256 .flush = cifs_flush,
1257 .mmap = cifs_file_strict_mmap,
1258 .splice_read = generic_file_splice_read,
1259 .splice_write = iter_file_splice_write,
1260 .llseek = cifs_llseek,
1261 .unlocked_ioctl = cifs_ioctl,
1262 .copy_file_range = cifs_copy_file_range,
1263 .remap_file_range = cifs_remap_file_range,
1264 .setlease = cifs_setlease,
1265 .fallocate = cifs_fallocate,
1266};
1267
1268const struct file_operations cifs_file_direct_ops = {
1269 .read_iter = cifs_direct_readv,
1270 .write_iter = cifs_direct_writev,
1271 .open = cifs_open,
1272 .release = cifs_close,
1273 .lock = cifs_lock,
1274 .flock = cifs_flock,
1275 .fsync = cifs_fsync,
1276 .flush = cifs_flush,
1277 .mmap = cifs_file_mmap,
1278 .splice_read = generic_file_splice_read,
1279 .splice_write = iter_file_splice_write,
1280 .unlocked_ioctl = cifs_ioctl,
1281 .copy_file_range = cifs_copy_file_range,
1282 .remap_file_range = cifs_remap_file_range,
1283 .llseek = cifs_llseek,
1284 .setlease = cifs_setlease,
1285 .fallocate = cifs_fallocate,
1286};
1287
1288const struct file_operations cifs_file_nobrl_ops = {
1289 .read_iter = cifs_loose_read_iter,
1290 .write_iter = cifs_file_write_iter,
1291 .open = cifs_open,
1292 .release = cifs_close,
1293 .fsync = cifs_fsync,
1294 .flush = cifs_flush,
1295 .mmap = cifs_file_mmap,
1296 .splice_read = generic_file_splice_read,
1297 .splice_write = iter_file_splice_write,
1298 .llseek = cifs_llseek,
1299 .unlocked_ioctl = cifs_ioctl,
1300 .copy_file_range = cifs_copy_file_range,
1301 .remap_file_range = cifs_remap_file_range,
1302 .setlease = cifs_setlease,
1303 .fallocate = cifs_fallocate,
1304};
1305
1306const struct file_operations cifs_file_strict_nobrl_ops = {
1307 .read_iter = cifs_strict_readv,
1308 .write_iter = cifs_strict_writev,
1309 .open = cifs_open,
1310 .release = cifs_close,
1311 .fsync = cifs_strict_fsync,
1312 .flush = cifs_flush,
1313 .mmap = cifs_file_strict_mmap,
1314 .splice_read = generic_file_splice_read,
1315 .splice_write = iter_file_splice_write,
1316 .llseek = cifs_llseek,
1317 .unlocked_ioctl = cifs_ioctl,
1318 .copy_file_range = cifs_copy_file_range,
1319 .remap_file_range = cifs_remap_file_range,
1320 .setlease = cifs_setlease,
1321 .fallocate = cifs_fallocate,
1322};
1323
1324const struct file_operations cifs_file_direct_nobrl_ops = {
1325 .read_iter = cifs_direct_readv,
1326 .write_iter = cifs_direct_writev,
1327 .open = cifs_open,
1328 .release = cifs_close,
1329 .fsync = cifs_fsync,
1330 .flush = cifs_flush,
1331 .mmap = cifs_file_mmap,
1332 .splice_read = generic_file_splice_read,
1333 .splice_write = iter_file_splice_write,
1334 .unlocked_ioctl = cifs_ioctl,
1335 .copy_file_range = cifs_copy_file_range,
1336 .remap_file_range = cifs_remap_file_range,
1337 .llseek = cifs_llseek,
1338 .setlease = cifs_setlease,
1339 .fallocate = cifs_fallocate,
1340};
1341
1342const struct file_operations cifs_dir_ops = {
1343 .iterate_shared = cifs_readdir,
1344 .release = cifs_closedir,
1345 .read = generic_read_dir,
1346 .unlocked_ioctl = cifs_ioctl,
1347 .copy_file_range = cifs_copy_file_range,
1348 .remap_file_range = cifs_remap_file_range,
1349 .llseek = generic_file_llseek,
1350 .fsync = cifs_dir_fsync,
1351};
1352
1353static void
1354cifs_init_once(void *inode)
1355{
1356 struct cifsInodeInfo *cifsi = inode;
1357
1358 inode_init_once(&cifsi->vfs_inode);
1359 init_rwsem(&cifsi->lock_sem);
1360}
1361
1362static int __init
1363cifs_init_inodecache(void)
1364{
1365 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1366 sizeof(struct cifsInodeInfo),
1367 0, (SLAB_RECLAIM_ACCOUNT|
1368 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1369 cifs_init_once);
1370 if (cifs_inode_cachep == NULL)
1371 return -ENOMEM;
1372
1373 return 0;
1374}
1375
1376static void
1377cifs_destroy_inodecache(void)
1378{
1379 /*
1380 * Make sure all delayed rcu free inodes are flushed before we
1381 * destroy cache.
1382 */
1383 rcu_barrier();
1384 kmem_cache_destroy(cifs_inode_cachep);
1385}
1386
1387static int
1388cifs_init_request_bufs(void)
1389{
1390 /*
1391 * SMB2 maximum header size is bigger than CIFS one - no problems to
1392 * allocate some more bytes for CIFS.
1393 */
1394 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1395
1396 if (CIFSMaxBufSize < 8192) {
1397 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1398 Unicode path name has to fit in any SMB/CIFS path based frames */
1399 CIFSMaxBufSize = 8192;
1400 } else if (CIFSMaxBufSize > 1024*127) {
1401 CIFSMaxBufSize = 1024 * 127;
1402 } else {
1403 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1404 }
1405/*
1406 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1407 CIFSMaxBufSize, CIFSMaxBufSize);
1408*/
1409 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1410 CIFSMaxBufSize + max_hdr_size, 0,
1411 SLAB_HWCACHE_ALIGN, 0,
1412 CIFSMaxBufSize + max_hdr_size,
1413 NULL);
1414 if (cifs_req_cachep == NULL)
1415 return -ENOMEM;
1416
1417 if (cifs_min_rcv < 1)
1418 cifs_min_rcv = 1;
1419 else if (cifs_min_rcv > 64) {
1420 cifs_min_rcv = 64;
1421 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1422 }
1423
1424 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1425 cifs_req_cachep);
1426
1427 if (cifs_req_poolp == NULL) {
1428 kmem_cache_destroy(cifs_req_cachep);
1429 return -ENOMEM;
1430 }
1431 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1432 almost all handle based requests (but not write response, nor is it
1433 sufficient for path based requests). A smaller size would have
1434 been more efficient (compacting multiple slab items on one 4k page)
1435 for the case in which debug was on, but this larger size allows
1436 more SMBs to use small buffer alloc and is still much more
1437 efficient to alloc 1 per page off the slab compared to 17K (5page)
1438 alloc of large cifs buffers even when page debugging is on */
1439 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1440 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1441 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1442 if (cifs_sm_req_cachep == NULL) {
1443 mempool_destroy(cifs_req_poolp);
1444 kmem_cache_destroy(cifs_req_cachep);
1445 return -ENOMEM;
1446 }
1447
1448 if (cifs_min_small < 2)
1449 cifs_min_small = 2;
1450 else if (cifs_min_small > 256) {
1451 cifs_min_small = 256;
1452 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1453 }
1454
1455 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1456 cifs_sm_req_cachep);
1457
1458 if (cifs_sm_req_poolp == NULL) {
1459 mempool_destroy(cifs_req_poolp);
1460 kmem_cache_destroy(cifs_req_cachep);
1461 kmem_cache_destroy(cifs_sm_req_cachep);
1462 return -ENOMEM;
1463 }
1464
1465 return 0;
1466}
1467
1468static void
1469cifs_destroy_request_bufs(void)
1470{
1471 mempool_destroy(cifs_req_poolp);
1472 kmem_cache_destroy(cifs_req_cachep);
1473 mempool_destroy(cifs_sm_req_poolp);
1474 kmem_cache_destroy(cifs_sm_req_cachep);
1475}
1476
1477static int
1478cifs_init_mids(void)
1479{
1480 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1481 sizeof(struct mid_q_entry), 0,
1482 SLAB_HWCACHE_ALIGN, NULL);
1483 if (cifs_mid_cachep == NULL)
1484 return -ENOMEM;
1485
1486 /* 3 is a reasonable minimum number of simultaneous operations */
1487 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1488 if (cifs_mid_poolp == NULL) {
1489 kmem_cache_destroy(cifs_mid_cachep);
1490 return -ENOMEM;
1491 }
1492
1493 return 0;
1494}
1495
1496static void
1497cifs_destroy_mids(void)
1498{
1499 mempool_destroy(cifs_mid_poolp);
1500 kmem_cache_destroy(cifs_mid_cachep);
1501}
1502
1503static int __init
1504init_cifs(void)
1505{
1506 int rc = 0;
1507 cifs_proc_init();
1508 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1509#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
1510 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1511 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1512#endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
1513/*
1514 * Initialize Global counters
1515 */
1516 atomic_set(&sesInfoAllocCount, 0);
1517 atomic_set(&tconInfoAllocCount, 0);
1518 atomic_set(&tcpSesAllocCount, 0);
1519 atomic_set(&tcpSesReconnectCount, 0);
1520 atomic_set(&tconInfoReconnectCount, 0);
1521
1522 atomic_set(&bufAllocCount, 0);
1523 atomic_set(&smBufAllocCount, 0);
1524#ifdef CONFIG_CIFS_STATS2
1525 atomic_set(&totBufAllocCount, 0);
1526 atomic_set(&totSmBufAllocCount, 0);
1527 if (slow_rsp_threshold < 1)
1528 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1529 else if (slow_rsp_threshold > 32767)
1530 cifs_dbg(VFS,
1531 "slow response threshold set higher than recommended (0 to 32767)\n");
1532#endif /* CONFIG_CIFS_STATS2 */
1533
1534 atomic_set(&midCount, 0);
1535 GlobalCurrentXid = 0;
1536 GlobalTotalActiveXid = 0;
1537 GlobalMaxActiveXid = 0;
1538 spin_lock_init(&cifs_tcp_ses_lock);
1539 spin_lock_init(&GlobalMid_Lock);
1540
1541 cifs_lock_secret = get_random_u32();
1542
1543 if (cifs_max_pending < 2) {
1544 cifs_max_pending = 2;
1545 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1546 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1547 cifs_max_pending = CIFS_MAX_REQ;
1548 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1549 CIFS_MAX_REQ);
1550 }
1551
1552 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1553 if (!cifsiod_wq) {
1554 rc = -ENOMEM;
1555 goto out_clean_proc;
1556 }
1557
1558 /*
1559 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1560 * so that we don't launch too many worker threads but
1561 * Documentation/core-api/workqueue.rst recommends setting it to 0
1562 */
1563
1564 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1565 decrypt_wq = alloc_workqueue("smb3decryptd",
1566 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1567 if (!decrypt_wq) {
1568 rc = -ENOMEM;
1569 goto out_destroy_cifsiod_wq;
1570 }
1571
1572 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1573 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1574 if (!fileinfo_put_wq) {
1575 rc = -ENOMEM;
1576 goto out_destroy_decrypt_wq;
1577 }
1578
1579 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1580 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1581 if (!cifsoplockd_wq) {
1582 rc = -ENOMEM;
1583 goto out_destroy_fileinfo_put_wq;
1584 }
1585
1586 rc = cifs_fscache_register();
1587 if (rc)
1588 goto out_destroy_cifsoplockd_wq;
1589
1590 rc = cifs_init_inodecache();
1591 if (rc)
1592 goto out_unreg_fscache;
1593
1594 rc = cifs_init_mids();
1595 if (rc)
1596 goto out_destroy_inodecache;
1597
1598 rc = cifs_init_request_bufs();
1599 if (rc)
1600 goto out_destroy_mids;
1601
1602#ifdef CONFIG_CIFS_DFS_UPCALL
1603 rc = dfs_cache_init();
1604 if (rc)
1605 goto out_destroy_request_bufs;
1606#endif /* CONFIG_CIFS_DFS_UPCALL */
1607#ifdef CONFIG_CIFS_UPCALL
1608 rc = init_cifs_spnego();
1609 if (rc)
1610 goto out_destroy_dfs_cache;
1611#endif /* CONFIG_CIFS_UPCALL */
1612
1613 rc = init_cifs_idmap();
1614 if (rc)
1615 goto out_register_key_type;
1616
1617 rc = register_filesystem(&cifs_fs_type);
1618 if (rc)
1619 goto out_init_cifs_idmap;
1620
1621 rc = register_filesystem(&smb3_fs_type);
1622 if (rc) {
1623 unregister_filesystem(&cifs_fs_type);
1624 goto out_init_cifs_idmap;
1625 }
1626
1627 return 0;
1628
1629out_init_cifs_idmap:
1630 exit_cifs_idmap();
1631out_register_key_type:
1632#ifdef CONFIG_CIFS_UPCALL
1633 exit_cifs_spnego();
1634out_destroy_dfs_cache:
1635#endif
1636#ifdef CONFIG_CIFS_DFS_UPCALL
1637 dfs_cache_destroy();
1638out_destroy_request_bufs:
1639#endif
1640 cifs_destroy_request_bufs();
1641out_destroy_mids:
1642 cifs_destroy_mids();
1643out_destroy_inodecache:
1644 cifs_destroy_inodecache();
1645out_unreg_fscache:
1646 cifs_fscache_unregister();
1647out_destroy_cifsoplockd_wq:
1648 destroy_workqueue(cifsoplockd_wq);
1649out_destroy_fileinfo_put_wq:
1650 destroy_workqueue(fileinfo_put_wq);
1651out_destroy_decrypt_wq:
1652 destroy_workqueue(decrypt_wq);
1653out_destroy_cifsiod_wq:
1654 destroy_workqueue(cifsiod_wq);
1655out_clean_proc:
1656 cifs_proc_clean();
1657 return rc;
1658}
1659
1660static void __exit
1661exit_cifs(void)
1662{
1663 cifs_dbg(NOISY, "exit_smb3\n");
1664 unregister_filesystem(&cifs_fs_type);
1665 unregister_filesystem(&smb3_fs_type);
1666 cifs_dfs_release_automount_timer();
1667 exit_cifs_idmap();
1668#ifdef CONFIG_CIFS_UPCALL
1669 exit_cifs_spnego();
1670#endif
1671#ifdef CONFIG_CIFS_DFS_UPCALL
1672 dfs_cache_destroy();
1673#endif
1674 cifs_destroy_request_bufs();
1675 cifs_destroy_mids();
1676 cifs_destroy_inodecache();
1677 cifs_fscache_unregister();
1678 destroy_workqueue(cifsoplockd_wq);
1679 destroy_workqueue(decrypt_wq);
1680 destroy_workqueue(fileinfo_put_wq);
1681 destroy_workqueue(cifsiod_wq);
1682 cifs_proc_clean();
1683}
1684
1685MODULE_AUTHOR("Steve French");
1686MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1687MODULE_DESCRIPTION
1688 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1689 "also older servers complying with the SNIA CIFS Specification)");
1690MODULE_VERSION(CIFS_VERSION);
1691MODULE_SOFTDEP("ecb");
1692MODULE_SOFTDEP("hmac");
1693MODULE_SOFTDEP("md4");
1694MODULE_SOFTDEP("md5");
1695MODULE_SOFTDEP("nls");
1696MODULE_SOFTDEP("aes");
1697MODULE_SOFTDEP("cmac");
1698MODULE_SOFTDEP("sha256");
1699MODULE_SOFTDEP("sha512");
1700MODULE_SOFTDEP("aead2");
1701MODULE_SOFTDEP("ccm");
1702MODULE_SOFTDEP("gcm");
1703module_init(init_cifs)
1704module_exit(exit_cifs)