Loading...
1// SPDX-License-Identifier: LGPL-2.1
2/*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 */
10
11/* Note that BB means BUGBUG (ie something to fix eventually) */
12
13#include <linux/module.h>
14#include <linux/fs.h>
15#include <linux/mount.h>
16#include <linux/slab.h>
17#include <linux/init.h>
18#include <linux/list.h>
19#include <linux/seq_file.h>
20#include <linux/vfs.h>
21#include <linux/mempool.h>
22#include <linux/delay.h>
23#include <linux/kthread.h>
24#include <linux/freezer.h>
25#include <linux/namei.h>
26#include <linux/random.h>
27#include <linux/uuid.h>
28#include <linux/xattr.h>
29#include <uapi/linux/magic.h>
30#include <net/ipv6.h>
31#include "cifsfs.h"
32#include "cifspdu.h"
33#define DECLARE_GLOBALS_HERE
34#include "cifsglob.h"
35#include "cifsproto.h"
36#include "cifs_debug.h"
37#include "cifs_fs_sb.h"
38#include <linux/mm.h>
39#include <linux/key-type.h>
40#include "cifs_spnego.h"
41#include "fscache.h"
42#ifdef CONFIG_CIFS_DFS_UPCALL
43#include "dfs_cache.h"
44#endif
45#ifdef CONFIG_CIFS_SWN_UPCALL
46#include "netlink.h"
47#endif
48#include "fs_context.h"
49#include "cached_dir.h"
50
51/*
52 * DOS dates from 1980/1/1 through 2107/12/31
53 * Protocol specifications indicate the range should be to 119, which
54 * limits maximum year to 2099. But this range has not been checked.
55 */
56#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
57#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
58#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
59
60int cifsFYI = 0;
61bool traceSMB;
62bool enable_oplocks = true;
63bool linuxExtEnabled = true;
64bool lookupCacheEnabled = true;
65bool disable_legacy_dialects; /* false by default */
66bool enable_gcm_256 = true;
67bool require_gcm_256; /* false by default */
68bool enable_negotiate_signing; /* false by default */
69unsigned int global_secflags = CIFSSEC_DEF;
70/* unsigned int ntlmv2_support = 0; */
71unsigned int sign_CIFS_PDUs = 1;
72
73/*
74 * Global transaction id (XID) information
75 */
76unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
77unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
78unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
79spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
80
81/*
82 * Global counters, updated atomically
83 */
84atomic_t sesInfoAllocCount;
85atomic_t tconInfoAllocCount;
86atomic_t tcpSesNextId;
87atomic_t tcpSesAllocCount;
88atomic_t tcpSesReconnectCount;
89atomic_t tconInfoReconnectCount;
90
91atomic_t mid_count;
92atomic_t buf_alloc_count;
93atomic_t small_buf_alloc_count;
94#ifdef CONFIG_CIFS_STATS2
95atomic_t total_buf_alloc_count;
96atomic_t total_small_buf_alloc_count;
97#endif/* STATS2 */
98struct list_head cifs_tcp_ses_list;
99spinlock_t cifs_tcp_ses_lock;
100static const struct super_operations cifs_super_ops;
101unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
102module_param(CIFSMaxBufSize, uint, 0444);
103MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
104 "for CIFS requests. "
105 "Default: 16384 Range: 8192 to 130048");
106unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
107module_param(cifs_min_rcv, uint, 0444);
108MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
109 "1 to 64");
110unsigned int cifs_min_small = 30;
111module_param(cifs_min_small, uint, 0444);
112MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
113 "Range: 2 to 256");
114unsigned int cifs_max_pending = CIFS_MAX_REQ;
115module_param(cifs_max_pending, uint, 0444);
116MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
117 "CIFS/SMB1 dialect (N/A for SMB3) "
118 "Default: 32767 Range: 2 to 32767.");
119#ifdef CONFIG_CIFS_STATS2
120unsigned int slow_rsp_threshold = 1;
121module_param(slow_rsp_threshold, uint, 0644);
122MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
123 "before logging that a response is delayed. "
124 "Default: 1 (if set to 0 disables msg).");
125#endif /* STATS2 */
126
127module_param(enable_oplocks, bool, 0644);
128MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
129
130module_param(enable_gcm_256, bool, 0644);
131MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
132
133module_param(require_gcm_256, bool, 0644);
134MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
135
136module_param(enable_negotiate_signing, bool, 0644);
137MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
138
139module_param(disable_legacy_dialects, bool, 0644);
140MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
141 "helpful to restrict the ability to "
142 "override the default dialects (SMB2.1, "
143 "SMB3 and SMB3.02) on mount with old "
144 "dialects (CIFS/SMB1 and SMB2) since "
145 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
146 " and less secure. Default: n/N/0");
147
148extern mempool_t *cifs_sm_req_poolp;
149extern mempool_t *cifs_req_poolp;
150extern mempool_t *cifs_mid_poolp;
151
152struct workqueue_struct *cifsiod_wq;
153struct workqueue_struct *decrypt_wq;
154struct workqueue_struct *fileinfo_put_wq;
155struct workqueue_struct *cifsoplockd_wq;
156struct workqueue_struct *deferredclose_wq;
157__u32 cifs_lock_secret;
158
159/*
160 * Bumps refcount for cifs super block.
161 * Note that it should be only called if a referece to VFS super block is
162 * already held, e.g. in open-type syscalls context. Otherwise it can race with
163 * atomic_dec_and_test in deactivate_locked_super.
164 */
165void
166cifs_sb_active(struct super_block *sb)
167{
168 struct cifs_sb_info *server = CIFS_SB(sb);
169
170 if (atomic_inc_return(&server->active) == 1)
171 atomic_inc(&sb->s_active);
172}
173
174void
175cifs_sb_deactive(struct super_block *sb)
176{
177 struct cifs_sb_info *server = CIFS_SB(sb);
178
179 if (atomic_dec_and_test(&server->active))
180 deactivate_super(sb);
181}
182
183static int
184cifs_read_super(struct super_block *sb)
185{
186 struct inode *inode;
187 struct cifs_sb_info *cifs_sb;
188 struct cifs_tcon *tcon;
189 struct timespec64 ts;
190 int rc = 0;
191
192 cifs_sb = CIFS_SB(sb);
193 tcon = cifs_sb_master_tcon(cifs_sb);
194
195 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
196 sb->s_flags |= SB_POSIXACL;
197
198 if (tcon->snapshot_time)
199 sb->s_flags |= SB_RDONLY;
200
201 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
202 sb->s_maxbytes = MAX_LFS_FILESIZE;
203 else
204 sb->s_maxbytes = MAX_NON_LFS;
205
206 /*
207 * Some very old servers like DOS and OS/2 used 2 second granularity
208 * (while all current servers use 100ns granularity - see MS-DTYP)
209 * but 1 second is the maximum allowed granularity for the VFS
210 * so for old servers set time granularity to 1 second while for
211 * everything else (current servers) set it to 100ns.
212 */
213 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
214 ((tcon->ses->capabilities &
215 tcon->ses->server->vals->cap_nt_find) == 0) &&
216 !tcon->unix_ext) {
217 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
218 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
219 sb->s_time_min = ts.tv_sec;
220 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
221 cpu_to_le16(SMB_TIME_MAX), 0);
222 sb->s_time_max = ts.tv_sec;
223 } else {
224 /*
225 * Almost every server, including all SMB2+, uses DCE TIME
226 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
227 */
228 sb->s_time_gran = 100;
229 ts = cifs_NTtimeToUnix(0);
230 sb->s_time_min = ts.tv_sec;
231 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
232 sb->s_time_max = ts.tv_sec;
233 }
234
235 sb->s_magic = CIFS_SUPER_MAGIC;
236 sb->s_op = &cifs_super_ops;
237 sb->s_xattr = cifs_xattr_handlers;
238 rc = super_setup_bdi(sb);
239 if (rc)
240 goto out_no_root;
241 /* tune readahead according to rsize if readahead size not set on mount */
242 if (cifs_sb->ctx->rsize == 0)
243 cifs_sb->ctx->rsize =
244 tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
245 if (cifs_sb->ctx->rasize)
246 sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
247 else
248 sb->s_bdi->ra_pages = cifs_sb->ctx->rsize / PAGE_SIZE;
249
250 sb->s_blocksize = CIFS_MAX_MSGSIZE;
251 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
252 inode = cifs_root_iget(sb);
253
254 if (IS_ERR(inode)) {
255 rc = PTR_ERR(inode);
256 goto out_no_root;
257 }
258
259 if (tcon->nocase)
260 sb->s_d_op = &cifs_ci_dentry_ops;
261 else
262 sb->s_d_op = &cifs_dentry_ops;
263
264 sb->s_root = d_make_root(inode);
265 if (!sb->s_root) {
266 rc = -ENOMEM;
267 goto out_no_root;
268 }
269
270#ifdef CONFIG_CIFS_NFSD_EXPORT
271 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
272 cifs_dbg(FYI, "export ops supported\n");
273 sb->s_export_op = &cifs_export_ops;
274 }
275#endif /* CONFIG_CIFS_NFSD_EXPORT */
276
277 return 0;
278
279out_no_root:
280 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
281 return rc;
282}
283
284static void cifs_kill_sb(struct super_block *sb)
285{
286 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
287
288 /*
289 * We ned to release all dentries for the cached directories
290 * before we kill the sb.
291 */
292 if (cifs_sb->root) {
293 close_all_cached_dirs(cifs_sb);
294
295 /* finally release root dentry */
296 dput(cifs_sb->root);
297 cifs_sb->root = NULL;
298 }
299
300 kill_anon_super(sb);
301 cifs_umount(cifs_sb);
302}
303
304static int
305cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
306{
307 struct super_block *sb = dentry->d_sb;
308 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
309 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
310 struct TCP_Server_Info *server = tcon->ses->server;
311 unsigned int xid;
312 int rc = 0;
313
314 xid = get_xid();
315
316 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
317 buf->f_namelen =
318 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
319 else
320 buf->f_namelen = PATH_MAX;
321
322 buf->f_fsid.val[0] = tcon->vol_serial_number;
323 /* are using part of create time for more randomness, see man statfs */
324 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
325
326 buf->f_files = 0; /* undefined */
327 buf->f_ffree = 0; /* unlimited */
328
329 if (server->ops->queryfs)
330 rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
331
332 free_xid(xid);
333 return rc;
334}
335
336static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
337{
338 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
339 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
340 struct TCP_Server_Info *server = tcon->ses->server;
341
342 if (server->ops->fallocate)
343 return server->ops->fallocate(file, tcon, mode, off, len);
344
345 return -EOPNOTSUPP;
346}
347
348static int cifs_permission(struct user_namespace *mnt_userns,
349 struct inode *inode, int mask)
350{
351 struct cifs_sb_info *cifs_sb;
352
353 cifs_sb = CIFS_SB(inode->i_sb);
354
355 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
356 if ((mask & MAY_EXEC) && !execute_ok(inode))
357 return -EACCES;
358 else
359 return 0;
360 } else /* file mode might have been restricted at mount time
361 on the client (above and beyond ACL on servers) for
362 servers which do not support setting and viewing mode bits,
363 so allowing client to check permissions is useful */
364 return generic_permission(&init_user_ns, inode, mask);
365}
366
367static struct kmem_cache *cifs_inode_cachep;
368static struct kmem_cache *cifs_req_cachep;
369static struct kmem_cache *cifs_mid_cachep;
370static struct kmem_cache *cifs_sm_req_cachep;
371mempool_t *cifs_sm_req_poolp;
372mempool_t *cifs_req_poolp;
373mempool_t *cifs_mid_poolp;
374
375static struct inode *
376cifs_alloc_inode(struct super_block *sb)
377{
378 struct cifsInodeInfo *cifs_inode;
379 cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
380 if (!cifs_inode)
381 return NULL;
382 cifs_inode->cifsAttrs = 0x20; /* default */
383 cifs_inode->time = 0;
384 /*
385 * Until the file is open and we have gotten oplock info back from the
386 * server, can not assume caching of file data or metadata.
387 */
388 cifs_set_oplock_level(cifs_inode, 0);
389 cifs_inode->flags = 0;
390 spin_lock_init(&cifs_inode->writers_lock);
391 cifs_inode->writers = 0;
392 cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
393 cifs_inode->server_eof = 0;
394 cifs_inode->uniqueid = 0;
395 cifs_inode->createtime = 0;
396 cifs_inode->epoch = 0;
397 spin_lock_init(&cifs_inode->open_file_lock);
398 generate_random_uuid(cifs_inode->lease_key);
399 cifs_inode->symlink_target = NULL;
400
401 /*
402 * Can not set i_flags here - they get immediately overwritten to zero
403 * by the VFS.
404 */
405 /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
406 INIT_LIST_HEAD(&cifs_inode->openFileList);
407 INIT_LIST_HEAD(&cifs_inode->llist);
408 INIT_LIST_HEAD(&cifs_inode->deferred_closes);
409 spin_lock_init(&cifs_inode->deferred_lock);
410 return &cifs_inode->netfs.inode;
411}
412
413static void
414cifs_free_inode(struct inode *inode)
415{
416 struct cifsInodeInfo *cinode = CIFS_I(inode);
417
418 if (S_ISLNK(inode->i_mode))
419 kfree(cinode->symlink_target);
420 kmem_cache_free(cifs_inode_cachep, cinode);
421}
422
423static void
424cifs_evict_inode(struct inode *inode)
425{
426 truncate_inode_pages_final(&inode->i_data);
427 if (inode->i_state & I_PINNING_FSCACHE_WB)
428 cifs_fscache_unuse_inode_cookie(inode, true);
429 cifs_fscache_release_inode_cookie(inode);
430 clear_inode(inode);
431}
432
433static void
434cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
435{
436 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
437 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
438
439 seq_puts(s, ",addr=");
440
441 switch (server->dstaddr.ss_family) {
442 case AF_INET:
443 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
444 break;
445 case AF_INET6:
446 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
447 if (sa6->sin6_scope_id)
448 seq_printf(s, "%%%u", sa6->sin6_scope_id);
449 break;
450 default:
451 seq_puts(s, "(unknown)");
452 }
453 if (server->rdma)
454 seq_puts(s, ",rdma");
455}
456
457static void
458cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
459{
460 if (ses->sectype == Unspecified) {
461 if (ses->user_name == NULL)
462 seq_puts(s, ",sec=none");
463 return;
464 }
465
466 seq_puts(s, ",sec=");
467
468 switch (ses->sectype) {
469 case NTLMv2:
470 seq_puts(s, "ntlmv2");
471 break;
472 case Kerberos:
473 seq_puts(s, "krb5");
474 break;
475 case RawNTLMSSP:
476 seq_puts(s, "ntlmssp");
477 break;
478 default:
479 /* shouldn't ever happen */
480 seq_puts(s, "unknown");
481 break;
482 }
483
484 if (ses->sign)
485 seq_puts(s, "i");
486
487 if (ses->sectype == Kerberos)
488 seq_printf(s, ",cruid=%u",
489 from_kuid_munged(&init_user_ns, ses->cred_uid));
490}
491
492static void
493cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
494{
495 seq_puts(s, ",cache=");
496
497 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
498 seq_puts(s, "strict");
499 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
500 seq_puts(s, "none");
501 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
502 seq_puts(s, "singleclient"); /* assume only one client access */
503 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
504 seq_puts(s, "ro"); /* read only caching assumed */
505 else
506 seq_puts(s, "loose");
507}
508
509/*
510 * cifs_show_devname() is used so we show the mount device name with correct
511 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
512 */
513static int cifs_show_devname(struct seq_file *m, struct dentry *root)
514{
515 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
516 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
517
518 if (devname == NULL)
519 seq_puts(m, "none");
520 else {
521 convert_delimiter(devname, '/');
522 /* escape all spaces in share names */
523 seq_escape(m, devname, " \t");
524 kfree(devname);
525 }
526 return 0;
527}
528
529/*
530 * cifs_show_options() is for displaying mount options in /proc/mounts.
531 * Not all settable options are displayed but most of the important
532 * ones are.
533 */
534static int
535cifs_show_options(struct seq_file *s, struct dentry *root)
536{
537 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
538 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
539 struct sockaddr *srcaddr;
540 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
541
542 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
543 cifs_show_security(s, tcon->ses);
544 cifs_show_cache_flavor(s, cifs_sb);
545
546 if (tcon->no_lease)
547 seq_puts(s, ",nolease");
548 if (cifs_sb->ctx->multiuser)
549 seq_puts(s, ",multiuser");
550 else if (tcon->ses->user_name)
551 seq_show_option(s, "username", tcon->ses->user_name);
552
553 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
554 seq_show_option(s, "domain", tcon->ses->domainName);
555
556 if (srcaddr->sa_family != AF_UNSPEC) {
557 struct sockaddr_in *saddr4;
558 struct sockaddr_in6 *saddr6;
559 saddr4 = (struct sockaddr_in *)srcaddr;
560 saddr6 = (struct sockaddr_in6 *)srcaddr;
561 if (srcaddr->sa_family == AF_INET6)
562 seq_printf(s, ",srcaddr=%pI6c",
563 &saddr6->sin6_addr);
564 else if (srcaddr->sa_family == AF_INET)
565 seq_printf(s, ",srcaddr=%pI4",
566 &saddr4->sin_addr.s_addr);
567 else
568 seq_printf(s, ",srcaddr=BAD-AF:%i",
569 (int)(srcaddr->sa_family));
570 }
571
572 seq_printf(s, ",uid=%u",
573 from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
574 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
575 seq_puts(s, ",forceuid");
576 else
577 seq_puts(s, ",noforceuid");
578
579 seq_printf(s, ",gid=%u",
580 from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
581 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
582 seq_puts(s, ",forcegid");
583 else
584 seq_puts(s, ",noforcegid");
585
586 cifs_show_address(s, tcon->ses->server);
587
588 if (!tcon->unix_ext)
589 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
590 cifs_sb->ctx->file_mode,
591 cifs_sb->ctx->dir_mode);
592 if (cifs_sb->ctx->iocharset)
593 seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
594 if (tcon->seal)
595 seq_puts(s, ",seal");
596 else if (tcon->ses->server->ignore_signature)
597 seq_puts(s, ",signloosely");
598 if (tcon->nocase)
599 seq_puts(s, ",nocase");
600 if (tcon->nodelete)
601 seq_puts(s, ",nodelete");
602 if (cifs_sb->ctx->no_sparse)
603 seq_puts(s, ",nosparse");
604 if (tcon->local_lease)
605 seq_puts(s, ",locallease");
606 if (tcon->retry)
607 seq_puts(s, ",hard");
608 else
609 seq_puts(s, ",soft");
610 if (tcon->use_persistent)
611 seq_puts(s, ",persistenthandles");
612 else if (tcon->use_resilient)
613 seq_puts(s, ",resilienthandles");
614 if (tcon->posix_extensions)
615 seq_puts(s, ",posix");
616 else if (tcon->unix_ext)
617 seq_puts(s, ",unix");
618 else
619 seq_puts(s, ",nounix");
620 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
621 seq_puts(s, ",nodfs");
622 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
623 seq_puts(s, ",posixpaths");
624 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
625 seq_puts(s, ",setuids");
626 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
627 seq_puts(s, ",idsfromsid");
628 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
629 seq_puts(s, ",serverino");
630 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
631 seq_puts(s, ",rwpidforward");
632 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
633 seq_puts(s, ",forcemand");
634 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
635 seq_puts(s, ",nouser_xattr");
636 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
637 seq_puts(s, ",mapchars");
638 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
639 seq_puts(s, ",mapposix");
640 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
641 seq_puts(s, ",sfu");
642 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
643 seq_puts(s, ",nobrl");
644 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
645 seq_puts(s, ",nohandlecache");
646 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
647 seq_puts(s, ",modefromsid");
648 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
649 seq_puts(s, ",cifsacl");
650 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
651 seq_puts(s, ",dynperm");
652 if (root->d_sb->s_flags & SB_POSIXACL)
653 seq_puts(s, ",acl");
654 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
655 seq_puts(s, ",mfsymlinks");
656 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
657 seq_puts(s, ",fsc");
658 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
659 seq_puts(s, ",nostrictsync");
660 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
661 seq_puts(s, ",noperm");
662 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
663 seq_printf(s, ",backupuid=%u",
664 from_kuid_munged(&init_user_ns,
665 cifs_sb->ctx->backupuid));
666 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
667 seq_printf(s, ",backupgid=%u",
668 from_kgid_munged(&init_user_ns,
669 cifs_sb->ctx->backupgid));
670
671 seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
672 seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
673 seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
674 if (cifs_sb->ctx->rasize)
675 seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
676 if (tcon->ses->server->min_offload)
677 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
678 seq_printf(s, ",echo_interval=%lu",
679 tcon->ses->server->echo_interval / HZ);
680
681 /* Only display the following if overridden on mount */
682 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
683 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
684 if (tcon->ses->server->tcp_nodelay)
685 seq_puts(s, ",tcpnodelay");
686 if (tcon->ses->server->noautotune)
687 seq_puts(s, ",noautotune");
688 if (tcon->ses->server->noblocksnd)
689 seq_puts(s, ",noblocksend");
690
691 if (tcon->snapshot_time)
692 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
693 if (tcon->handle_timeout)
694 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
695
696 /*
697 * Display file and directory attribute timeout in seconds.
698 * If file and directory attribute timeout the same then actimeo
699 * was likely specified on mount
700 */
701 if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
702 seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
703 else {
704 seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
705 seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
706 }
707 seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
708
709 if (tcon->ses->chan_max > 1)
710 seq_printf(s, ",multichannel,max_channels=%zu",
711 tcon->ses->chan_max);
712
713 if (tcon->use_witness)
714 seq_puts(s, ",witness");
715
716 return 0;
717}
718
719static void cifs_umount_begin(struct super_block *sb)
720{
721 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
722 struct cifs_tcon *tcon;
723
724 if (cifs_sb == NULL)
725 return;
726
727 tcon = cifs_sb_master_tcon(cifs_sb);
728
729 spin_lock(&cifs_tcp_ses_lock);
730 spin_lock(&tcon->tc_lock);
731 if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
732 /* we have other mounts to same share or we have
733 already tried to force umount this and woken up
734 all waiting network requests, nothing to do */
735 spin_unlock(&tcon->tc_lock);
736 spin_unlock(&cifs_tcp_ses_lock);
737 return;
738 } else if (tcon->tc_count == 1)
739 tcon->status = TID_EXITING;
740 spin_unlock(&tcon->tc_lock);
741 spin_unlock(&cifs_tcp_ses_lock);
742
743 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
744 /* cancel_notify_requests(tcon); */
745 if (tcon->ses && tcon->ses->server) {
746 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
747 wake_up_all(&tcon->ses->server->request_q);
748 wake_up_all(&tcon->ses->server->response_q);
749 msleep(1); /* yield */
750 /* we have to kick the requests once more */
751 wake_up_all(&tcon->ses->server->response_q);
752 msleep(1);
753 }
754
755 return;
756}
757
758#ifdef CONFIG_CIFS_STATS2
759static int cifs_show_stats(struct seq_file *s, struct dentry *root)
760{
761 /* BB FIXME */
762 return 0;
763}
764#endif
765
766static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
767{
768 fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
769 return 0;
770}
771
772static int cifs_drop_inode(struct inode *inode)
773{
774 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
775
776 /* no serverino => unconditional eviction */
777 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
778 generic_drop_inode(inode);
779}
780
781static const struct super_operations cifs_super_ops = {
782 .statfs = cifs_statfs,
783 .alloc_inode = cifs_alloc_inode,
784 .write_inode = cifs_write_inode,
785 .free_inode = cifs_free_inode,
786 .drop_inode = cifs_drop_inode,
787 .evict_inode = cifs_evict_inode,
788/* .show_path = cifs_show_path, */ /* Would we ever need show path? */
789 .show_devname = cifs_show_devname,
790/* .delete_inode = cifs_delete_inode, */ /* Do not need above
791 function unless later we add lazy close of inodes or unless the
792 kernel forgets to call us with the same number of releases (closes)
793 as opens */
794 .show_options = cifs_show_options,
795 .umount_begin = cifs_umount_begin,
796#ifdef CONFIG_CIFS_STATS2
797 .show_stats = cifs_show_stats,
798#endif
799};
800
801/*
802 * Get root dentry from superblock according to prefix path mount option.
803 * Return dentry with refcount + 1 on success and NULL otherwise.
804 */
805static struct dentry *
806cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
807{
808 struct dentry *dentry;
809 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
810 char *full_path = NULL;
811 char *s, *p;
812 char sep;
813
814 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
815 return dget(sb->s_root);
816
817 full_path = cifs_build_path_to_root(ctx, cifs_sb,
818 cifs_sb_master_tcon(cifs_sb), 0);
819 if (full_path == NULL)
820 return ERR_PTR(-ENOMEM);
821
822 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
823
824 sep = CIFS_DIR_SEP(cifs_sb);
825 dentry = dget(sb->s_root);
826 s = full_path;
827
828 do {
829 struct inode *dir = d_inode(dentry);
830 struct dentry *child;
831
832 if (!S_ISDIR(dir->i_mode)) {
833 dput(dentry);
834 dentry = ERR_PTR(-ENOTDIR);
835 break;
836 }
837
838 /* skip separators */
839 while (*s == sep)
840 s++;
841 if (!*s)
842 break;
843 p = s++;
844 /* next separator */
845 while (*s && *s != sep)
846 s++;
847
848 child = lookup_positive_unlocked(p, dentry, s - p);
849 dput(dentry);
850 dentry = child;
851 } while (!IS_ERR(dentry));
852 kfree(full_path);
853 return dentry;
854}
855
856static int cifs_set_super(struct super_block *sb, void *data)
857{
858 struct cifs_mnt_data *mnt_data = data;
859 sb->s_fs_info = mnt_data->cifs_sb;
860 return set_anon_super(sb, NULL);
861}
862
863struct dentry *
864cifs_smb3_do_mount(struct file_system_type *fs_type,
865 int flags, struct smb3_fs_context *old_ctx)
866{
867 int rc;
868 struct super_block *sb = NULL;
869 struct cifs_sb_info *cifs_sb = NULL;
870 struct cifs_mnt_data mnt_data;
871 struct dentry *root;
872
873 /*
874 * Prints in Kernel / CIFS log the attempted mount operation
875 * If CIFS_DEBUG && cifs_FYI
876 */
877 if (cifsFYI)
878 cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags);
879 else
880 cifs_info("Attempting to mount %s\n", old_ctx->UNC);
881
882 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
883 if (cifs_sb == NULL) {
884 root = ERR_PTR(-ENOMEM);
885 goto out;
886 }
887
888 cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
889 if (!cifs_sb->ctx) {
890 root = ERR_PTR(-ENOMEM);
891 goto out;
892 }
893 rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
894 if (rc) {
895 root = ERR_PTR(rc);
896 goto out;
897 }
898
899 rc = cifs_setup_cifs_sb(cifs_sb);
900 if (rc) {
901 root = ERR_PTR(rc);
902 goto out;
903 }
904
905 rc = cifs_mount(cifs_sb, cifs_sb->ctx);
906 if (rc) {
907 if (!(flags & SB_SILENT))
908 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
909 rc);
910 root = ERR_PTR(rc);
911 goto out;
912 }
913
914 mnt_data.ctx = cifs_sb->ctx;
915 mnt_data.cifs_sb = cifs_sb;
916 mnt_data.flags = flags;
917
918 /* BB should we make this contingent on mount parm? */
919 flags |= SB_NODIRATIME | SB_NOATIME;
920
921 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
922 if (IS_ERR(sb)) {
923 root = ERR_CAST(sb);
924 cifs_umount(cifs_sb);
925 cifs_sb = NULL;
926 goto out;
927 }
928
929 if (sb->s_root) {
930 cifs_dbg(FYI, "Use existing superblock\n");
931 cifs_umount(cifs_sb);
932 cifs_sb = NULL;
933 } else {
934 rc = cifs_read_super(sb);
935 if (rc) {
936 root = ERR_PTR(rc);
937 goto out_super;
938 }
939
940 sb->s_flags |= SB_ACTIVE;
941 }
942
943 root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
944 if (IS_ERR(root))
945 goto out_super;
946
947 if (cifs_sb)
948 cifs_sb->root = dget(root);
949
950 cifs_dbg(FYI, "dentry root is: %p\n", root);
951 return root;
952
953out_super:
954 deactivate_locked_super(sb);
955 return root;
956out:
957 if (cifs_sb) {
958 if (!sb || IS_ERR(sb)) { /* otherwise kill_sb will handle */
959 kfree(cifs_sb->prepath);
960 smb3_cleanup_fs_context(cifs_sb->ctx);
961 kfree(cifs_sb);
962 }
963 }
964 return root;
965}
966
967
968static ssize_t
969cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
970{
971 ssize_t rc;
972 struct inode *inode = file_inode(iocb->ki_filp);
973
974 if (iocb->ki_flags & IOCB_DIRECT)
975 return cifs_user_readv(iocb, iter);
976
977 rc = cifs_revalidate_mapping(inode);
978 if (rc)
979 return rc;
980
981 return generic_file_read_iter(iocb, iter);
982}
983
984static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
985{
986 struct inode *inode = file_inode(iocb->ki_filp);
987 struct cifsInodeInfo *cinode = CIFS_I(inode);
988 ssize_t written;
989 int rc;
990
991 if (iocb->ki_filp->f_flags & O_DIRECT) {
992 written = cifs_user_writev(iocb, from);
993 if (written > 0 && CIFS_CACHE_READ(cinode)) {
994 cifs_zap_mapping(inode);
995 cifs_dbg(FYI,
996 "Set no oplock for inode=%p after a write operation\n",
997 inode);
998 cinode->oplock = 0;
999 }
1000 return written;
1001 }
1002
1003 written = cifs_get_writer(cinode);
1004 if (written)
1005 return written;
1006
1007 written = generic_file_write_iter(iocb, from);
1008
1009 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1010 goto out;
1011
1012 rc = filemap_fdatawrite(inode->i_mapping);
1013 if (rc)
1014 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1015 rc, inode);
1016
1017out:
1018 cifs_put_writer(cinode);
1019 return written;
1020}
1021
1022static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1023{
1024 struct cifsFileInfo *cfile = file->private_data;
1025 struct cifs_tcon *tcon;
1026
1027 /*
1028 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1029 * the cached file length
1030 */
1031 if (whence != SEEK_SET && whence != SEEK_CUR) {
1032 int rc;
1033 struct inode *inode = file_inode(file);
1034
1035 /*
1036 * We need to be sure that all dirty pages are written and the
1037 * server has the newest file length.
1038 */
1039 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1040 inode->i_mapping->nrpages != 0) {
1041 rc = filemap_fdatawait(inode->i_mapping);
1042 if (rc) {
1043 mapping_set_error(inode->i_mapping, rc);
1044 return rc;
1045 }
1046 }
1047 /*
1048 * Some applications poll for the file length in this strange
1049 * way so we must seek to end on non-oplocked files by
1050 * setting the revalidate time to zero.
1051 */
1052 CIFS_I(inode)->time = 0;
1053
1054 rc = cifs_revalidate_file_attr(file);
1055 if (rc < 0)
1056 return (loff_t)rc;
1057 }
1058 if (cfile && cfile->tlink) {
1059 tcon = tlink_tcon(cfile->tlink);
1060 if (tcon->ses->server->ops->llseek)
1061 return tcon->ses->server->ops->llseek(file, tcon,
1062 offset, whence);
1063 }
1064 return generic_file_llseek(file, offset, whence);
1065}
1066
1067static int
1068cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
1069{
1070 /*
1071 * Note that this is called by vfs setlease with i_lock held to
1072 * protect *lease from going away.
1073 */
1074 struct inode *inode = file_inode(file);
1075 struct cifsFileInfo *cfile = file->private_data;
1076
1077 if (!(S_ISREG(inode->i_mode)))
1078 return -EINVAL;
1079
1080 /* Check if file is oplocked if this is request for new lease */
1081 if (arg == F_UNLCK ||
1082 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1083 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1084 return generic_setlease(file, arg, lease, priv);
1085 else if (tlink_tcon(cfile->tlink)->local_lease &&
1086 !CIFS_CACHE_READ(CIFS_I(inode)))
1087 /*
1088 * If the server claims to support oplock on this file, then we
1089 * still need to check oplock even if the local_lease mount
1090 * option is set, but there are servers which do not support
1091 * oplock for which this mount option may be useful if the user
1092 * knows that the file won't be changed on the server by anyone
1093 * else.
1094 */
1095 return generic_setlease(file, arg, lease, priv);
1096 else
1097 return -EAGAIN;
1098}
1099
1100struct file_system_type cifs_fs_type = {
1101 .owner = THIS_MODULE,
1102 .name = "cifs",
1103 .init_fs_context = smb3_init_fs_context,
1104 .parameters = smb3_fs_parameters,
1105 .kill_sb = cifs_kill_sb,
1106 .fs_flags = FS_RENAME_DOES_D_MOVE,
1107};
1108MODULE_ALIAS_FS("cifs");
1109
1110struct file_system_type smb3_fs_type = {
1111 .owner = THIS_MODULE,
1112 .name = "smb3",
1113 .init_fs_context = smb3_init_fs_context,
1114 .parameters = smb3_fs_parameters,
1115 .kill_sb = cifs_kill_sb,
1116 .fs_flags = FS_RENAME_DOES_D_MOVE,
1117};
1118MODULE_ALIAS_FS("smb3");
1119MODULE_ALIAS("smb3");
1120
1121const struct inode_operations cifs_dir_inode_ops = {
1122 .create = cifs_create,
1123 .atomic_open = cifs_atomic_open,
1124 .lookup = cifs_lookup,
1125 .getattr = cifs_getattr,
1126 .unlink = cifs_unlink,
1127 .link = cifs_hardlink,
1128 .mkdir = cifs_mkdir,
1129 .rmdir = cifs_rmdir,
1130 .rename = cifs_rename2,
1131 .permission = cifs_permission,
1132 .setattr = cifs_setattr,
1133 .symlink = cifs_symlink,
1134 .mknod = cifs_mknod,
1135 .listxattr = cifs_listxattr,
1136 .get_acl = cifs_get_acl,
1137 .set_acl = cifs_set_acl,
1138};
1139
1140const struct inode_operations cifs_file_inode_ops = {
1141 .setattr = cifs_setattr,
1142 .getattr = cifs_getattr,
1143 .permission = cifs_permission,
1144 .listxattr = cifs_listxattr,
1145 .fiemap = cifs_fiemap,
1146 .get_acl = cifs_get_acl,
1147 .set_acl = cifs_set_acl,
1148};
1149
1150const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1151 struct delayed_call *done)
1152{
1153 char *target_path;
1154
1155 target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1156 if (!target_path)
1157 return ERR_PTR(-ENOMEM);
1158
1159 spin_lock(&inode->i_lock);
1160 if (likely(CIFS_I(inode)->symlink_target)) {
1161 strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1162 } else {
1163 kfree(target_path);
1164 target_path = ERR_PTR(-EOPNOTSUPP);
1165 }
1166 spin_unlock(&inode->i_lock);
1167
1168 if (!IS_ERR(target_path))
1169 set_delayed_call(done, kfree_link, target_path);
1170
1171 return target_path;
1172}
1173
1174const struct inode_operations cifs_symlink_inode_ops = {
1175 .get_link = cifs_get_link,
1176 .permission = cifs_permission,
1177 .listxattr = cifs_listxattr,
1178};
1179
1180static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1181 struct file *dst_file, loff_t destoff, loff_t len,
1182 unsigned int remap_flags)
1183{
1184 struct inode *src_inode = file_inode(src_file);
1185 struct inode *target_inode = file_inode(dst_file);
1186 struct cifsFileInfo *smb_file_src = src_file->private_data;
1187 struct cifsFileInfo *smb_file_target;
1188 struct cifs_tcon *target_tcon;
1189 unsigned int xid;
1190 int rc;
1191
1192 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1193 return -EINVAL;
1194
1195 cifs_dbg(FYI, "clone range\n");
1196
1197 xid = get_xid();
1198
1199 if (!src_file->private_data || !dst_file->private_data) {
1200 rc = -EBADF;
1201 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1202 goto out;
1203 }
1204
1205 smb_file_target = dst_file->private_data;
1206 target_tcon = tlink_tcon(smb_file_target->tlink);
1207
1208 /*
1209 * Note: cifs case is easier than btrfs since server responsible for
1210 * checks for proper open modes and file type and if it wants
1211 * server could even support copy of range where source = target
1212 */
1213 lock_two_nondirectories(target_inode, src_inode);
1214
1215 if (len == 0)
1216 len = src_inode->i_size - off;
1217
1218 cifs_dbg(FYI, "about to flush pages\n");
1219 /* should we flush first and last page first */
1220 truncate_inode_pages_range(&target_inode->i_data, destoff,
1221 PAGE_ALIGN(destoff + len)-1);
1222
1223 if (target_tcon->ses->server->ops->duplicate_extents)
1224 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1225 smb_file_src, smb_file_target, off, len, destoff);
1226 else
1227 rc = -EOPNOTSUPP;
1228
1229 /* force revalidate of size and timestamps of target file now
1230 that target is updated on the server */
1231 CIFS_I(target_inode)->time = 0;
1232 /* although unlocking in the reverse order from locking is not
1233 strictly necessary here it is a little cleaner to be consistent */
1234 unlock_two_nondirectories(src_inode, target_inode);
1235out:
1236 free_xid(xid);
1237 return rc < 0 ? rc : len;
1238}
1239
1240ssize_t cifs_file_copychunk_range(unsigned int xid,
1241 struct file *src_file, loff_t off,
1242 struct file *dst_file, loff_t destoff,
1243 size_t len, unsigned int flags)
1244{
1245 struct inode *src_inode = file_inode(src_file);
1246 struct inode *target_inode = file_inode(dst_file);
1247 struct cifsFileInfo *smb_file_src;
1248 struct cifsFileInfo *smb_file_target;
1249 struct cifs_tcon *src_tcon;
1250 struct cifs_tcon *target_tcon;
1251 ssize_t rc;
1252
1253 cifs_dbg(FYI, "copychunk range\n");
1254
1255 if (!src_file->private_data || !dst_file->private_data) {
1256 rc = -EBADF;
1257 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1258 goto out;
1259 }
1260
1261 rc = -EXDEV;
1262 smb_file_target = dst_file->private_data;
1263 smb_file_src = src_file->private_data;
1264 src_tcon = tlink_tcon(smb_file_src->tlink);
1265 target_tcon = tlink_tcon(smb_file_target->tlink);
1266
1267 if (src_tcon->ses != target_tcon->ses) {
1268 cifs_dbg(VFS, "source and target of copy not on same server\n");
1269 goto out;
1270 }
1271
1272 rc = -EOPNOTSUPP;
1273 if (!target_tcon->ses->server->ops->copychunk_range)
1274 goto out;
1275
1276 /*
1277 * Note: cifs case is easier than btrfs since server responsible for
1278 * checks for proper open modes and file type and if it wants
1279 * server could even support copy of range where source = target
1280 */
1281 lock_two_nondirectories(target_inode, src_inode);
1282
1283 cifs_dbg(FYI, "about to flush pages\n");
1284
1285 rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1286 off + len - 1);
1287 if (rc)
1288 goto unlock;
1289
1290 /* should we flush first and last page first */
1291 truncate_inode_pages(&target_inode->i_data, 0);
1292
1293 rc = file_modified(dst_file);
1294 if (!rc)
1295 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1296 smb_file_src, smb_file_target, off, len, destoff);
1297
1298 file_accessed(src_file);
1299
1300 /* force revalidate of size and timestamps of target file now
1301 * that target is updated on the server
1302 */
1303 CIFS_I(target_inode)->time = 0;
1304
1305unlock:
1306 /* although unlocking in the reverse order from locking is not
1307 * strictly necessary here it is a little cleaner to be consistent
1308 */
1309 unlock_two_nondirectories(src_inode, target_inode);
1310
1311out:
1312 return rc;
1313}
1314
1315/*
1316 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1317 * is a dummy operation.
1318 */
1319static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1320{
1321 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1322 file, datasync);
1323
1324 return 0;
1325}
1326
1327static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1328 struct file *dst_file, loff_t destoff,
1329 size_t len, unsigned int flags)
1330{
1331 unsigned int xid = get_xid();
1332 ssize_t rc;
1333 struct cifsFileInfo *cfile = dst_file->private_data;
1334
1335 if (cfile->swapfile) {
1336 rc = -EOPNOTSUPP;
1337 free_xid(xid);
1338 return rc;
1339 }
1340
1341 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1342 len, flags);
1343 free_xid(xid);
1344
1345 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1346 rc = generic_copy_file_range(src_file, off, dst_file,
1347 destoff, len, flags);
1348 return rc;
1349}
1350
1351const struct file_operations cifs_file_ops = {
1352 .read_iter = cifs_loose_read_iter,
1353 .write_iter = cifs_file_write_iter,
1354 .open = cifs_open,
1355 .release = cifs_close,
1356 .lock = cifs_lock,
1357 .flock = cifs_flock,
1358 .fsync = cifs_fsync,
1359 .flush = cifs_flush,
1360 .mmap = cifs_file_mmap,
1361 .splice_read = generic_file_splice_read,
1362 .splice_write = iter_file_splice_write,
1363 .llseek = cifs_llseek,
1364 .unlocked_ioctl = cifs_ioctl,
1365 .copy_file_range = cifs_copy_file_range,
1366 .remap_file_range = cifs_remap_file_range,
1367 .setlease = cifs_setlease,
1368 .fallocate = cifs_fallocate,
1369};
1370
1371const struct file_operations cifs_file_strict_ops = {
1372 .read_iter = cifs_strict_readv,
1373 .write_iter = cifs_strict_writev,
1374 .open = cifs_open,
1375 .release = cifs_close,
1376 .lock = cifs_lock,
1377 .flock = cifs_flock,
1378 .fsync = cifs_strict_fsync,
1379 .flush = cifs_flush,
1380 .mmap = cifs_file_strict_mmap,
1381 .splice_read = generic_file_splice_read,
1382 .splice_write = iter_file_splice_write,
1383 .llseek = cifs_llseek,
1384 .unlocked_ioctl = cifs_ioctl,
1385 .copy_file_range = cifs_copy_file_range,
1386 .remap_file_range = cifs_remap_file_range,
1387 .setlease = cifs_setlease,
1388 .fallocate = cifs_fallocate,
1389};
1390
1391const struct file_operations cifs_file_direct_ops = {
1392 .read_iter = cifs_direct_readv,
1393 .write_iter = cifs_direct_writev,
1394 .open = cifs_open,
1395 .release = cifs_close,
1396 .lock = cifs_lock,
1397 .flock = cifs_flock,
1398 .fsync = cifs_fsync,
1399 .flush = cifs_flush,
1400 .mmap = cifs_file_mmap,
1401 .splice_read = generic_file_splice_read,
1402 .splice_write = iter_file_splice_write,
1403 .unlocked_ioctl = cifs_ioctl,
1404 .copy_file_range = cifs_copy_file_range,
1405 .remap_file_range = cifs_remap_file_range,
1406 .llseek = cifs_llseek,
1407 .setlease = cifs_setlease,
1408 .fallocate = cifs_fallocate,
1409};
1410
1411const struct file_operations cifs_file_nobrl_ops = {
1412 .read_iter = cifs_loose_read_iter,
1413 .write_iter = cifs_file_write_iter,
1414 .open = cifs_open,
1415 .release = cifs_close,
1416 .fsync = cifs_fsync,
1417 .flush = cifs_flush,
1418 .mmap = cifs_file_mmap,
1419 .splice_read = generic_file_splice_read,
1420 .splice_write = iter_file_splice_write,
1421 .llseek = cifs_llseek,
1422 .unlocked_ioctl = cifs_ioctl,
1423 .copy_file_range = cifs_copy_file_range,
1424 .remap_file_range = cifs_remap_file_range,
1425 .setlease = cifs_setlease,
1426 .fallocate = cifs_fallocate,
1427};
1428
1429const struct file_operations cifs_file_strict_nobrl_ops = {
1430 .read_iter = cifs_strict_readv,
1431 .write_iter = cifs_strict_writev,
1432 .open = cifs_open,
1433 .release = cifs_close,
1434 .fsync = cifs_strict_fsync,
1435 .flush = cifs_flush,
1436 .mmap = cifs_file_strict_mmap,
1437 .splice_read = generic_file_splice_read,
1438 .splice_write = iter_file_splice_write,
1439 .llseek = cifs_llseek,
1440 .unlocked_ioctl = cifs_ioctl,
1441 .copy_file_range = cifs_copy_file_range,
1442 .remap_file_range = cifs_remap_file_range,
1443 .setlease = cifs_setlease,
1444 .fallocate = cifs_fallocate,
1445};
1446
1447const struct file_operations cifs_file_direct_nobrl_ops = {
1448 .read_iter = cifs_direct_readv,
1449 .write_iter = cifs_direct_writev,
1450 .open = cifs_open,
1451 .release = cifs_close,
1452 .fsync = cifs_fsync,
1453 .flush = cifs_flush,
1454 .mmap = cifs_file_mmap,
1455 .splice_read = generic_file_splice_read,
1456 .splice_write = iter_file_splice_write,
1457 .unlocked_ioctl = cifs_ioctl,
1458 .copy_file_range = cifs_copy_file_range,
1459 .remap_file_range = cifs_remap_file_range,
1460 .llseek = cifs_llseek,
1461 .setlease = cifs_setlease,
1462 .fallocate = cifs_fallocate,
1463};
1464
1465const struct file_operations cifs_dir_ops = {
1466 .iterate_shared = cifs_readdir,
1467 .release = cifs_closedir,
1468 .read = generic_read_dir,
1469 .unlocked_ioctl = cifs_ioctl,
1470 .copy_file_range = cifs_copy_file_range,
1471 .remap_file_range = cifs_remap_file_range,
1472 .llseek = generic_file_llseek,
1473 .fsync = cifs_dir_fsync,
1474};
1475
1476static void
1477cifs_init_once(void *inode)
1478{
1479 struct cifsInodeInfo *cifsi = inode;
1480
1481 inode_init_once(&cifsi->netfs.inode);
1482 init_rwsem(&cifsi->lock_sem);
1483}
1484
1485static int __init
1486cifs_init_inodecache(void)
1487{
1488 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1489 sizeof(struct cifsInodeInfo),
1490 0, (SLAB_RECLAIM_ACCOUNT|
1491 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1492 cifs_init_once);
1493 if (cifs_inode_cachep == NULL)
1494 return -ENOMEM;
1495
1496 return 0;
1497}
1498
1499static void
1500cifs_destroy_inodecache(void)
1501{
1502 /*
1503 * Make sure all delayed rcu free inodes are flushed before we
1504 * destroy cache.
1505 */
1506 rcu_barrier();
1507 kmem_cache_destroy(cifs_inode_cachep);
1508}
1509
1510static int
1511cifs_init_request_bufs(void)
1512{
1513 /*
1514 * SMB2 maximum header size is bigger than CIFS one - no problems to
1515 * allocate some more bytes for CIFS.
1516 */
1517 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1518
1519 if (CIFSMaxBufSize < 8192) {
1520 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1521 Unicode path name has to fit in any SMB/CIFS path based frames */
1522 CIFSMaxBufSize = 8192;
1523 } else if (CIFSMaxBufSize > 1024*127) {
1524 CIFSMaxBufSize = 1024 * 127;
1525 } else {
1526 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1527 }
1528/*
1529 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1530 CIFSMaxBufSize, CIFSMaxBufSize);
1531*/
1532 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1533 CIFSMaxBufSize + max_hdr_size, 0,
1534 SLAB_HWCACHE_ALIGN, 0,
1535 CIFSMaxBufSize + max_hdr_size,
1536 NULL);
1537 if (cifs_req_cachep == NULL)
1538 return -ENOMEM;
1539
1540 if (cifs_min_rcv < 1)
1541 cifs_min_rcv = 1;
1542 else if (cifs_min_rcv > 64) {
1543 cifs_min_rcv = 64;
1544 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1545 }
1546
1547 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1548 cifs_req_cachep);
1549
1550 if (cifs_req_poolp == NULL) {
1551 kmem_cache_destroy(cifs_req_cachep);
1552 return -ENOMEM;
1553 }
1554 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1555 almost all handle based requests (but not write response, nor is it
1556 sufficient for path based requests). A smaller size would have
1557 been more efficient (compacting multiple slab items on one 4k page)
1558 for the case in which debug was on, but this larger size allows
1559 more SMBs to use small buffer alloc and is still much more
1560 efficient to alloc 1 per page off the slab compared to 17K (5page)
1561 alloc of large cifs buffers even when page debugging is on */
1562 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1563 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1564 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1565 if (cifs_sm_req_cachep == NULL) {
1566 mempool_destroy(cifs_req_poolp);
1567 kmem_cache_destroy(cifs_req_cachep);
1568 return -ENOMEM;
1569 }
1570
1571 if (cifs_min_small < 2)
1572 cifs_min_small = 2;
1573 else if (cifs_min_small > 256) {
1574 cifs_min_small = 256;
1575 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1576 }
1577
1578 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1579 cifs_sm_req_cachep);
1580
1581 if (cifs_sm_req_poolp == NULL) {
1582 mempool_destroy(cifs_req_poolp);
1583 kmem_cache_destroy(cifs_req_cachep);
1584 kmem_cache_destroy(cifs_sm_req_cachep);
1585 return -ENOMEM;
1586 }
1587
1588 return 0;
1589}
1590
1591static void
1592cifs_destroy_request_bufs(void)
1593{
1594 mempool_destroy(cifs_req_poolp);
1595 kmem_cache_destroy(cifs_req_cachep);
1596 mempool_destroy(cifs_sm_req_poolp);
1597 kmem_cache_destroy(cifs_sm_req_cachep);
1598}
1599
1600static int init_mids(void)
1601{
1602 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1603 sizeof(struct mid_q_entry), 0,
1604 SLAB_HWCACHE_ALIGN, NULL);
1605 if (cifs_mid_cachep == NULL)
1606 return -ENOMEM;
1607
1608 /* 3 is a reasonable minimum number of simultaneous operations */
1609 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1610 if (cifs_mid_poolp == NULL) {
1611 kmem_cache_destroy(cifs_mid_cachep);
1612 return -ENOMEM;
1613 }
1614
1615 return 0;
1616}
1617
1618static void destroy_mids(void)
1619{
1620 mempool_destroy(cifs_mid_poolp);
1621 kmem_cache_destroy(cifs_mid_cachep);
1622}
1623
1624static int __init
1625init_cifs(void)
1626{
1627 int rc = 0;
1628 cifs_proc_init();
1629 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1630/*
1631 * Initialize Global counters
1632 */
1633 atomic_set(&sesInfoAllocCount, 0);
1634 atomic_set(&tconInfoAllocCount, 0);
1635 atomic_set(&tcpSesNextId, 0);
1636 atomic_set(&tcpSesAllocCount, 0);
1637 atomic_set(&tcpSesReconnectCount, 0);
1638 atomic_set(&tconInfoReconnectCount, 0);
1639
1640 atomic_set(&buf_alloc_count, 0);
1641 atomic_set(&small_buf_alloc_count, 0);
1642#ifdef CONFIG_CIFS_STATS2
1643 atomic_set(&total_buf_alloc_count, 0);
1644 atomic_set(&total_small_buf_alloc_count, 0);
1645 if (slow_rsp_threshold < 1)
1646 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1647 else if (slow_rsp_threshold > 32767)
1648 cifs_dbg(VFS,
1649 "slow response threshold set higher than recommended (0 to 32767)\n");
1650#endif /* CONFIG_CIFS_STATS2 */
1651
1652 atomic_set(&mid_count, 0);
1653 GlobalCurrentXid = 0;
1654 GlobalTotalActiveXid = 0;
1655 GlobalMaxActiveXid = 0;
1656 spin_lock_init(&cifs_tcp_ses_lock);
1657 spin_lock_init(&GlobalMid_Lock);
1658
1659 cifs_lock_secret = get_random_u32();
1660
1661 if (cifs_max_pending < 2) {
1662 cifs_max_pending = 2;
1663 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1664 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1665 cifs_max_pending = CIFS_MAX_REQ;
1666 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1667 CIFS_MAX_REQ);
1668 }
1669
1670 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1671 if (!cifsiod_wq) {
1672 rc = -ENOMEM;
1673 goto out_clean_proc;
1674 }
1675
1676 /*
1677 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1678 * so that we don't launch too many worker threads but
1679 * Documentation/core-api/workqueue.rst recommends setting it to 0
1680 */
1681
1682 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1683 decrypt_wq = alloc_workqueue("smb3decryptd",
1684 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1685 if (!decrypt_wq) {
1686 rc = -ENOMEM;
1687 goto out_destroy_cifsiod_wq;
1688 }
1689
1690 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1691 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1692 if (!fileinfo_put_wq) {
1693 rc = -ENOMEM;
1694 goto out_destroy_decrypt_wq;
1695 }
1696
1697 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1698 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1699 if (!cifsoplockd_wq) {
1700 rc = -ENOMEM;
1701 goto out_destroy_fileinfo_put_wq;
1702 }
1703
1704 deferredclose_wq = alloc_workqueue("deferredclose",
1705 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1706 if (!deferredclose_wq) {
1707 rc = -ENOMEM;
1708 goto out_destroy_cifsoplockd_wq;
1709 }
1710
1711 rc = cifs_init_inodecache();
1712 if (rc)
1713 goto out_destroy_deferredclose_wq;
1714
1715 rc = init_mids();
1716 if (rc)
1717 goto out_destroy_inodecache;
1718
1719 rc = cifs_init_request_bufs();
1720 if (rc)
1721 goto out_destroy_mids;
1722
1723#ifdef CONFIG_CIFS_DFS_UPCALL
1724 rc = dfs_cache_init();
1725 if (rc)
1726 goto out_destroy_request_bufs;
1727#endif /* CONFIG_CIFS_DFS_UPCALL */
1728#ifdef CONFIG_CIFS_UPCALL
1729 rc = init_cifs_spnego();
1730 if (rc)
1731 goto out_destroy_dfs_cache;
1732#endif /* CONFIG_CIFS_UPCALL */
1733#ifdef CONFIG_CIFS_SWN_UPCALL
1734 rc = cifs_genl_init();
1735 if (rc)
1736 goto out_register_key_type;
1737#endif /* CONFIG_CIFS_SWN_UPCALL */
1738
1739 rc = init_cifs_idmap();
1740 if (rc)
1741 goto out_cifs_swn_init;
1742
1743 rc = register_filesystem(&cifs_fs_type);
1744 if (rc)
1745 goto out_init_cifs_idmap;
1746
1747 rc = register_filesystem(&smb3_fs_type);
1748 if (rc) {
1749 unregister_filesystem(&cifs_fs_type);
1750 goto out_init_cifs_idmap;
1751 }
1752
1753 return 0;
1754
1755out_init_cifs_idmap:
1756 exit_cifs_idmap();
1757out_cifs_swn_init:
1758#ifdef CONFIG_CIFS_SWN_UPCALL
1759 cifs_genl_exit();
1760out_register_key_type:
1761#endif
1762#ifdef CONFIG_CIFS_UPCALL
1763 exit_cifs_spnego();
1764out_destroy_dfs_cache:
1765#endif
1766#ifdef CONFIG_CIFS_DFS_UPCALL
1767 dfs_cache_destroy();
1768out_destroy_request_bufs:
1769#endif
1770 cifs_destroy_request_bufs();
1771out_destroy_mids:
1772 destroy_mids();
1773out_destroy_inodecache:
1774 cifs_destroy_inodecache();
1775out_destroy_deferredclose_wq:
1776 destroy_workqueue(deferredclose_wq);
1777out_destroy_cifsoplockd_wq:
1778 destroy_workqueue(cifsoplockd_wq);
1779out_destroy_fileinfo_put_wq:
1780 destroy_workqueue(fileinfo_put_wq);
1781out_destroy_decrypt_wq:
1782 destroy_workqueue(decrypt_wq);
1783out_destroy_cifsiod_wq:
1784 destroy_workqueue(cifsiod_wq);
1785out_clean_proc:
1786 cifs_proc_clean();
1787 return rc;
1788}
1789
1790static void __exit
1791exit_cifs(void)
1792{
1793 cifs_dbg(NOISY, "exit_smb3\n");
1794 unregister_filesystem(&cifs_fs_type);
1795 unregister_filesystem(&smb3_fs_type);
1796 cifs_dfs_release_automount_timer();
1797 exit_cifs_idmap();
1798#ifdef CONFIG_CIFS_SWN_UPCALL
1799 cifs_genl_exit();
1800#endif
1801#ifdef CONFIG_CIFS_UPCALL
1802 exit_cifs_spnego();
1803#endif
1804#ifdef CONFIG_CIFS_DFS_UPCALL
1805 dfs_cache_destroy();
1806#endif
1807 cifs_destroy_request_bufs();
1808 destroy_mids();
1809 cifs_destroy_inodecache();
1810 destroy_workqueue(deferredclose_wq);
1811 destroy_workqueue(cifsoplockd_wq);
1812 destroy_workqueue(decrypt_wq);
1813 destroy_workqueue(fileinfo_put_wq);
1814 destroy_workqueue(cifsiod_wq);
1815 cifs_proc_clean();
1816}
1817
1818MODULE_AUTHOR("Steve French");
1819MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1820MODULE_DESCRIPTION
1821 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1822 "also older servers complying with the SNIA CIFS Specification)");
1823MODULE_VERSION(CIFS_VERSION);
1824MODULE_SOFTDEP("ecb");
1825MODULE_SOFTDEP("hmac");
1826MODULE_SOFTDEP("md5");
1827MODULE_SOFTDEP("nls");
1828MODULE_SOFTDEP("aes");
1829MODULE_SOFTDEP("cmac");
1830MODULE_SOFTDEP("sha256");
1831MODULE_SOFTDEP("sha512");
1832MODULE_SOFTDEP("aead2");
1833MODULE_SOFTDEP("ccm");
1834MODULE_SOFTDEP("gcm");
1835module_init(init_cifs)
1836module_exit(exit_cifs)
1/*
2 * fs/cifs/cifsfs.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24/* Note that BB means BUGBUG (ie something to fix eventually) */
25
26#include <linux/module.h>
27#include <linux/fs.h>
28#include <linux/mount.h>
29#include <linux/slab.h>
30#include <linux/init.h>
31#include <linux/list.h>
32#include <linux/seq_file.h>
33#include <linux/vfs.h>
34#include <linux/mempool.h>
35#include <linux/delay.h>
36#include <linux/kthread.h>
37#include <linux/freezer.h>
38#include <linux/namei.h>
39#include <linux/random.h>
40#include <linux/xattr.h>
41#include <net/ipv6.h>
42#include "cifsfs.h"
43#include "cifspdu.h"
44#define DECLARE_GLOBALS_HERE
45#include "cifsglob.h"
46#include "cifsproto.h"
47#include "cifs_debug.h"
48#include "cifs_fs_sb.h"
49#include <linux/mm.h>
50#include <linux/key-type.h>
51#include "cifs_spnego.h"
52#include "fscache.h"
53#ifdef CONFIG_CIFS_SMB2
54#include "smb2pdu.h"
55#endif
56
57int cifsFYI = 0;
58bool traceSMB;
59bool enable_oplocks = true;
60bool linuxExtEnabled = true;
61bool lookupCacheEnabled = true;
62unsigned int global_secflags = CIFSSEC_DEF;
63/* unsigned int ntlmv2_support = 0; */
64unsigned int sign_CIFS_PDUs = 1;
65static const struct super_operations cifs_super_ops;
66unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
67module_param(CIFSMaxBufSize, uint, 0444);
68MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
69 "Default: 16384 Range: 8192 to 130048");
70unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
71module_param(cifs_min_rcv, uint, 0444);
72MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
73 "1 to 64");
74unsigned int cifs_min_small = 30;
75module_param(cifs_min_small, uint, 0444);
76MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
77 "Range: 2 to 256");
78unsigned int cifs_max_pending = CIFS_MAX_REQ;
79module_param(cifs_max_pending, uint, 0444);
80MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
81 "Default: 32767 Range: 2 to 32767.");
82module_param(enable_oplocks, bool, 0644);
83MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
84
85extern mempool_t *cifs_sm_req_poolp;
86extern mempool_t *cifs_req_poolp;
87extern mempool_t *cifs_mid_poolp;
88
89struct workqueue_struct *cifsiod_wq;
90__u32 cifs_lock_secret;
91
92/*
93 * Bumps refcount for cifs super block.
94 * Note that it should only be called if a reference to the VFS super block is
95 * already held, e.g. in the context of open-type syscalls. Otherwise it can race with
96 * atomic_dec_and_test in deactivate_locked_super.
97 */
98void
99cifs_sb_active(struct super_block *sb)
100{
101 struct cifs_sb_info *server = CIFS_SB(sb);
102
103 if (atomic_inc_return(&server->active) == 1)
104 atomic_inc(&sb->s_active);
105}
106
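/*
 * Drops the reference taken by cifs_sb_active(); once the last active
 * reference is gone the VFS super block is deactivated as well.
 */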
107void
108cifs_sb_deactive(struct super_block *sb)
109{
110 struct cifs_sb_info *server = CIFS_SB(sb);
111
112 if (atomic_dec_and_test(&server->active))
113 deactivate_super(sb);
114}
115
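/*
 * Fill in the superblock at mount time: propagate POSIX ACL support,
 * pick the maximum file size from the large-files capability, set the
 * cifs magic number, operations, xattr handlers and block size, then
 * instantiate the root inode and dentry.
 */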
116static int
117cifs_read_super(struct super_block *sb)
118{
119 struct inode *inode;
120 struct cifs_sb_info *cifs_sb;
121 struct cifs_tcon *tcon;
122 int rc = 0;
123
124 cifs_sb = CIFS_SB(sb);
125 tcon = cifs_sb_master_tcon(cifs_sb);
126
127 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
128 sb->s_flags |= MS_POSIXACL;
129
130 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
131 sb->s_maxbytes = MAX_LFS_FILESIZE;
132 else
133 sb->s_maxbytes = MAX_NON_LFS;
134
135 /* BB FIXME fix time_gran to be larger for LANMAN sessions */
136 sb->s_time_gran = 100;
137
138 sb->s_magic = CIFS_MAGIC_NUMBER;
139 sb->s_op = &cifs_super_ops;
140 sb->s_xattr = cifs_xattr_handlers;
141 sb->s_bdi = &cifs_sb->bdi;
142 sb->s_blocksize = CIFS_MAX_MSGSIZE;
143 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
144 inode = cifs_root_iget(sb);
145
146 if (IS_ERR(inode)) {
147 rc = PTR_ERR(inode);
148 goto out_no_root;
149 }
150
151 if (tcon->nocase)
152 sb->s_d_op = &cifs_ci_dentry_ops;
153 else
154 sb->s_d_op = &cifs_dentry_ops;
155
156 sb->s_root = d_make_root(inode);
157 if (!sb->s_root) {
158 rc = -ENOMEM;
159 goto out_no_root;
160 }
161
162#ifdef CONFIG_CIFS_NFSD_EXPORT
163 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
164 cifs_dbg(FYI, "export ops supported\n");
165 sb->s_export_op = &cifs_export_ops;
166 }
167#endif /* CONFIG_CIFS_NFSD_EXPORT */
168
169 return 0;
170
171out_no_root:
172 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
173 return rc;
174}
175
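/* Tear down an anonymous cifs superblock and release the mount state. */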
176static void cifs_kill_sb(struct super_block *sb)
177{
178 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
179 kill_anon_super(sb);
180 cifs_umount(cifs_sb);
181}
182
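/*
 * Report filesystem statistics. f_namelen is only an upper-bound guess
 * (PATH_MAX); the dialect-specific queryfs op, when present, fills in
 * the block counts. Note that 0 is returned even if the server query
 * fails.
 */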
183static int
184cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
185{
186 struct super_block *sb = dentry->d_sb;
187 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
188 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
189 struct TCP_Server_Info *server = tcon->ses->server;
190 unsigned int xid;
191 int rc = 0;
192
193 xid = get_xid();
194
195 /*
196	 * PATH_MAX may be too long - it would presumably be the total path length,
197	 * but note that some servers (including Samba 3) have a shorter
198 * maximum path.
199 *
200 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
201 */
202 buf->f_namelen = PATH_MAX;
203 buf->f_files = 0; /* undefined */
204 buf->f_ffree = 0; /* unlimited */
205
206 if (server->ops->queryfs)
207 rc = server->ops->queryfs(xid, tcon, buf);
208
209 free_xid(xid);
210 return 0;
211}
212
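/*
 * Pass fallocate through to the dialect-specific handler; dialects
 * without a ->fallocate op get -EOPNOTSUPP.
 */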
213static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
214{
215 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
216 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
217 struct TCP_Server_Info *server = tcon->ses->server;
218
219 if (server->ops->fallocate)
220 return server->ops->fallocate(file, tcon, mode, off, len);
221
222 return -EOPNOTSUPP;
223}
224
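/*
 * With "noperm" the client skips permission checks (other than execute)
 * and leaves enforcement to the server; otherwise fall back to the
 * generic mode-bit based check.
 */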
225static int cifs_permission(struct inode *inode, int mask)
226{
227 struct cifs_sb_info *cifs_sb;
228
229 cifs_sb = CIFS_SB(inode->i_sb);
230
231 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
232 if ((mask & MAY_EXEC) && !execute_ok(inode))
233 return -EACCES;
234 else
235 return 0;
236 } else /* file mode might have been restricted at mount time
237 on the client (above and beyond ACL on servers) for
238 servers which do not support setting and viewing mode bits,
239 so allowing client to check permissions is useful */
240 return generic_permission(inode, mask);
241}
242
243static struct kmem_cache *cifs_inode_cachep;
244static struct kmem_cache *cifs_req_cachep;
245static struct kmem_cache *cifs_mid_cachep;
246static struct kmem_cache *cifs_sm_req_cachep;
247mempool_t *cifs_sm_req_poolp;
248mempool_t *cifs_req_poolp;
249mempool_t *cifs_mid_poolp;
250
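/*
 * Allocate and initialise a cifs inode: default (archive) DOS
 * attributes, no oplock (so no caching until the server grants one),
 * and empty open-file and lock lists.
 */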
251static struct inode *
252cifs_alloc_inode(struct super_block *sb)
253{
254 struct cifsInodeInfo *cifs_inode;
255 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
256 if (!cifs_inode)
257 return NULL;
258 cifs_inode->cifsAttrs = 0x20; /* default */
259 cifs_inode->time = 0;
260 /*
261 * Until the file is open and we have gotten oplock info back from the
262 * server, can not assume caching of file data or metadata.
263 */
264 cifs_set_oplock_level(cifs_inode, 0);
265 cifs_inode->flags = 0;
266 spin_lock_init(&cifs_inode->writers_lock);
267 cifs_inode->writers = 0;
268 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
269 cifs_inode->server_eof = 0;
270 cifs_inode->uniqueid = 0;
271 cifs_inode->createtime = 0;
272 cifs_inode->epoch = 0;
273#ifdef CONFIG_CIFS_SMB2
274 generate_random_uuid(cifs_inode->lease_key);
275#endif
276 /*
277 * Can not set i_flags here - they get immediately overwritten to zero
278 * by the VFS.
279 */
280 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
281 INIT_LIST_HEAD(&cifs_inode->openFileList);
282 INIT_LIST_HEAD(&cifs_inode->llist);
283 return &cifs_inode->vfs_inode;
284}
285
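/* RCU callback that actually frees the cifs inode (see cifs_destroy_inode). */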
286static void cifs_i_callback(struct rcu_head *head)
287{
288 struct inode *inode = container_of(head, struct inode, i_rcu);
289 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
290}
291
292static void
293cifs_destroy_inode(struct inode *inode)
294{
295 call_rcu(&inode->i_rcu, cifs_i_callback);
296}
297
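/* Drop cached pages, clear the inode and release its fscache cookie. */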
298static void
299cifs_evict_inode(struct inode *inode)
300{
301 truncate_inode_pages_final(&inode->i_data);
302 clear_inode(inode);
303 cifs_fscache_release_inode_cookie(inode);
304}
305
306static void
307cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
308{
309 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
310 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
311
312 seq_puts(s, ",addr=");
313
314 switch (server->dstaddr.ss_family) {
315 case AF_INET:
316 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
317 break;
318 case AF_INET6:
319 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
320 if (sa6->sin6_scope_id)
321 seq_printf(s, "%%%u", sa6->sin6_scope_id);
322 break;
323 default:
324 seq_puts(s, "(unknown)");
325 }
326}
327
328static void
329cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
330{
331 if (ses->sectype == Unspecified) {
332 if (ses->user_name == NULL)
333 seq_puts(s, ",sec=none");
334 return;
335 }
336
337 seq_puts(s, ",sec=");
338
339 switch (ses->sectype) {
340 case LANMAN:
341 seq_puts(s, "lanman");
342 break;
343 case NTLMv2:
344 seq_puts(s, "ntlmv2");
345 break;
346 case NTLM:
347 seq_puts(s, "ntlm");
348 break;
349 case Kerberos:
350 seq_puts(s, "krb5");
351 break;
352 case RawNTLMSSP:
353 seq_puts(s, "ntlmssp");
354 break;
355 default:
356 /* shouldn't ever happen */
357 seq_puts(s, "unknown");
358 break;
359 }
360
361 if (ses->sign)
362 seq_puts(s, "i");
363}
364
365static void
366cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
367{
368 seq_puts(s, ",cache=");
369
370 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
371 seq_puts(s, "strict");
372 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
373 seq_puts(s, "none");
374 else
375 seq_puts(s, "loose");
376}
377
378static void
379cifs_show_nls(struct seq_file *s, struct nls_table *cur)
380{
381 struct nls_table *def;
382
383 /* Display iocharset= option if it's not default charset */
384 def = load_nls_default();
385 if (def != cur)
386 seq_printf(s, ",iocharset=%s", cur->charset);
387 unload_nls(def);
388}
389
390/*
391 * cifs_show_options() is for displaying mount options in /proc/mounts.
392 * Not all settable options are displayed but most of the important
393 * ones are.
394 */
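/*
 * For example, a /proc/mounts line produced here might look roughly like
 * the following (illustrative only - the exact options depend on the
 * mount and on what the VFS adds around them):
 *
 *   //server/share /mnt cifs rw,vers=3.0,sec=ntlmssp,cache=strict,
 *   username=user,uid=0,noforceuid,gid=0,noforcegid,addr=192.0.2.10,
 *   file_mode=0755,dir_mode=0755,rsize=65536,wsize=65536,actimeo=1 0 0
 */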
395static int
396cifs_show_options(struct seq_file *s, struct dentry *root)
397{
398 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
399 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
400 struct sockaddr *srcaddr;
401 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
402
403 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
404 cifs_show_security(s, tcon->ses);
405 cifs_show_cache_flavor(s, cifs_sb);
406
407 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
408 seq_puts(s, ",multiuser");
409 else if (tcon->ses->user_name)
410 seq_show_option(s, "username", tcon->ses->user_name);
411
412 if (tcon->ses->domainName)
413 seq_show_option(s, "domain", tcon->ses->domainName);
414
415 if (srcaddr->sa_family != AF_UNSPEC) {
416 struct sockaddr_in *saddr4;
417 struct sockaddr_in6 *saddr6;
418 saddr4 = (struct sockaddr_in *)srcaddr;
419 saddr6 = (struct sockaddr_in6 *)srcaddr;
420 if (srcaddr->sa_family == AF_INET6)
421 seq_printf(s, ",srcaddr=%pI6c",
422 &saddr6->sin6_addr);
423 else if (srcaddr->sa_family == AF_INET)
424 seq_printf(s, ",srcaddr=%pI4",
425 &saddr4->sin_addr.s_addr);
426 else
427 seq_printf(s, ",srcaddr=BAD-AF:%i",
428 (int)(srcaddr->sa_family));
429 }
430
431 seq_printf(s, ",uid=%u",
432 from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
433 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
434 seq_puts(s, ",forceuid");
435 else
436 seq_puts(s, ",noforceuid");
437
438 seq_printf(s, ",gid=%u",
439 from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
440 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
441 seq_puts(s, ",forcegid");
442 else
443 seq_puts(s, ",noforcegid");
444
445 cifs_show_address(s, tcon->ses->server);
446
447 if (!tcon->unix_ext)
448 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
449 cifs_sb->mnt_file_mode,
450 cifs_sb->mnt_dir_mode);
451
452 cifs_show_nls(s, cifs_sb->local_nls);
453
454 if (tcon->seal)
455 seq_puts(s, ",seal");
456 if (tcon->nocase)
457 seq_puts(s, ",nocase");
458 if (tcon->retry)
459 seq_puts(s, ",hard");
460 if (tcon->use_persistent)
461 seq_puts(s, ",persistenthandles");
462 else if (tcon->use_resilient)
463 seq_puts(s, ",resilienthandles");
464 if (tcon->unix_ext)
465 seq_puts(s, ",unix");
466 else
467 seq_puts(s, ",nounix");
468 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
469 seq_puts(s, ",posixpaths");
470 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
471 seq_puts(s, ",setuids");
472 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
473 seq_puts(s, ",idsfromsid");
474 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
475 seq_puts(s, ",serverino");
476 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
477 seq_puts(s, ",rwpidforward");
478 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
479 seq_puts(s, ",forcemand");
480 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
481 seq_puts(s, ",nouser_xattr");
482 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
483 seq_puts(s, ",mapchars");
484 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
485 seq_puts(s, ",mapposix");
486 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
487 seq_puts(s, ",sfu");
488 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
489 seq_puts(s, ",nobrl");
490 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
491 seq_puts(s, ",cifsacl");
492 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
493 seq_puts(s, ",dynperm");
494 if (root->d_sb->s_flags & MS_POSIXACL)
495 seq_puts(s, ",acl");
496 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
497 seq_puts(s, ",mfsymlinks");
498 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
499 seq_puts(s, ",fsc");
500 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
501 seq_puts(s, ",nostrictsync");
502 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
503 seq_puts(s, ",noperm");
504 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
505 seq_printf(s, ",backupuid=%u",
506 from_kuid_munged(&init_user_ns,
507 cifs_sb->mnt_backupuid));
508 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
509 seq_printf(s, ",backupgid=%u",
510 from_kgid_munged(&init_user_ns,
511 cifs_sb->mnt_backupgid));
512
513 seq_printf(s, ",rsize=%u", cifs_sb->rsize);
514 seq_printf(s, ",wsize=%u", cifs_sb->wsize);
515 seq_printf(s, ",echo_interval=%lu",
516 tcon->ses->server->echo_interval / HZ);
517 /* convert actimeo and display it in seconds */
518 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
519
520 return 0;
521}
522
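/*
 * Called on forced unmount: if this is the only mount of the tcon, mark
 * it as exiting and wake up any tasks blocked on the server queues so
 * the unmount can make progress.
 */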
523static void cifs_umount_begin(struct super_block *sb)
524{
525 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
526 struct cifs_tcon *tcon;
527
528 if (cifs_sb == NULL)
529 return;
530
531 tcon = cifs_sb_master_tcon(cifs_sb);
532
533 spin_lock(&cifs_tcp_ses_lock);
534 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
535 /* we have other mounts to same share or we have
536 already tried to force umount this and woken up
537 all waiting network requests, nothing to do */
538 spin_unlock(&cifs_tcp_ses_lock);
539 return;
540 } else if (tcon->tc_count == 1)
541 tcon->tidStatus = CifsExiting;
542 spin_unlock(&cifs_tcp_ses_lock);
543
544 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
545 /* cancel_notify_requests(tcon); */
546 if (tcon->ses && tcon->ses->server) {
547 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
548 wake_up_all(&tcon->ses->server->request_q);
549 wake_up_all(&tcon->ses->server->response_q);
550 msleep(1); /* yield */
551 /* we have to kick the requests once more */
552 wake_up_all(&tcon->ses->server->response_q);
553 msleep(1);
554 }
555
556 return;
557}
558
559#ifdef CONFIG_CIFS_STATS2
560static int cifs_show_stats(struct seq_file *s, struct dentry *root)
561{
562 /* BB FIXME */
563 return 0;
564}
565#endif
566
567static int cifs_remount(struct super_block *sb, int *flags, char *data)
568{
569 sync_filesystem(sb);
570 *flags |= MS_NODIRATIME;
571 return 0;
572}
573
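/*
 * Without server inode numbers ("serverino") evict the inode
 * unconditionally on the final dput; otherwise fall back to the generic
 * heuristic.
 */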
574static int cifs_drop_inode(struct inode *inode)
575{
576 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
577
578 /* no serverino => unconditional eviction */
579 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
580 generic_drop_inode(inode);
581}
582
583static const struct super_operations cifs_super_ops = {
584 .statfs = cifs_statfs,
585 .alloc_inode = cifs_alloc_inode,
586 .destroy_inode = cifs_destroy_inode,
587 .drop_inode = cifs_drop_inode,
588 .evict_inode = cifs_evict_inode,
589/* .delete_inode = cifs_delete_inode, */ /* Do not need above
590 function unless later we add lazy close of inodes or unless the
591 kernel forgets to call us with the same number of releases (closes)
592 as opens */
593 .show_options = cifs_show_options,
594 .umount_begin = cifs_umount_begin,
595 .remount_fs = cifs_remount,
596#ifdef CONFIG_CIFS_STATS2
597 .show_stats = cifs_show_stats,
598#endif
599};
600
601/*
602 * Get root dentry from superblock according to prefix path mount option.
603 * Return dentry with refcount + 1 on success and an ERR_PTR on failure.
604 */
605static struct dentry *
606cifs_get_root(struct smb_vol *vol, struct super_block *sb)
607{
608 struct dentry *dentry;
609 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
610 char *full_path = NULL;
611 char *s, *p;
612 char sep;
613
614 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
615 return dget(sb->s_root);
616
617 full_path = cifs_build_path_to_root(vol, cifs_sb,
618 cifs_sb_master_tcon(cifs_sb), 0);
619 if (full_path == NULL)
620 return ERR_PTR(-ENOMEM);
621
622 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
623
624 sep = CIFS_DIR_SEP(cifs_sb);
625 dentry = dget(sb->s_root);
626 p = s = full_path;
627
628 do {
629 struct inode *dir = d_inode(dentry);
630 struct dentry *child;
631
632 if (!dir) {
633 dput(dentry);
634 dentry = ERR_PTR(-ENOENT);
635 break;
636 }
637 if (!S_ISDIR(dir->i_mode)) {
638 dput(dentry);
639 dentry = ERR_PTR(-ENOTDIR);
640 break;
641 }
642
643 /* skip separators */
644 while (*s == sep)
645 s++;
646 if (!*s)
647 break;
648 p = s++;
649 /* next separator */
650 while (*s && *s != sep)
651 s++;
652
653 child = lookup_one_len_unlocked(p, dentry, s - p);
654 dput(dentry);
655 dentry = child;
656 } while (!IS_ERR(dentry));
657 kfree(full_path);
658 return dentry;
659}
660
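/* sget() callback: attach the prepared cifs_sb to a new anonymous superblock. */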
661static int cifs_set_super(struct super_block *sb, void *data)
662{
663 struct cifs_mnt_data *mnt_data = data;
664 sb->s_fs_info = mnt_data->cifs_sb;
665 return set_anon_super(sb, NULL);
666}
667
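/*
 * mount(2) entry point: parse the options into an smb_vol, set up the
 * cifs_sb, connect to the server and share via cifs_mount(), then find
 * or create a matching superblock with sget() and resolve the root
 * dentry (honouring any prefix path).
 */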
668static struct dentry *
669cifs_do_mount(struct file_system_type *fs_type,
670 int flags, const char *dev_name, void *data)
671{
672 int rc;
673 struct super_block *sb;
674 struct cifs_sb_info *cifs_sb;
675 struct smb_vol *volume_info;
676 struct cifs_mnt_data mnt_data;
677 struct dentry *root;
678
679 cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
680
681 volume_info = cifs_get_volume_info((char *)data, dev_name);
682 if (IS_ERR(volume_info))
683 return ERR_CAST(volume_info);
684
685 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
686 if (cifs_sb == NULL) {
687 root = ERR_PTR(-ENOMEM);
688 goto out_nls;
689 }
690
691 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
692 if (cifs_sb->mountdata == NULL) {
693 root = ERR_PTR(-ENOMEM);
694 goto out_free;
695 }
696
697 rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
698 if (rc) {
699 root = ERR_PTR(rc);
700 goto out_free;
701 }
702
703 rc = cifs_mount(cifs_sb, volume_info);
704 if (rc) {
705 if (!(flags & MS_SILENT))
706 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
707 rc);
708 root = ERR_PTR(rc);
709 goto out_free;
710 }
711
712 mnt_data.vol = volume_info;
713 mnt_data.cifs_sb = cifs_sb;
714 mnt_data.flags = flags;
715
716 /* BB should we make this contingent on mount parm? */
717 flags |= MS_NODIRATIME | MS_NOATIME;
718
719 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
720 if (IS_ERR(sb)) {
721 root = ERR_CAST(sb);
722 cifs_umount(cifs_sb);
723 goto out;
724 }
725
726 if (sb->s_root) {
727 cifs_dbg(FYI, "Use existing superblock\n");
728 cifs_umount(cifs_sb);
729 } else {
730 rc = cifs_read_super(sb);
731 if (rc) {
732 root = ERR_PTR(rc);
733 goto out_super;
734 }
735
736 sb->s_flags |= MS_ACTIVE;
737 }
738
739 root = cifs_get_root(volume_info, sb);
740 if (IS_ERR(root))
741 goto out_super;
742
743 cifs_dbg(FYI, "dentry root is: %p\n", root);
744 goto out;
745
746out_super:
747 deactivate_locked_super(sb);
748out:
749 cifs_cleanup_volume_info(volume_info);
750 return root;
751
752out_free:
753 kfree(cifs_sb->prepath);
754 kfree(cifs_sb->mountdata);
755 kfree(cifs_sb);
756out_nls:
757 unload_nls(volume_info->local_nls);
758 goto out;
759}
760
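/*
 * "loose" cached read: O_DIRECT opens bypass the page cache via
 * cifs_user_readv(); otherwise revalidate the mapping and use the
 * generic cached read path.
 */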
761static ssize_t
762cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
763{
764 ssize_t rc;
765 struct inode *inode = file_inode(iocb->ki_filp);
766
767 if (iocb->ki_filp->f_flags & O_DIRECT)
768 return cifs_user_readv(iocb, iter);
769
770 rc = cifs_revalidate_mapping(inode);
771 if (rc)
772 return rc;
773
774 return generic_file_read_iter(iocb, iter);
775}
776
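/*
 * Cached write path: O_DIRECT writes go straight to the server and zap
 * any cached data that can no longer be trusted; other writes are
 * flushed immediately afterwards unless we hold a write oplock/lease
 * that allows dirty data to stay in the page cache.
 */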
777static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
778{
779 struct inode *inode = file_inode(iocb->ki_filp);
780 struct cifsInodeInfo *cinode = CIFS_I(inode);
781 ssize_t written;
782 int rc;
783
784 if (iocb->ki_filp->f_flags & O_DIRECT) {
785 written = cifs_user_writev(iocb, from);
786 if (written > 0 && CIFS_CACHE_READ(cinode)) {
787 cifs_zap_mapping(inode);
788 cifs_dbg(FYI,
789 "Set no oplock for inode=%p after a write operation\n",
790 inode);
791 cinode->oplock = 0;
792 }
793 return written;
794 }
795
796 written = cifs_get_writer(cinode);
797 if (written)
798 return written;
799
800 written = generic_file_write_iter(iocb, from);
801
802 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
803 goto out;
804
805 rc = filemap_fdatawrite(inode->i_mapping);
806 if (rc)
807 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
808 rc, inode);
809
810out:
811 cifs_put_writer(cinode);
812 return written;
813}
814
815static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
816{
817 /*
818 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
819 * the cached file length
820 */
821 if (whence != SEEK_SET && whence != SEEK_CUR) {
822 int rc;
823 struct inode *inode = file_inode(file);
824
825 /*
826 * We need to be sure that all dirty pages are written and the
827 * server has the newest file length.
828 */
829 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
830 inode->i_mapping->nrpages != 0) {
831 rc = filemap_fdatawait(inode->i_mapping);
832 if (rc) {
833 mapping_set_error(inode->i_mapping, rc);
834 return rc;
835 }
836 }
837 /*
838 * Some applications poll for the file length in this strange
839 * way so we must seek to end on non-oplocked files by
840 * setting the revalidate time to zero.
841 */
842 CIFS_I(inode)->time = 0;
843
844 rc = cifs_revalidate_file_attr(file);
845 if (rc < 0)
846 return (loff_t)rc;
847 }
848 return generic_file_llseek(file, offset, whence);
849}
850
851static int
852cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
853{
854 /*
855 * Note that this is called by vfs setlease with i_lock held to
856 * protect *lease from going away.
857 */
858 struct inode *inode = file_inode(file);
859 struct cifsFileInfo *cfile = file->private_data;
860
861 if (!(S_ISREG(inode->i_mode)))
862 return -EINVAL;
863
864 /* Check if file is oplocked if this is request for new lease */
865 if (arg == F_UNLCK ||
866 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
867 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
868 return generic_setlease(file, arg, lease, priv);
869 else if (tlink_tcon(cfile->tlink)->local_lease &&
870 !CIFS_CACHE_READ(CIFS_I(inode)))
871 /*
872 * If the server claims to support oplock on this file, then we
873 * still need to check oplock even if the local_lease mount
874 * option is set, but there are servers which do not support
875 * oplock for which this mount option may be useful if the user
876 * knows that the file won't be changed on the server by anyone
877 * else.
878 */
879 return generic_setlease(file, arg, lease, priv);
880 else
881 return -EAGAIN;
882}
883
884struct file_system_type cifs_fs_type = {
885 .owner = THIS_MODULE,
886 .name = "cifs",
887 .mount = cifs_do_mount,
888 .kill_sb = cifs_kill_sb,
889 /* .fs_flags */
890};
891MODULE_ALIAS_FS("cifs");
892const struct inode_operations cifs_dir_inode_ops = {
893 .create = cifs_create,
894 .atomic_open = cifs_atomic_open,
895 .lookup = cifs_lookup,
896 .getattr = cifs_getattr,
897 .unlink = cifs_unlink,
898 .link = cifs_hardlink,
899 .mkdir = cifs_mkdir,
900 .rmdir = cifs_rmdir,
901 .rename = cifs_rename2,
902 .permission = cifs_permission,
903 .setattr = cifs_setattr,
904 .symlink = cifs_symlink,
905 .mknod = cifs_mknod,
906 .listxattr = cifs_listxattr,
907};
908
909const struct inode_operations cifs_file_inode_ops = {
910 .setattr = cifs_setattr,
911 .getattr = cifs_getattr,
912 .permission = cifs_permission,
913 .listxattr = cifs_listxattr,
914};
915
916const struct inode_operations cifs_symlink_inode_ops = {
917 .get_link = cifs_get_link,
918 .permission = cifs_permission,
919 .listxattr = cifs_listxattr,
920};
921
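/*
 * ->clone_file_range handler: drop the destination range from the page
 * cache, ask the server to duplicate extents between the two open
 * files, then force the target's attributes to be revalidated.
 */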
922static int cifs_clone_file_range(struct file *src_file, loff_t off,
923 struct file *dst_file, loff_t destoff, u64 len)
924{
925 struct inode *src_inode = file_inode(src_file);
926 struct inode *target_inode = file_inode(dst_file);
927 struct cifsFileInfo *smb_file_src = src_file->private_data;
928 struct cifsFileInfo *smb_file_target = dst_file->private_data;
929 struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
930 unsigned int xid;
931 int rc;
932
933 cifs_dbg(FYI, "clone range\n");
934
935 xid = get_xid();
936
937 if (!src_file->private_data || !dst_file->private_data) {
938 rc = -EBADF;
939 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
940 goto out;
941 }
942
943 /*
944	 * Note: the cifs case is easier than btrfs since the server is responsible
945	 * for checking proper open modes and file type, and if it wants the
946	 * server could even support copying a range where source = target
947 */
948 lock_two_nondirectories(target_inode, src_inode);
949
950 if (len == 0)
951 len = src_inode->i_size - off;
952
953 cifs_dbg(FYI, "about to flush pages\n");
954	/* should we flush the first and last pages first? */
955 truncate_inode_pages_range(&target_inode->i_data, destoff,
956 PAGE_ALIGN(destoff + len)-1);
957
958 if (target_tcon->ses->server->ops->duplicate_extents)
959 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
960 smb_file_src, smb_file_target, off, len, destoff);
961 else
962 rc = -EOPNOTSUPP;
963
964 /* force revalidate of size and timestamps of target file now
965 that target is updated on the server */
966 CIFS_I(target_inode)->time = 0;
967 /* although unlocking in the reverse order from locking is not
968 strictly necessary here it is a little cleaner to be consistent */
969 unlock_two_nondirectories(src_inode, target_inode);
970out:
971 free_xid(xid);
972 return rc;
973}
974
975const struct file_operations cifs_file_ops = {
976 .read_iter = cifs_loose_read_iter,
977 .write_iter = cifs_file_write_iter,
978 .open = cifs_open,
979 .release = cifs_close,
980 .lock = cifs_lock,
981 .fsync = cifs_fsync,
982 .flush = cifs_flush,
983 .mmap = cifs_file_mmap,
984 .splice_read = generic_file_splice_read,
985 .llseek = cifs_llseek,
986 .unlocked_ioctl = cifs_ioctl,
987 .clone_file_range = cifs_clone_file_range,
988 .setlease = cifs_setlease,
989 .fallocate = cifs_fallocate,
990};
991
992const struct file_operations cifs_file_strict_ops = {
993 .read_iter = cifs_strict_readv,
994 .write_iter = cifs_strict_writev,
995 .open = cifs_open,
996 .release = cifs_close,
997 .lock = cifs_lock,
998 .fsync = cifs_strict_fsync,
999 .flush = cifs_flush,
1000 .mmap = cifs_file_strict_mmap,
1001 .splice_read = generic_file_splice_read,
1002 .llseek = cifs_llseek,
1003 .unlocked_ioctl = cifs_ioctl,
1004 .clone_file_range = cifs_clone_file_range,
1005 .setlease = cifs_setlease,
1006 .fallocate = cifs_fallocate,
1007};
1008
1009const struct file_operations cifs_file_direct_ops = {
1010 /* BB reevaluate whether they can be done with directio, no cache */
1011 .read_iter = cifs_user_readv,
1012 .write_iter = cifs_user_writev,
1013 .open = cifs_open,
1014 .release = cifs_close,
1015 .lock = cifs_lock,
1016 .fsync = cifs_fsync,
1017 .flush = cifs_flush,
1018 .mmap = cifs_file_mmap,
1019 .splice_read = generic_file_splice_read,
1020 .unlocked_ioctl = cifs_ioctl,
1021 .clone_file_range = cifs_clone_file_range,
1022 .llseek = cifs_llseek,
1023 .setlease = cifs_setlease,
1024 .fallocate = cifs_fallocate,
1025};
1026
1027const struct file_operations cifs_file_nobrl_ops = {
1028 .read_iter = cifs_loose_read_iter,
1029 .write_iter = cifs_file_write_iter,
1030 .open = cifs_open,
1031 .release = cifs_close,
1032 .fsync = cifs_fsync,
1033 .flush = cifs_flush,
1034 .mmap = cifs_file_mmap,
1035 .splice_read = generic_file_splice_read,
1036 .llseek = cifs_llseek,
1037 .unlocked_ioctl = cifs_ioctl,
1038 .clone_file_range = cifs_clone_file_range,
1039 .setlease = cifs_setlease,
1040 .fallocate = cifs_fallocate,
1041};
1042
1043const struct file_operations cifs_file_strict_nobrl_ops = {
1044 .read_iter = cifs_strict_readv,
1045 .write_iter = cifs_strict_writev,
1046 .open = cifs_open,
1047 .release = cifs_close,
1048 .fsync = cifs_strict_fsync,
1049 .flush = cifs_flush,
1050 .mmap = cifs_file_strict_mmap,
1051 .splice_read = generic_file_splice_read,
1052 .llseek = cifs_llseek,
1053 .unlocked_ioctl = cifs_ioctl,
1054 .clone_file_range = cifs_clone_file_range,
1055 .setlease = cifs_setlease,
1056 .fallocate = cifs_fallocate,
1057};
1058
1059const struct file_operations cifs_file_direct_nobrl_ops = {
1060 /* BB reevaluate whether they can be done with directio, no cache */
1061 .read_iter = cifs_user_readv,
1062 .write_iter = cifs_user_writev,
1063 .open = cifs_open,
1064 .release = cifs_close,
1065 .fsync = cifs_fsync,
1066 .flush = cifs_flush,
1067 .mmap = cifs_file_mmap,
1068 .splice_read = generic_file_splice_read,
1069 .unlocked_ioctl = cifs_ioctl,
1070 .clone_file_range = cifs_clone_file_range,
1071 .llseek = cifs_llseek,
1072 .setlease = cifs_setlease,
1073 .fallocate = cifs_fallocate,
1074};
1075
1076const struct file_operations cifs_dir_ops = {
1077 .iterate_shared = cifs_readdir,
1078 .release = cifs_closedir,
1079 .read = generic_read_dir,
1080 .unlocked_ioctl = cifs_ioctl,
1081 .clone_file_range = cifs_clone_file_range,
1082 .llseek = generic_file_llseek,
1083};
1084
1085static void
1086cifs_init_once(void *inode)
1087{
1088 struct cifsInodeInfo *cifsi = inode;
1089
1090 inode_init_once(&cifsi->vfs_inode);
1091 init_rwsem(&cifsi->lock_sem);
1092}
1093
1094static int __init
1095cifs_init_inodecache(void)
1096{
1097 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1098 sizeof(struct cifsInodeInfo),
1099 0, (SLAB_RECLAIM_ACCOUNT|
1100 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1101 cifs_init_once);
1102 if (cifs_inode_cachep == NULL)
1103 return -ENOMEM;
1104
1105 return 0;
1106}
1107
1108static void
1109cifs_destroy_inodecache(void)
1110{
1111 /*
1112 * Make sure all delayed rcu free inodes are flushed before we
1113 * destroy cache.
1114 */
1115 rcu_barrier();
1116 kmem_cache_destroy(cifs_inode_cachep);
1117}
1118
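/*
 * Create the slab caches and mempools for large and small request
 * buffers, after clamping CIFSMaxBufSize and the pool sizes to sane
 * ranges.
 */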
1119static int
1120cifs_init_request_bufs(void)
1121{
1122 size_t max_hdr_size = MAX_CIFS_HDR_SIZE;
1123#ifdef CONFIG_CIFS_SMB2
1124 /*
1125	 * SMB2 maximum header size is bigger than the CIFS one - there is no
1126	 * harm in allocating some more bytes for CIFS.
1127 */
1128 max_hdr_size = MAX_SMB2_HDR_SIZE;
1129#endif
1130 if (CIFSMaxBufSize < 8192) {
1131		/* Buffer size cannot be smaller than 2 * PATH_MAX since the maximum
1132		   Unicode path name has to fit in any SMB/CIFS path based frames */
1133 CIFSMaxBufSize = 8192;
1134 } else if (CIFSMaxBufSize > 1024*127) {
1135 CIFSMaxBufSize = 1024 * 127;
1136 } else {
1137 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1138 }
1139/*
1140 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1141 CIFSMaxBufSize, CIFSMaxBufSize);
1142*/
1143 cifs_req_cachep = kmem_cache_create("cifs_request",
1144 CIFSMaxBufSize + max_hdr_size, 0,
1145 SLAB_HWCACHE_ALIGN, NULL);
1146 if (cifs_req_cachep == NULL)
1147 return -ENOMEM;
1148
1149 if (cifs_min_rcv < 1)
1150 cifs_min_rcv = 1;
1151 else if (cifs_min_rcv > 64) {
1152 cifs_min_rcv = 64;
1153 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1154 }
1155
1156 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1157 cifs_req_cachep);
1158
1159 if (cifs_req_poolp == NULL) {
1160 kmem_cache_destroy(cifs_req_cachep);
1161 return -ENOMEM;
1162 }
1163 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1164 almost all handle based requests (but not write response, nor is it
1165 sufficient for path based requests). A smaller size would have
1166 been more efficient (compacting multiple slab items on one 4k page)
1167 for the case in which debug was on, but this larger size allows
1168 more SMBs to use small buffer alloc and is still much more
1169 efficient to alloc 1 per page off the slab compared to 17K (5page)
1170 alloc of large cifs buffers even when page debugging is on */
1171 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
1172 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1173 NULL);
1174 if (cifs_sm_req_cachep == NULL) {
1175 mempool_destroy(cifs_req_poolp);
1176 kmem_cache_destroy(cifs_req_cachep);
1177 return -ENOMEM;
1178 }
1179
1180 if (cifs_min_small < 2)
1181 cifs_min_small = 2;
1182 else if (cifs_min_small > 256) {
1183 cifs_min_small = 256;
1184 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1185 }
1186
1187 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1188 cifs_sm_req_cachep);
1189
1190 if (cifs_sm_req_poolp == NULL) {
1191 mempool_destroy(cifs_req_poolp);
1192 kmem_cache_destroy(cifs_req_cachep);
1193 kmem_cache_destroy(cifs_sm_req_cachep);
1194 return -ENOMEM;
1195 }
1196
1197 return 0;
1198}
1199
1200static void
1201cifs_destroy_request_bufs(void)
1202{
1203 mempool_destroy(cifs_req_poolp);
1204 kmem_cache_destroy(cifs_req_cachep);
1205 mempool_destroy(cifs_sm_req_poolp);
1206 kmem_cache_destroy(cifs_sm_req_cachep);
1207}
1208
1209static int
1210cifs_init_mids(void)
1211{
1212 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1213 sizeof(struct mid_q_entry), 0,
1214 SLAB_HWCACHE_ALIGN, NULL);
1215 if (cifs_mid_cachep == NULL)
1216 return -ENOMEM;
1217
1218 /* 3 is a reasonable minimum number of simultaneous operations */
1219 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1220 if (cifs_mid_poolp == NULL) {
1221 kmem_cache_destroy(cifs_mid_cachep);
1222 return -ENOMEM;
1223 }
1224
1225 return 0;
1226}
1227
1228static void
1229cifs_destroy_mids(void)
1230{
1231 mempool_destroy(cifs_mid_poolp);
1232 kmem_cache_destroy(cifs_mid_cachep);
1233}
1234
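/*
 * Module load: initialise global lists, counters and locks, clamp
 * cifs_max_pending, create the cifsiod workqueue, register with
 * fscache, set up the inode/mid/request caches and the optional
 * spnego/idmap key types, and finally register the "cifs" filesystem.
 */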
1235static int __init
1236init_cifs(void)
1237{
1238 int rc = 0;
1239 cifs_proc_init();
1240 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1241#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
1242 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1243 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1244#endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
1245/*
1246 * Initialize Global counters
1247 */
1248 atomic_set(&sesInfoAllocCount, 0);
1249 atomic_set(&tconInfoAllocCount, 0);
1250 atomic_set(&tcpSesAllocCount, 0);
1251 atomic_set(&tcpSesReconnectCount, 0);
1252 atomic_set(&tconInfoReconnectCount, 0);
1253
1254 atomic_set(&bufAllocCount, 0);
1255 atomic_set(&smBufAllocCount, 0);
1256#ifdef CONFIG_CIFS_STATS2
1257 atomic_set(&totBufAllocCount, 0);
1258 atomic_set(&totSmBufAllocCount, 0);
1259#endif /* CONFIG_CIFS_STATS2 */
1260
1261 atomic_set(&midCount, 0);
1262 GlobalCurrentXid = 0;
1263 GlobalTotalActiveXid = 0;
1264 GlobalMaxActiveXid = 0;
1265 spin_lock_init(&cifs_tcp_ses_lock);
1266 spin_lock_init(&GlobalMid_Lock);
1267
1268 get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
1269
1270 if (cifs_max_pending < 2) {
1271 cifs_max_pending = 2;
1272 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1273 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1274 cifs_max_pending = CIFS_MAX_REQ;
1275 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1276 CIFS_MAX_REQ);
1277 }
1278
1279 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1280 if (!cifsiod_wq) {
1281 rc = -ENOMEM;
1282 goto out_clean_proc;
1283 }
1284
1285 rc = cifs_fscache_register();
1286 if (rc)
1287 goto out_destroy_wq;
1288
1289 rc = cifs_init_inodecache();
1290 if (rc)
1291 goto out_unreg_fscache;
1292
1293 rc = cifs_init_mids();
1294 if (rc)
1295 goto out_destroy_inodecache;
1296
1297 rc = cifs_init_request_bufs();
1298 if (rc)
1299 goto out_destroy_mids;
1300
1301#ifdef CONFIG_CIFS_UPCALL
1302 rc = init_cifs_spnego();
1303 if (rc)
1304 goto out_destroy_request_bufs;
1305#endif /* CONFIG_CIFS_UPCALL */
1306
1307#ifdef CONFIG_CIFS_ACL
1308 rc = init_cifs_idmap();
1309 if (rc)
1310 goto out_register_key_type;
1311#endif /* CONFIG_CIFS_ACL */
1312
1313 rc = register_filesystem(&cifs_fs_type);
1314 if (rc)
1315 goto out_init_cifs_idmap;
1316
1317 return 0;
1318
1319out_init_cifs_idmap:
1320#ifdef CONFIG_CIFS_ACL
1321 exit_cifs_idmap();
1322out_register_key_type:
1323#endif
1324#ifdef CONFIG_CIFS_UPCALL
1325 exit_cifs_spnego();
1326out_destroy_request_bufs:
1327#endif
1328 cifs_destroy_request_bufs();
1329out_destroy_mids:
1330 cifs_destroy_mids();
1331out_destroy_inodecache:
1332 cifs_destroy_inodecache();
1333out_unreg_fscache:
1334 cifs_fscache_unregister();
1335out_destroy_wq:
1336 destroy_workqueue(cifsiod_wq);
1337out_clean_proc:
1338 cifs_proc_clean();
1339 return rc;
1340}
1341
1342static void __exit
1343exit_cifs(void)
1344{
1345 cifs_dbg(NOISY, "exit_cifs\n");
1346 unregister_filesystem(&cifs_fs_type);
1347 cifs_dfs_release_automount_timer();
1348#ifdef CONFIG_CIFS_ACL
1349 exit_cifs_idmap();
1350#endif
1351#ifdef CONFIG_CIFS_UPCALL
1352 unregister_key_type(&cifs_spnego_key_type);
1353#endif
1354 cifs_destroy_request_bufs();
1355 cifs_destroy_mids();
1356 cifs_destroy_inodecache();
1357 cifs_fscache_unregister();
1358 destroy_workqueue(cifsiod_wq);
1359 cifs_proc_clean();
1360}
1361
1362MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1363MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1364MODULE_DESCRIPTION
1365 ("VFS to access servers complying with the SNIA CIFS Specification "
1366 "e.g. Samba and Windows");
1367MODULE_VERSION(CIFS_VERSION);
1368module_init(init_cifs)
1369module_exit(exit_cifs)