// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 */
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"
#include <trace/events/netfs.h>

static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);

/*
 * Prepare a subrequest to upload to the server. We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	size_t wsize = req->rreq.wsize;
	int rc;

	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

retry:
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
	wdata->credits.rreq_debug_index = subreq->debug_index;
	wdata->credits.in_flight_check = 1;
	trace_smb3_rw_credits(wdata->rreq->debug_id,
			      wdata->subreq.debug_index,
			      wdata->credits.value,
			      server->credits, server->in_flight,
			      wdata->credits.value,
			      cifs_trace_rw_credits_write_prepare);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
}

/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	if (cifs_forced_shutdown(sbi)) {
		rc = -EIO;
		goto fail;
	}

	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
	if (rc)
		goto fail;

	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc, false);
	goto out;
}

static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
{
	cifs_invalidate_cache(wreq->inode, 0);
}

/*
 * Negotiate the size of a read operation on behalf of the netfs library.
 */
static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t size;
	int rc = 0;

	if (!rdata->have_xid) {
		rdata->xid = get_xid();
		rdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
	rdata->server = server;

	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
						     cifs_sb->ctx);

	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
					   &size, &rdata->credits);
	if (rc)
		return rc;

	rreq->io_streams[0].sreq_max_len = size;

	rdata->credits.in_flight_check = 1;
	rdata->credits.rreq_debug_id = rreq->debug_id;
	rdata->credits.rreq_debug_index = subreq->debug_index;

	trace_smb3_rw_credits(rdata->rreq->debug_id,
			      rdata->subreq.debug_index,
			      rdata->credits.value,
			      server->credits, server->in_flight, 0,
			      cifs_trace_rw_credits_read_submit);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
	return 0;
}

/*
 * Issue a read operation on behalf of the netfs helper functions. We're asked
 * to make a read of a certain size at a point in the file. We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = rdata->server;
	int rc = 0;

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
	if (rc)
		goto failed;

	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto failed;
	}

	if (subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	rc = rdata->server->ops->async_readv(rdata);
	if (rc)
		goto failed;
	return;

failed:
	netfs_read_subreq_terminated(subreq, rc, false);
}

/*
 * Writeback calls this when it finds a folio that needs uploading. This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
	int ret;

	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
	if (ret) {
		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
		return;
	}

	wreq->io_streams[0].avail = true;
}

/*
 * Initialise a request.
 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	struct cifsFileInfo *open_file = NULL;

	rreq->rsize = cifs_sb->ctx->rsize;
	rreq->wsize = cifs_sb->ctx->wsize;
	req->pid = current->tgid; // Ummm... This may be a workqueue

	if (file) {
		open_file = file->private_data;
		rreq->netfs_priv = file->private_data;
		req->cfile = cifsFileInfo_get(open_file);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
			req->pid = req->cfile->pid;
	} else if (rreq->origin != NETFS_WRITEBACK) {
		WARN_ON_ONCE(1);
		return -EIO;
	}

	return 0;
}

/*
 * Completion of a request operation.
 */
static void cifs_rreq_done(struct netfs_io_request *rreq)
{
	struct timespec64 atime, mtime;
	struct inode *inode = rreq->inode;

	/* we do not want atime to be less than mtime, it broke some apps */
	atime = inode_set_atime_to_ts(inode, current_time(inode));
	mtime = inode_get_mtime(inode);
	if (timespec64_compare(&atime, &mtime) < 0)
		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}

static void cifs_free_request(struct netfs_io_request *rreq)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);

	if (req->cfile)
		cifsFileInfo_put(req->cfile);
}

static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *rdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	int rc = subreq->error;

	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
#ifdef CONFIG_CIFS_SMB_DIRECT
		if (rdata->mr) {
			smbd_deregister_mr(rdata->mr);
			rdata->mr = NULL;
		}
#endif
	}

	if (rdata->credits.value != 0) {
		trace_smb3_rw_credits(rdata->rreq->debug_id,
				      rdata->subreq.debug_index,
				      rdata->credits.value,
				      rdata->server ? rdata->server->credits : 0,
				      rdata->server ? rdata->server->in_flight : 0,
				      -rdata->credits.value,
				      cifs_trace_rw_credits_free_subreq);
		if (rdata->server)
			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
		else
			rdata->credits.value = 0;
	}

	if (rdata->have_xid)
		free_xid(rdata->xid);
}

const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.prepare_read		= cifs_prepare_read,
	.issue_read		= cifs_issue_read,
	.done			= cifs_rreq_done,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
	.invalidate_cache	= cifs_netfs_invalidate_cache,
};

/*
 * Mark all open files on the tree connection as invalid, since they
 * were closed when the session to the server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}

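/*
 * Map POSIX open flags to the NT desired-access bits used in an SMB open
 * request, widening a write-only open to read/write when the local fscache
 * needs to read back the data it caches (rdwr_for_fscache == 1).
 */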
static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause an unnecessary access-denied error on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

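/*
 * Map POSIX O_CREAT/O_EXCL/O_TRUNC combinations to the matching NT create
 * disposition (see the mapping table in cifs_nt_open() below).
 */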
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

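/*
 * Open a file on the server with an NT-style create request, then refresh
 * the inode from the server. If fetching the inode info fails after the
 * open succeeded, the handle is closed again and -ESTALE is mapped to
 * -EOPENSTALE.
 */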
static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 * open flag mapping table:
 *
 *	POSIX Flag		CIFS Disposition
 *	----------		----------------
 *	O_CREAT			FILE_OPEN_IF
 *	O_CREAT | O_EXCL	FILE_CREATE
 *	O_CREAT | O_TRUNC	FILE_OVERWRITE_IF
 *	O_TRUNC			FILE_OVERWRITE
 *	none of the above	FILE_OPEN
 *
 *	Note that there is not a direct match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists):
 *	O_CREAT | O_TRUNC is similar, but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably. O_LARGEFILE is
 *	irrelevant because largefile support is always used by this
 *	client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *	O_NOFOLLOW, O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info, passing in the fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}

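/*
 * Return true if any open fid on this inode currently holds byte-range
 * locks, i.e. mandatory-style brlocks that make caching under a read
 * oplock unsafe.
 */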
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

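/*
 * Acquire @sem for writing without ever blocking in down_write(): spin,
 * retrying down_write_trylock() with a 10 ms sleep between attempts.
 */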
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);
void serverclose_work(struct work_struct *work);

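/*
 * Allocate and initialise a cifsFileInfo for a newly opened handle, take
 * references on the dentry, tlink and superblock, link it into the
 * per-inode and per-tcon open-file lists, and hand the fid (and any
 * oplock/lease state from the pending open) to the server's set_fid op.
 * Returns NULL on allocation failure.
 */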
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if this is a readable file instance, put it first in the list */
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}

static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

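/*
 * Work item used when a server-side close fails with -EBUSY or -EAGAIN:
 * retry the close (close_getattr if available, else close) up to
 * MAX_RETRIES times with a 250 ms pause between attempts, then perform
 * the final put of the cifsFileInfo.
 */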
void serverclose_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, serverclose);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	int retries = 0;
	int MAX_RETRIES = 4;

	do {
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(0, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(0, tcon, &cifs_file->fid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			retries++;
			msleep(250);
		}
	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));

	if (retries == MAX_RETRIES)
		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);

	if (cifs_file->offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_for_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload:	if true, defer the final put to a workqueue; not offloaded
 *		on close and oplock breaks
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;
	bool serverclose_offloaded = false;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);

	cifs_file->offload = offload;
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = 0;

		xid = get_xid();
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			// Server close failed, hence offloading it as an async op
			queue_work(serverclose_wq, &cifs_file->serverclose);
			serverclose_offloaded = true;
		}
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	// If the server close was offloaded to the workqueue (on failure), it
	// will handle the final put as well. If it was not offloaded, we
	// need to handle the put here.
	if (!serverclose_offloaded) {
		if (offload)
			queue_work(fileinfo_put_wq, &cifs_file->put);
		else
			cifsFileInfo_put_final(cifs_file);
	}
}

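/*
 * VFS ->open() entry point. Try to reuse a cached (deferred-close) handle
 * first; otherwise open the file on the server, via the SMB1 POSIX path
 * when the server supports it, else with an NT-style create, and attach a
 * new cifsFileInfo to the struct file.
 */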
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	struct cifs_open_info_data data = {};

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
		rc = cifs_get_writable_path(tcon, full_path, FIND_WR_FSUID_ONLY, &cfile);
	} else {
		rc = cifs_get_readable_path(tcon, full_path, &cfile);
	}
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		} else {
			_cifsFileInfo_put(cfile, true, false);
		}
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				    le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			   (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

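/*
 * Reopen @cfile after its handle has been invalidated (e.g. by reconnect).
 * On SMB1 with POSIX extensions this retries a posix open; otherwise an
 * NT-style open with FILE_OPEN disposition is used. If @can_flush is set,
 * dirty pages are written back and the inode refreshed before the oplock
 * and fid are re-established and any byte-range locks are re-pushed.
 */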
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Cannot grab the rename sem here, because various ops, including
	 * those that already have the rename sem, can end up causing
	 * writepage to get called, and if the server was down that means
	 * we end up here, and we can never tell if the caller already has
	 * the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
			~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}

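/*
 * Delayed-work handler for deferred close: once closetimeo expires, drop
 * the deferred-close record and put the reference that was kept to hold
 * the handle open.
 */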
void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}

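/*
 * A close may be deferred only if a close timeout is configured, the
 * handle has a lease granting at least read+handle caching, and no lock
 * was taken on the inode (CIFS_INO_CLOSE_ON_LOCK is clear).
 */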
static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
}

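/*
 * VFS ->release() entry point. If the handle qualifies, defer the actual
 * server close by closetimeo (so a quickly following reopen can reuse the
 * handle); otherwise drop the reference immediately.
 */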
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work
				 * queues new work. So, increase the ref count
				 * to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						      &cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						   &cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

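/*
 * After a reconnect on a persistent-handle share, walk the tcon's open
 * file list and reopen every invalidated handle; if any reopen fails,
 * leave need_reopen_files set so this is retried.
 */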
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	LIST_HEAD(tmp_list);

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

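/*
 * Allocate and initialise a cifsLockInfo describing one byte range,
 * owned by the current thread group; returns NULL on allocation failure.
 */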
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

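/*
 * Wake up every lock request blocked on @lock, removing each waiter from
 * the blocked list so its wait condition becomes true.
 */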
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		      current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to send a request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to send a request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->c.flc_type;

	if ((flock->c.flc_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
		flock->c.flc_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) <0, if an error occurs while setting the lock;
 * 2) 0, if we set the lock and don't need to request to the server;
 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
 * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = FILE_LOCK_DEFERRED + 1;

	if ((flock->c.flc_flags & FL_POSIX) == 0)
		return rc;

	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	return rc;
}

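/*
 * Push all cached byte-range locks for @cfile to the server via
 * LOCKING_ANDX, batching as many ranges per request as the negotiated
 * maxBuf allows, one pass for exclusive locks and one for shared locks.
 */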
1730int
1731cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1732{
1733 unsigned int xid;
1734 int rc = 0, stored_rc;
1735 struct cifsLockInfo *li, *tmp;
1736 struct cifs_tcon *tcon;
1737 unsigned int num, max_num, max_buf;
1738 LOCKING_ANDX_RANGE *buf, *cur;
1739 static const int types[] = {
1740 LOCKING_ANDX_LARGE_FILES,
1741 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1742 };
1743 int i;
1744
1745 xid = get_xid();
1746 tcon = tlink_tcon(cfile->tlink);
1747
1748 /*
1749 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1750 * and check it before using.
1751 */
1752 max_buf = tcon->ses->server->maxBuf;
1753 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1754 free_xid(xid);
1755 return -EINVAL;
1756 }
1757
1758 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1759 PAGE_SIZE);
1760 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1761 PAGE_SIZE);
1762 max_num = (max_buf - sizeof(struct smb_hdr)) /
1763 sizeof(LOCKING_ANDX_RANGE);
1764 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1765 if (!buf) {
1766 free_xid(xid);
1767 return -ENOMEM;
1768 }
1769
1770 for (i = 0; i < 2; i++) {
1771 cur = buf;
1772 num = 0;
1773 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1774 if (li->type != types[i])
1775 continue;
1776 cur->Pid = cpu_to_le16(li->pid);
1777 cur->LengthLow = cpu_to_le32((u32)li->length);
1778 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1779 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1780 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1781 if (++num == max_num) {
1782 stored_rc = cifs_lockv(xid, tcon,
1783 cfile->fid.netfid,
1784 (__u8)li->type, 0, num,
1785 buf);
1786 if (stored_rc)
1787 rc = stored_rc;
1788 cur = buf;
1789 num = 0;
1790 } else
1791 cur++;
1792 }
1793
1794 if (num) {
1795 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1796 (__u8)types[i], 0, num, buf);
1797 if (stored_rc)
1798 rc = stored_rc;
1799 }
1800 }
1801
1802 kfree(buf);
1803 free_xid(xid);
1804 return rc;
1805}
1806
1807static __u32
1808hash_lockowner(fl_owner_t owner)
1809{
1810 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1811}
1812#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1813
1814struct lock_to_push {
1815 struct list_head llist;
1816 __u64 offset;
1817 __u64 length;
1818 __u32 pid;
1819 __u16 netfid;
1820 __u8 type;
1821};
1822
1823#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1824static int
1825cifs_push_posix_locks(struct cifsFileInfo *cfile)
1826{
1827 struct inode *inode = d_inode(cfile->dentry);
1828 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1829 struct file_lock *flock;
1830 struct file_lock_context *flctx = locks_inode_context(inode);
1831 unsigned int count = 0, i;
1832 int rc = 0, xid, type;
1833 struct list_head locks_to_send, *el;
1834 struct lock_to_push *lck, *tmp;
1835 __u64 length;
1836
1837 xid = get_xid();
1838
1839 if (!flctx)
1840 goto out;
1841
1842 spin_lock(&flctx->flc_lock);
1843 list_for_each(el, &flctx->flc_posix) {
1844 count++;
1845 }
1846 spin_unlock(&flctx->flc_lock);
1847
1848 INIT_LIST_HEAD(&locks_to_send);
1849
1850	/*
1851	 * Allocating count locks is enough because no FL_POSIX locks can be
1852	 * added to the list while we hold cinode->lock_sem, which protects
1853	 * the locking operations of this inode.
1854	 */
1855 for (i = 0; i < count; i++) {
1856 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1857 if (!lck) {
1858 rc = -ENOMEM;
1859 goto err_out;
1860 }
1861 list_add_tail(&lck->llist, &locks_to_send);
1862 }
1863
1864 el = locks_to_send.next;
1865 spin_lock(&flctx->flc_lock);
1866 for_each_file_lock(flock, &flctx->flc_posix) {
1867 unsigned char ftype = flock->c.flc_type;
1868
1869 if (el == &locks_to_send) {
1870 /*
1871 * The list ended. We don't have enough allocated
1872 * structures - something is really wrong.
1873 */
1874 cifs_dbg(VFS, "Can't push all brlocks!\n");
1875 break;
1876 }
1877 length = cifs_flock_len(flock);
1878 if (ftype == F_RDLCK || ftype == F_SHLCK)
1879 type = CIFS_RDLCK;
1880 else
1881 type = CIFS_WRLCK;
1882 lck = list_entry(el, struct lock_to_push, llist);
1883 lck->pid = hash_lockowner(flock->c.flc_owner);
1884 lck->netfid = cfile->fid.netfid;
1885 lck->length = length;
1886 lck->type = type;
1887		lck->offset = flock->fl_start;
		el = el->next;	/* advance to the next preallocated entry */
1888	}
1889 spin_unlock(&flctx->flc_lock);
1890
1891 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1892 int stored_rc;
1893
1894 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1895 lck->offset, lck->length, NULL,
1896 lck->type, 0);
1897 if (stored_rc)
1898 rc = stored_rc;
1899 list_del(&lck->llist);
1900 kfree(lck);
1901 }
1902
1903out:
1904 free_xid(xid);
1905 return rc;
1906err_out:
1907 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1908 list_del(&lck->llist);
1909 kfree(lck);
1910 }
1911 goto out;
1912}
1913#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
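
/*
 * Pattern sketch (hypothetical helper, simplified from
 * cifs_push_posix_locks() above): count the locks under flc_lock,
 * preallocate outside it, then fill under the lock again, because
 * kmalloc(GFP_KERNEL) may sleep and so must not run while the spinlock
 * is held.
 */
static int example_snapshot_posix_locks(struct file_lock_context *flctx,
					struct list_head *out)
{
	struct lock_to_push *lck, *tmp;
	struct file_lock *flock;
	unsigned int count = 0, i;
	struct list_head *el;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix)
		count++;
	spin_unlock(&flctx->flc_lock);

	/* sleeping allocations are allowed here, outside the spinlock */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(*lck), GFP_KERNEL);
		if (!lck)
			goto err_out;
		list_add_tail(&lck->llist, out);
	}

	el = out->next;
	spin_lock(&flctx->flc_lock);
	for_each_file_lock(flock, &flctx->flc_posix) {
		if (el == out)
			break;	/* ran out of preallocated entries */
		lck = list_entry(el, struct lock_to_push, llist);
		lck->offset = flock->fl_start;
		lck->length = cifs_flock_len(flock);
		el = el->next;
	}
	spin_unlock(&flctx->flc_lock);
	return 0;

err_out:
	list_for_each_entry_safe(lck, tmp, out, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	return -ENOMEM;
}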
1914
1915static int
1916cifs_push_locks(struct cifsFileInfo *cfile)
1917{
1918 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1919 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1920 int rc = 0;
1921#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1922 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1923#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1924
1925	/* we are going to update can_cache_brlcks here - need write access */
1926 cifs_down_write(&cinode->lock_sem);
1927 if (!cinode->can_cache_brlcks) {
1928 up_write(&cinode->lock_sem);
1929 return rc;
1930 }
1931
1932#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1933 if (cap_unix(tcon->ses) &&
1934 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1935 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1936 rc = cifs_push_posix_locks(cfile);
1937 else
1938#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1939 rc = tcon->ses->server->ops->push_mand_locks(cfile);
1940
1941 cinode->can_cache_brlcks = false;
1942 up_write(&cinode->lock_sem);
1943 return rc;
1944}
1945
1946static void
1947cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1948 bool *wait_flag, struct TCP_Server_Info *server)
1949{
1950 if (flock->c.flc_flags & FL_POSIX)
1951 cifs_dbg(FYI, "Posix\n");
1952 if (flock->c.flc_flags & FL_FLOCK)
1953 cifs_dbg(FYI, "Flock\n");
1954 if (flock->c.flc_flags & FL_SLEEP) {
1955 cifs_dbg(FYI, "Blocking lock\n");
1956 *wait_flag = true;
1957 }
1958 if (flock->c.flc_flags & FL_ACCESS)
1959 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1960 if (flock->c.flc_flags & FL_LEASE)
1961 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1962 if (flock->c.flc_flags &
1963 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1964 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1965 cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1966 flock->c.flc_flags);
1967
1968 *type = server->vals->large_lock_type;
1969 if (lock_is_write(flock)) {
1970 cifs_dbg(FYI, "F_WRLCK\n");
1971 *type |= server->vals->exclusive_lock_type;
1972 *lock = 1;
1973 } else if (lock_is_unlock(flock)) {
1974 cifs_dbg(FYI, "F_UNLCK\n");
1975 *type |= server->vals->unlock_lock_type;
1976 *unlock = 1;
1977 /* Check if unlock includes more than one lock range */
1978 } else if (lock_is_read(flock)) {
1979 cifs_dbg(FYI, "F_RDLCK\n");
1980 *type |= server->vals->shared_lock_type;
1981 *lock = 1;
1982 } else if (flock->c.flc_type == F_EXLCK) {
1983 cifs_dbg(FYI, "F_EXLCK\n");
1984 *type |= server->vals->exclusive_lock_type;
1985 *lock = 1;
1986 } else if (flock->c.flc_type == F_SHLCK) {
1987 cifs_dbg(FYI, "F_SHLCK\n");
1988 *type |= server->vals->shared_lock_type;
1989 *lock = 1;
1990 } else
1991 cifs_dbg(FYI, "Unknown type of lock\n");
1992}
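
/*
 * Decoding sketch (hypothetical caller and values): for an
 * fcntl(F_SETLKW, F_WRLCK) request, flock->c.flc_flags has
 * FL_POSIX | FL_SLEEP set and flc_type is F_WRLCK, so cifs_read_flock()
 * above yields wait_flag = true, *lock = 1 and
 * *type = large_lock_type | exclusive_lock_type.
 */
static void example_decode_write_lock(struct file_lock *flock,
				      struct TCP_Server_Info *server)
{
	__u32 type;
	int lock = 0, unlock = 0;
	bool wait_flag = false;

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag, server);
	/* wait_flag now reflects FL_SLEEP; type carries the server's
	 * large/exclusive lock bits; lock == 1 requests a lock. */
}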
1993
1994static int
1995cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1996 bool wait_flag, bool posix_lck, unsigned int xid)
1997{
1998 int rc = 0;
1999 __u64 length = cifs_flock_len(flock);
2000 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2001 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2002 struct TCP_Server_Info *server = tcon->ses->server;
2003#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2004 __u16 netfid = cfile->fid.netfid;
2005
2006 if (posix_lck) {
2007 int posix_lock_type;
2008
2009 rc = cifs_posix_lock_test(file, flock);
2010 if (!rc)
2011 return rc;
2012
2013 if (type & server->vals->shared_lock_type)
2014 posix_lock_type = CIFS_RDLCK;
2015 else
2016 posix_lock_type = CIFS_WRLCK;
2017 rc = CIFSSMBPosixLock(xid, tcon, netfid,
2018 hash_lockowner(flock->c.flc_owner),
2019 flock->fl_start, length, flock,
2020 posix_lock_type, wait_flag);
2021 return rc;
2022 }
2023#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2024
2025 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2026 if (!rc)
2027 return rc;
2028
2029 /* BB we could chain these into one lock request BB */
2030 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2031 1, 0, false);
2032 if (rc == 0) {
2033 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2034 type, 0, 1, false);
2035 flock->c.flc_type = F_UNLCK;
2036 if (rc != 0)
2037 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2038 rc);
2039 return 0;
2040 }
2041
2042 if (type & server->vals->shared_lock_type) {
2043 flock->c.flc_type = F_WRLCK;
2044 return 0;
2045 }
2046
2047 type &= ~server->vals->exclusive_lock_type;
2048
2049 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2050 type | server->vals->shared_lock_type,
2051 1, 0, false);
2052 if (rc == 0) {
2053 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2054 type | server->vals->shared_lock_type, 0, 1, false);
2055 flock->c.flc_type = F_RDLCK;
2056 if (rc != 0)
2057 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2058 rc);
2059 } else
2060 flock->c.flc_type = F_WRLCK;
2061
2062 return 0;
2063}
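
/*
 * Worked example of the probe above (hypothetical range): to test an
 * exclusive lock on bytes [0, 100), cifs_getlk() first tries to take that
 * exclusive lock on the server. Success means nothing conflicts, so the
 * range is unlocked again and F_UNLCK is reported. On failure, a shared
 * probe distinguishes the holder: if the shared probe also fails, the
 * conflicting lock is exclusive (report F_WRLCK); if it succeeds, the
 * conflicting lock is shared (release the probe and report F_RDLCK).
 */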
2064
2065void
2066cifs_move_llist(struct list_head *source, struct list_head *dest)
2067{
2068 struct list_head *li, *tmp;
2069 list_for_each_safe(li, tmp, source)
2070 list_move(li, dest);
2071}
2072
2073void
2074cifs_free_llist(struct list_head *llist)
2075{
2076 struct cifsLockInfo *li, *tmp;
2077 list_for_each_entry_safe(li, tmp, llist, llist) {
2078 cifs_del_lock_waiters(li);
2079 list_del(&li->llist);
2080 kfree(li);
2081 }
2082}
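
/*
 * Usage sketch for the two helpers above (the hypothetical send_unlock()
 * stands in for the cifs_lockv() call in cifs_unlock_range() below):
 *
 *	LIST_HEAD(tmp_llist);
 *
 *	list_move(&li->llist, &tmp_llist);
 *	if (send_unlock() != 0)
 *		cifs_move_llist(&tmp_llist, &cfile->llist->locks);
 *	else
 *		cifs_free_llist(&tmp_llist);
 */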
2083
2084#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2085int
2086cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2087 unsigned int xid)
2088{
2089 int rc = 0, stored_rc;
2090 static const int types[] = {
2091 LOCKING_ANDX_LARGE_FILES,
2092 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2093 };
2094 unsigned int i;
2095 unsigned int max_num, num, max_buf;
2096 LOCKING_ANDX_RANGE *buf, *cur;
2097 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2098 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2099 struct cifsLockInfo *li, *tmp;
2100 __u64 length = cifs_flock_len(flock);
2101 LIST_HEAD(tmp_llist);
2102
2103 /*
2104 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2105 * and check it before using.
2106 */
2107 max_buf = tcon->ses->server->maxBuf;
2108 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2109 return -EINVAL;
2110
2111 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2112 PAGE_SIZE);
2113 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2114 PAGE_SIZE);
2115 max_num = (max_buf - sizeof(struct smb_hdr)) /
2116 sizeof(LOCKING_ANDX_RANGE);
2117 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2118 if (!buf)
2119 return -ENOMEM;
2120
2121 cifs_down_write(&cinode->lock_sem);
2122 for (i = 0; i < 2; i++) {
2123 cur = buf;
2124 num = 0;
2125 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2126 if (flock->fl_start > li->offset ||
2127 (flock->fl_start + length) <
2128 (li->offset + li->length))
2129 continue;
2130 if (current->tgid != li->pid)
2131 continue;
2132 if (types[i] != li->type)
2133 continue;
2134 if (cinode->can_cache_brlcks) {
2135 /*
2136 * We can cache brlock requests - simply remove
2137 * a lock from the file's list.
2138 */
2139 list_del(&li->llist);
2140 cifs_del_lock_waiters(li);
2141 kfree(li);
2142 continue;
2143 }
2144 cur->Pid = cpu_to_le16(li->pid);
2145 cur->LengthLow = cpu_to_le32((u32)li->length);
2146 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2147 cur->OffsetLow = cpu_to_le32((u32)li->offset);
2148 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2149			/*
2150			 * Save the lock here so that we can add it back to the
2151			 * file's list if the unlock range request fails on the
2152			 * server.
2153			 */
2154 list_move(&li->llist, &tmp_llist);
2155 if (++num == max_num) {
2156 stored_rc = cifs_lockv(xid, tcon,
2157 cfile->fid.netfid,
2158 li->type, num, 0, buf);
2159 if (stored_rc) {
2160 /*
2161 * We failed on the unlock range
2162 * request - add all locks from the tmp
2163 * list to the head of the file's list.
2164 */
2165 cifs_move_llist(&tmp_llist,
2166 &cfile->llist->locks);
2167 rc = stored_rc;
2168 } else
2169 /*
2170				 * The unlock range request succeeded -
2171 * free the tmp list.
2172 */
2173 cifs_free_llist(&tmp_llist);
2174 cur = buf;
2175 num = 0;
2176 } else
2177 cur++;
2178 }
2179 if (num) {
2180 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2181 types[i], num, 0, buf);
2182 if (stored_rc) {
2183 cifs_move_llist(&tmp_llist,
2184 &cfile->llist->locks);
2185 rc = stored_rc;
2186 } else
2187 cifs_free_llist(&tmp_llist);
2188 }
2189 }
2190
2191 up_write(&cinode->lock_sem);
2192 kfree(buf);
2193 return rc;
2194}
2195#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2196
2197static int
2198cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2199 bool wait_flag, bool posix_lck, int lock, int unlock,
2200 unsigned int xid)
2201{
2202 int rc = 0;
2203 __u64 length = cifs_flock_len(flock);
2204 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2205 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2206 struct TCP_Server_Info *server = tcon->ses->server;
2207 struct inode *inode = d_inode(cfile->dentry);
2208
2209#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2210 if (posix_lck) {
2211 int posix_lock_type;
2212
2213 rc = cifs_posix_lock_set(file, flock);
2214 if (rc <= FILE_LOCK_DEFERRED)
2215 return rc;
2216
2217 if (type & server->vals->shared_lock_type)
2218 posix_lock_type = CIFS_RDLCK;
2219 else
2220 posix_lock_type = CIFS_WRLCK;
2221
2222 if (unlock == 1)
2223 posix_lock_type = CIFS_UNLCK;
2224
2225 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2226 hash_lockowner(flock->c.flc_owner),
2227 flock->fl_start, length,
2228 NULL, posix_lock_type, wait_flag);
2229 goto out;
2230 }
2231#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2232 if (lock) {
2233 struct cifsLockInfo *lock;
2234
2235 lock = cifs_lock_init(flock->fl_start, length, type,
2236 flock->c.flc_flags);
2237 if (!lock)
2238 return -ENOMEM;
2239
2240 rc = cifs_lock_add_if(cfile, lock, wait_flag);
2241 if (rc < 0) {
2242 kfree(lock);
2243 return rc;
2244 }
2245 if (!rc)
2246 goto out;
2247
2248		/*
2249		 * A Windows 7 server can delay breaking a lease from read to
2250		 * None if we set a byte-range lock on a file - break it
2251		 * explicitly before sending the lock to the server to be sure
2252		 * the next read won't conflict with non-overlapping locks due
2253		 * to page reading.
2254		 */
2255 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2256 CIFS_CACHE_READ(CIFS_I(inode))) {
2257 cifs_zap_mapping(inode);
2258 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2259 inode);
2260 CIFS_I(inode)->oplock = 0;
2261 }
2262
2263 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2264 type, 1, 0, wait_flag);
2265 if (rc) {
2266 kfree(lock);
2267 return rc;
2268 }
2269
2270 cifs_lock_add(cfile, lock);
2271 } else if (unlock)
2272 rc = server->ops->mand_unlock_range(cfile, flock, xid);
2273
2274out:
2275 if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2276 /*
2277 * If this is a request to remove all locks because we
2278 * are closing the file, it doesn't matter if the
2279 * unlocking failed as both cifs.ko and the SMB server
2280 * remove the lock on file close
2281 */
2282 if (rc) {
2283 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2284 if (!(flock->c.flc_flags & FL_CLOSE))
2285 return rc;
2286 }
2287 rc = locks_lock_file_wait(file, flock);
2288 }
2289 return rc;
2290}
2291
2292int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2293{
2294 int rc, xid;
2295 int lock = 0, unlock = 0;
2296 bool wait_flag = false;
2297 bool posix_lck = false;
2298 struct cifs_sb_info *cifs_sb;
2299 struct cifs_tcon *tcon;
2300 struct cifsFileInfo *cfile;
2301 __u32 type;
2302
2303 xid = get_xid();
2304
2305 if (!(fl->c.flc_flags & FL_FLOCK)) {
2306 rc = -ENOLCK;
2307 free_xid(xid);
2308 return rc;
2309 }
2310
2311 cfile = (struct cifsFileInfo *)file->private_data;
2312 tcon = tlink_tcon(cfile->tlink);
2313
2314 cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2315 tcon->ses->server);
2316 cifs_sb = CIFS_FILE_SB(file);
2317
2318 if (cap_unix(tcon->ses) &&
2319 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2320 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2321 posix_lck = true;
2322
2323 if (!lock && !unlock) {
2324		/*
2325		 * Neither a lock nor an unlock was requested - nothing to do
2326		 * since we do not know what the operation is.
2327		 */
2328 rc = -EOPNOTSUPP;
2329 free_xid(xid);
2330 return rc;
2331 }
2332
2333 rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2334 xid);
2335 free_xid(xid);
2336 return rc;
2339}
2340
2341int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2342{
2343 int rc, xid;
2344 int lock = 0, unlock = 0;
2345 bool wait_flag = false;
2346 bool posix_lck = false;
2347 struct cifs_sb_info *cifs_sb;
2348 struct cifs_tcon *tcon;
2349 struct cifsFileInfo *cfile;
2350 __u32 type;
2351
2352 rc = -EACCES;
2353 xid = get_xid();
2354
2355	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2356		 flock->c.flc_type, flock->c.flc_flags,
2357		 (long long)flock->fl_start,
2358		 (long long)flock->fl_end);
2359
2360 cfile = (struct cifsFileInfo *)file->private_data;
2361 tcon = tlink_tcon(cfile->tlink);
2362
2363 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2364 tcon->ses->server);
2365 cifs_sb = CIFS_FILE_SB(file);
2366 set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2367
2368 if (cap_unix(tcon->ses) &&
2369 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2370 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2371 posix_lck = true;
2372 /*
2373 * BB add code here to normalize offset and length to account for
2374	 * negative length, which we cannot accept over the wire.
2375 */
2376 if (IS_GETLK(cmd)) {
2377 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2378 free_xid(xid);
2379 return rc;
2380 }
2381
2382 if (!lock && !unlock) {
2383		/*
2384		 * Neither a lock nor an unlock was requested - nothing to do
2385		 * since we do not know what the operation is.
2386		 */
2387 free_xid(xid);
2388 return -EOPNOTSUPP;
2389 }
2390
2391 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2392 xid);
2393 free_xid(xid);
2394 return rc;
2395}
2396
2397void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2398 bool was_async)
2399{
2400 struct netfs_io_request *wreq = wdata->rreq;
2401 struct netfs_inode *ictx = netfs_inode(wreq->inode);
2402 loff_t wrend;
2403
2404 if (result > 0) {
2405 wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2406
2407 if (wrend > ictx->zero_point &&
2408 (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2409 wdata->rreq->origin == NETFS_DIO_WRITE))
2410 ictx->zero_point = wrend;
2411 if (wrend > ictx->remote_i_size)
2412 netfs_resize_file(ictx, wrend, true);
2413 }
2414
2415 netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
2416}
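
/*
 * Worked example for the arithmetic above (hypothetical numbers): a
 * subrequest that started at offset 4096 with 512 bytes already
 * transferred and a result of 1024 ends at
 * wrend = 4096 + 512 + 1024 = 5632. If the server-side file size was
 * 5000, remote_i_size grows to 5632; for unbuffered/DIO writes the
 * zero_point is pushed out the same way.
 */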
2417
2418struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2419 bool fsuid_only)
2420{
2421 struct cifsFileInfo *open_file = NULL;
2422 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2423
2424 /* only filter by fsuid on multiuser mounts */
2425 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2426 fsuid_only = false;
2427
2428 spin_lock(&cifs_inode->open_file_lock);
2429	/* we could simply get the first list entry since write-only entries
2430	   are always at the end of the list, but since the first entry might
2431	   have a close pending, we go through the whole list */
2432 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2433 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2434 continue;
2435 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2436 if ((!open_file->invalidHandle)) {
2437 /* found a good file */
2438 /* lock it so it will not be closed on us */
2439 cifsFileInfo_get(open_file);
2440 spin_unlock(&cifs_inode->open_file_lock);
2441 return open_file;
2442 } /* else might as well continue, and look for
2443 another, or simply have the caller reopen it
2444 again rather than trying to fix this handle */
2445 } else /* write only file */
2446 break; /* write only files are last so must be done */
2447 }
2448 spin_unlock(&cifs_inode->open_file_lock);
2449 return NULL;
2450}
2451
2452/* Return -EBADF if no handle is found and a general rc otherwise */
2453int
2454cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2455 struct cifsFileInfo **ret_file)
2456{
2457 struct cifsFileInfo *open_file, *inv_file = NULL;
2458 struct cifs_sb_info *cifs_sb;
2459 bool any_available = false;
2460 int rc = -EBADF;
2461 unsigned int refind = 0;
2462 bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2463 bool with_delete = flags & FIND_WR_WITH_DELETE;
2464 *ret_file = NULL;
2465
2466	/*
2467	 * Having a null inode here (because mapping->host was set to zero by
2468	 * the VFS or MM) should not happen, but we had reports of an oops (due
2469	 * to it being zero) during stress test cases, so we need to check for it
2470	 */
2471
2472 if (cifs_inode == NULL) {
2473		cifs_dbg(VFS, "Null inode passed to cifs_get_writable_file\n");
2474 dump_stack();
2475 return rc;
2476 }
2477
2478 cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2479
2480 /* only filter by fsuid on multiuser mounts */
2481 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2482 fsuid_only = false;
2483
2484 spin_lock(&cifs_inode->open_file_lock);
2485refind_writable:
2486 if (refind > MAX_REOPEN_ATT) {
2487 spin_unlock(&cifs_inode->open_file_lock);
2488 return rc;
2489 }
2490 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2491 if (!any_available && open_file->pid != current->tgid)
2492 continue;
2493 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2494 continue;
2495 if (with_delete && !(open_file->fid.access & DELETE))
2496 continue;
2497 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2498 if (!open_file->invalidHandle) {
2499 /* found a good writable file */
2500 cifsFileInfo_get(open_file);
2501 spin_unlock(&cifs_inode->open_file_lock);
2502 *ret_file = open_file;
2503 return 0;
2504 } else {
2505 if (!inv_file)
2506 inv_file = open_file;
2507 }
2508 }
2509 }
2510 /* couldn't find usable FH with same pid, try any available */
2511 if (!any_available) {
2512 any_available = true;
2513 goto refind_writable;
2514 }
2515
2516 if (inv_file) {
2517 any_available = false;
2518 cifsFileInfo_get(inv_file);
2519 }
2520
2521 spin_unlock(&cifs_inode->open_file_lock);
2522
2523 if (inv_file) {
2524 rc = cifs_reopen_file(inv_file, false);
2525 if (!rc) {
2526 *ret_file = inv_file;
2527 return 0;
2528 }
2529
2530 spin_lock(&cifs_inode->open_file_lock);
2531 list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2532 spin_unlock(&cifs_inode->open_file_lock);
2533 cifsFileInfo_put(inv_file);
2534 ++refind;
2535 inv_file = NULL;
2536 spin_lock(&cifs_inode->open_file_lock);
2537 goto refind_writable;
2538 }
2539
2540 return rc;
2541}
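
/*
 * Usage sketch (hypothetical caller): look up a writable handle, use it,
 * then drop the reference that cifs_get_writable_file() took for us.
 */
static int example_with_writable_handle(struct cifsInodeInfo *cinode)
{
	struct cifsFileInfo *cfile;
	int rc;

	rc = cifs_get_writable_file(cinode, FIND_WR_ANY, &cfile);
	if (rc)
		return rc;	/* -EBADF if no usable handle was found */

	/* ... issue I/O through cfile->fid here ... */

	cifsFileInfo_put(cfile);
	return 0;
}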
2542
2543struct cifsFileInfo *
2544find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2545{
2546 struct cifsFileInfo *cfile;
2547 int rc;
2548
2549 rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2550 if (rc)
2551 cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2552
2553 return cfile;
2554}
2555
2556int
2557cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2558 int flags,
2559 struct cifsFileInfo **ret_file)
2560{
2561 struct cifsFileInfo *cfile;
2562 void *page = alloc_dentry_path();
2563
2564 *ret_file = NULL;
2565
2566 spin_lock(&tcon->open_file_lock);
2567 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2568 struct cifsInodeInfo *cinode;
2569 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2570 if (IS_ERR(full_path)) {
2571 spin_unlock(&tcon->open_file_lock);
2572 free_dentry_path(page);
2573 return PTR_ERR(full_path);
2574 }
2575 if (strcmp(full_path, name))
2576 continue;
2577
2578 cinode = CIFS_I(d_inode(cfile->dentry));
2579 spin_unlock(&tcon->open_file_lock);
2580 free_dentry_path(page);
2581 return cifs_get_writable_file(cinode, flags, ret_file);
2582 }
2583
2584 spin_unlock(&tcon->open_file_lock);
2585 free_dentry_path(page);
2586 return -ENOENT;
2587}
2588
2589int
2590cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2591 struct cifsFileInfo **ret_file)
2592{
2593 struct cifsFileInfo *cfile;
2594 void *page = alloc_dentry_path();
2595
2596 *ret_file = NULL;
2597
2598 spin_lock(&tcon->open_file_lock);
2599 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2600 struct cifsInodeInfo *cinode;
2601 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2602 if (IS_ERR(full_path)) {
2603 spin_unlock(&tcon->open_file_lock);
2604 free_dentry_path(page);
2605 return PTR_ERR(full_path);
2606 }
2607 if (strcmp(full_path, name))
2608 continue;
2609
2610 cinode = CIFS_I(d_inode(cfile->dentry));
2611 spin_unlock(&tcon->open_file_lock);
2612 free_dentry_path(page);
2613 *ret_file = find_readable_file(cinode, 0);
2614 return *ret_file ? 0 : -ENOENT;
2615 }
2616
2617 spin_unlock(&tcon->open_file_lock);
2618 free_dentry_path(page);
2619 return -ENOENT;
2620}
2621
2622/*
2623 * Flush data for a file opened in strict cache mode.
2624 */
2625int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2626 int datasync)
2627{
2628 unsigned int xid;
2629 int rc = 0;
2630 struct cifs_tcon *tcon;
2631 struct TCP_Server_Info *server;
2632 struct cifsFileInfo *smbfile = file->private_data;
2633 struct inode *inode = file_inode(file);
2634 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2635
2636 rc = file_write_and_wait_range(file, start, end);
2637 if (rc) {
2638 trace_cifs_fsync_err(inode->i_ino, rc);
2639 return rc;
2640 }
2641
2642 xid = get_xid();
2643
2644 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2645 file, datasync);
2646
2647 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2648 rc = cifs_zap_mapping(inode);
2649 if (rc) {
2650 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2651 rc = 0; /* don't care about it in fsync */
2652 }
2653 }
2654
2655 tcon = tlink_tcon(smbfile->tlink);
2656 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2657 server = tcon->ses->server;
2658 if (server->ops->flush == NULL) {
2659 rc = -ENOSYS;
2660 goto strict_fsync_exit;
2661 }
2662
2663 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2664 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2665 if (smbfile) {
2666 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2667 cifsFileInfo_put(smbfile);
2668 } else
2669 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2670 } else
2671 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2672 }
2673
2674strict_fsync_exit:
2675 free_xid(xid);
2676 return rc;
2677}
2678
2679/*
2680 * Flush data for a file not opened in strict cache mode.
2681 */
2682int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2683{
2684 unsigned int xid;
2685 int rc = 0;
2686 struct cifs_tcon *tcon;
2687 struct TCP_Server_Info *server;
2688 struct cifsFileInfo *smbfile = file->private_data;
2689 struct inode *inode = file_inode(file);
2690 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2691
2692 rc = file_write_and_wait_range(file, start, end);
2693 if (rc) {
2694 trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2695 return rc;
2696 }
2697
2698 xid = get_xid();
2699
2700 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2701 file, datasync);
2702
2703 tcon = tlink_tcon(smbfile->tlink);
2704 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2705 server = tcon->ses->server;
2706 if (server->ops->flush == NULL) {
2707 rc = -ENOSYS;
2708 goto fsync_exit;
2709 }
2710
2711 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2712 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2713 if (smbfile) {
2714 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2715 cifsFileInfo_put(smbfile);
2716 } else
2717 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2718 } else
2719 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2720 }
2721
2722fsync_exit:
2723 free_xid(xid);
2724 return rc;
2725}
2726
2727/*
2728 * As the file closes, flush all cached write data for this inode,
2729 * checking for write-behind errors.
2730 */
2731int cifs_flush(struct file *file, fl_owner_t id)
2732{
2733 struct inode *inode = file_inode(file);
2734 int rc = 0;
2735
2736 if (file->f_mode & FMODE_WRITE)
2737 rc = filemap_write_and_wait(inode->i_mapping);
2738
2739 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2740 if (rc) {
2741 /* get more nuanced writeback errors */
2742 rc = filemap_check_wb_err(file->f_mapping, 0);
2743 trace_cifs_flush_err(inode->i_ino, rc);
2744 }
2745 return rc;
2746}
2747
2748static ssize_t
2749cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2750{
2751 struct file *file = iocb->ki_filp;
2752 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2753 struct inode *inode = file->f_mapping->host;
2754 struct cifsInodeInfo *cinode = CIFS_I(inode);
2755 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2756 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2757 ssize_t rc;
2758
2759 rc = netfs_start_io_write(inode);
2760 if (rc < 0)
2761 return rc;
2762
2763	/*
2764	 * We need to hold the semaphore to be sure nobody modifies the lock
2765	 * list by adding a brlock that prevents writing.
2766	 */
2767 down_read(&cinode->lock_sem);
2768
2769 rc = generic_write_checks(iocb, from);
2770 if (rc <= 0)
2771 goto out;
2772
2773 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2774 (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2775 server->vals->exclusive_lock_type, 0,
2776 NULL, CIFS_WRITE_OP))) {
2777 rc = -EACCES;
2778 goto out;
2779 }
2780
2781 rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2782
2783out:
2784 up_read(&cinode->lock_sem);
2785 netfs_end_io_write(inode);
2786 if (rc > 0)
2787 rc = generic_write_sync(iocb, rc);
2788 return rc;
2789}
2790
2791ssize_t
2792cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2793{
2794 struct inode *inode = file_inode(iocb->ki_filp);
2795 struct cifsInodeInfo *cinode = CIFS_I(inode);
2796 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2797 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2798 iocb->ki_filp->private_data;
2799 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2800 ssize_t written;
2801
2802 written = cifs_get_writer(cinode);
2803 if (written)
2804 return written;
2805
2806 if (CIFS_CACHE_WRITE(cinode)) {
2807 if (cap_unix(tcon->ses) &&
2808 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2809 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2810 written = netfs_file_write_iter(iocb, from);
2811 goto out;
2812 }
2813 written = cifs_writev(iocb, from);
2814 goto out;
2815 }
2816	/*
2817	 * For non-oplocked files in strict cache mode we need to write the
2818	 * data to the server exactly from pos to pos+len-1 rather than flush
2819	 * all affected pages because it may cause an error with mandatory
2820	 * locks on these pages but not on the region from pos to pos+len-1.
2821	 */
2822 written = netfs_file_write_iter(iocb, from);
2823 if (CIFS_CACHE_READ(cinode)) {
2824 /*
2825 * We have read level caching and we have just sent a write
2826 * request to the server thus making data in the cache stale.
2827 * Zap the cache and set oplock/lease level to NONE to avoid
2828 * reading stale data from the cache. All subsequent read
2829 * operations will read new data from the server.
2830 */
2831 cifs_zap_mapping(inode);
2832 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2833 inode);
2834 cinode->oplock = 0;
2835 }
2836out:
2837 cifs_put_writer(cinode);
2838 return written;
2839}
2840
2841ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2842{
2843 ssize_t rc;
2844 struct inode *inode = file_inode(iocb->ki_filp);
2845
2846 if (iocb->ki_flags & IOCB_DIRECT)
2847 return netfs_unbuffered_read_iter(iocb, iter);
2848
2849 rc = cifs_revalidate_mapping(inode);
2850 if (rc)
2851 return rc;
2852
2853 return netfs_file_read_iter(iocb, iter);
2854}
2855
2856ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2857{
2858 struct inode *inode = file_inode(iocb->ki_filp);
2859 struct cifsInodeInfo *cinode = CIFS_I(inode);
2860 ssize_t written;
2861 int rc;
2862
2863 if (iocb->ki_filp->f_flags & O_DIRECT) {
2864 written = netfs_unbuffered_write_iter(iocb, from);
2865 if (written > 0 && CIFS_CACHE_READ(cinode)) {
2866 cifs_zap_mapping(inode);
2867 cifs_dbg(FYI,
2868 "Set no oplock for inode=%p after a write operation\n",
2869 inode);
2870 cinode->oplock = 0;
2871 }
2872 return written;
2873 }
2874
2875 written = cifs_get_writer(cinode);
2876 if (written)
2877 return written;
2878
2879 written = netfs_file_write_iter(iocb, from);
2880
2881 if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2882 rc = filemap_fdatawrite(inode->i_mapping);
2883 if (rc)
2884 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2885 rc, inode);
2886 }
2887
2888 cifs_put_writer(cinode);
2889 return written;
2890}
2891
2892ssize_t
2893cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2894{
2895 struct inode *inode = file_inode(iocb->ki_filp);
2896 struct cifsInodeInfo *cinode = CIFS_I(inode);
2897 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2898 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2899 iocb->ki_filp->private_data;
2900 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2901 int rc = -EACCES;
2902
2903	/*
2904	 * In strict cache mode we need to read from the server all the time
2905	 * if we don't have a level II oplock because the server can delay
2906	 * the mtime change - so we can't decide whether to invalidate the
2907	 * inode. We can also fail with page reading if there are mandatory
2908	 * locks on pages affected by this read but not on the region from
2909	 * pos to pos+len-1.
2910	 */
2911 if (!CIFS_CACHE_READ(cinode))
2912 return netfs_unbuffered_read_iter(iocb, to);
2913
2914 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
2915 if (iocb->ki_flags & IOCB_DIRECT)
2916 return netfs_unbuffered_read_iter(iocb, to);
2917 return netfs_buffered_read_iter(iocb, to);
2918 }
2919
2920	/*
2921	 * We need to hold the semaphore to be sure nobody modifies the lock
2922	 * list by adding a brlock that prevents reading.
2923	 */
2924 if (iocb->ki_flags & IOCB_DIRECT) {
2925 rc = netfs_start_io_direct(inode);
2926 if (rc < 0)
2927 goto out;
2928 rc = -EACCES;
2929 down_read(&cinode->lock_sem);
2930 if (!cifs_find_lock_conflict(
2931 cfile, iocb->ki_pos, iov_iter_count(to),
2932 tcon->ses->server->vals->shared_lock_type,
2933 0, NULL, CIFS_READ_OP))
2934 rc = netfs_unbuffered_read_iter_locked(iocb, to);
2935 up_read(&cinode->lock_sem);
2936 netfs_end_io_direct(inode);
2937 } else {
2938 rc = netfs_start_io_read(inode);
2939 if (rc < 0)
2940 goto out;
2941 rc = -EACCES;
2942 down_read(&cinode->lock_sem);
2943 if (!cifs_find_lock_conflict(
2944 cfile, iocb->ki_pos, iov_iter_count(to),
2945 tcon->ses->server->vals->shared_lock_type,
2946 0, NULL, CIFS_READ_OP))
2947 rc = filemap_read(iocb, to, 0);
2948 up_read(&cinode->lock_sem);
2949 netfs_end_io_read(inode);
2950 }
2951out:
2952 return rc;
2953}
2954
2955static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2956{
2957 return netfs_page_mkwrite(vmf, NULL);
2958}
2959
2960static const struct vm_operations_struct cifs_file_vm_ops = {
2961 .fault = filemap_fault,
2962 .map_pages = filemap_map_pages,
2963 .page_mkwrite = cifs_page_mkwrite,
2964};
2965
2966int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2967{
2968 int xid, rc = 0;
2969 struct inode *inode = file_inode(file);
2970
2971 xid = get_xid();
2972
2973 if (!CIFS_CACHE_READ(CIFS_I(inode)))
2974 rc = cifs_zap_mapping(inode);
2975 if (!rc)
2976 rc = generic_file_mmap(file, vma);
2977 if (!rc)
2978 vma->vm_ops = &cifs_file_vm_ops;
2979
2980 free_xid(xid);
2981 return rc;
2982}
2983
2984int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2985{
2986 int rc, xid;
2987
2988 xid = get_xid();
2989
2990 rc = cifs_revalidate_file(file);
2991 if (rc)
2992 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
2993 rc);
2994 if (!rc)
2995 rc = generic_file_mmap(file, vma);
2996 if (!rc)
2997 vma->vm_ops = &cifs_file_vm_ops;
2998
2999 free_xid(xid);
3000 return rc;
3001}
3002
3003static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3004{
3005 struct cifsFileInfo *open_file;
3006
3007 spin_lock(&cifs_inode->open_file_lock);
3008 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3009 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3010 spin_unlock(&cifs_inode->open_file_lock);
3011 return 1;
3012 }
3013 }
3014 spin_unlock(&cifs_inode->open_file_lock);
3015 return 0;
3016}
3017
3018/*
3019 * We do not want to update the file size from the server for inodes
3020 * open for write - to avoid races with writepage extending the file.
3021 * In the future we could consider refreshing the inode only on
3022 * increases in the file size, but this is tricky to do without racing
3023 * with writebehind page caching in the current Linux kernel design.
 */
3024bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3025 bool from_readdir)
3026{
3027 if (!cifsInode)
3028 return true;
3029
3030 if (is_inode_writable(cifsInode) ||
3031 ((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3032 /* This inode is open for write at least once */
3033 struct cifs_sb_info *cifs_sb;
3034
3035 cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3036 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3037			/* since there is no page cache to corrupt on direct
3038			   I/O, we can change the size safely */
3039 return true;
3040 }
3041
3042 if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3043 return true;
3044
3045 return false;
3046 } else
3047 return true;
3048}
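
/*
 * Worked example (hypothetical sizes, inode open for write on a non
 * direct-I/O mount): if the local i_size is 8192 because a writer
 * extended the file, and a server response still reports an end of file
 * of 4096, is_size_safe_to_change() returns false and the stale,
 * smaller size is ignored; a server size of 16384 would be accepted,
 * since it only grows the file.
 */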
3049
3050void cifs_oplock_break(struct work_struct *work)
3051{
3052 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3053 oplock_break);
3054 struct inode *inode = d_inode(cfile->dentry);
3055 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3056 struct cifsInodeInfo *cinode = CIFS_I(inode);
3057 struct cifs_tcon *tcon;
3058 struct TCP_Server_Info *server;
3059 struct tcon_link *tlink;
3060 int rc = 0;
3061 bool purge_cache = false, oplock_break_cancelled;
3062 __u64 persistent_fid, volatile_fid;
3063 __u16 net_fid;
3064
3065 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3066 TASK_UNINTERRUPTIBLE);
3067
3068 tlink = cifs_sb_tlink(cifs_sb);
3069 if (IS_ERR(tlink))
3070 goto out;
3071 tcon = tlink_tcon(tlink);
3072 server = tcon->ses->server;
3073
3074 server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3075 cfile->oplock_epoch, &purge_cache);
3076
3077 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3078 cifs_has_mand_locks(cinode)) {
3079 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3080 inode);
3081 cinode->oplock = 0;
3082 }
3083
3084 if (inode && S_ISREG(inode->i_mode)) {
3085 if (CIFS_CACHE_READ(cinode))
3086 break_lease(inode, O_RDONLY);
3087 else
3088 break_lease(inode, O_WRONLY);
3089 rc = filemap_fdatawrite(inode->i_mapping);
3090 if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3091 rc = filemap_fdatawait(inode->i_mapping);
3092 mapping_set_error(inode->i_mapping, rc);
3093 cifs_zap_mapping(inode);
3094 }
3095 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3096 if (CIFS_CACHE_WRITE(cinode))
3097 goto oplock_break_ack;
3098 }
3099
3100 rc = cifs_push_locks(cfile);
3101 if (rc)
3102 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3103
3104oplock_break_ack:
3105	/*
3106	 * When an oplock break is received and there are no active file
3107	 * handles, only cached ones, schedule the deferred close immediately
3108	 * so that a new open will not use the cached handle.
3109	 */
3110
3111 if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3112 cifs_close_deferred_file(cinode);
3113
3114 persistent_fid = cfile->fid.persistent_fid;
3115 volatile_fid = cfile->fid.volatile_fid;
3116 net_fid = cfile->fid.netfid;
3117 oplock_break_cancelled = cfile->oplock_break_cancelled;
3118
3119	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
3120 /*
3121 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3122 * an acknowledgment to be sent when the file has already been closed.
3123 */
3124 spin_lock(&cinode->open_file_lock);
3125	/* check that the list is non-empty, since this can race with kill_sb calling tree disconnect */
3126 if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3127 spin_unlock(&cinode->open_file_lock);
3128 rc = server->ops->oplock_response(tcon, persistent_fid,
3129 volatile_fid, net_fid, cinode);
3130 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3131 } else
3132 spin_unlock(&cinode->open_file_lock);
3133
3134 cifs_put_tlink(tlink);
3135out:
3136 cifs_done_oplock_break(cinode);
3137}
3138
3139static int cifs_swap_activate(struct swap_info_struct *sis,
3140 struct file *swap_file, sector_t *span)
3141{
3142 struct cifsFileInfo *cfile = swap_file->private_data;
3143 struct inode *inode = swap_file->f_mapping->host;
3144 unsigned long blocks;
3145 long long isize;
3146
3147 cifs_dbg(FYI, "swap activate\n");
3148
3149 if (!swap_file->f_mapping->a_ops->swap_rw)
3150 /* Cannot support swap */
3151 return -EINVAL;
3152
3153 spin_lock(&inode->i_lock);
3154 blocks = inode->i_blocks;
3155 isize = inode->i_size;
3156 spin_unlock(&inode->i_lock);
3157 if (blocks*512 < isize) {
3158 pr_warn("swap activate: swapfile has holes\n");
3159 return -EINVAL;
3160 }
3161 *span = sis->pages;
3162
3163 pr_warn_once("Swap support over SMB3 is experimental\n");
3164
3165 /*
3166 * TODO: consider adding ACL (or documenting how) to prevent other
3167 * users (on this or other systems) from reading it
3168 */
3169
3171 /* TODO: add sk_set_memalloc(inet) or similar */
3172
3173 if (cfile)
3174 cfile->swapfile = true;
3175	/*
3176	 * TODO: since the file is already open, we can't open with DENY_ALL
3177	 * here, but we could add a call to grab a byte-range lock to prevent
3178	 * others from reading or writing the file
3179	 */
3180
3181 sis->flags |= SWP_FS_OPS;
3182 return add_swap_extent(sis, 0, sis->max, 0);
3183}
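
/*
 * Worked example for the holes check above (hypothetical sizes): a
 * 1 MiB swapfile (isize = 1048576) needs at least 1048576 / 512 = 2048
 * allocated blocks; with blocks = 2000, blocks * 512 = 1024000 < isize,
 * so the file is sparse and swap activation is refused.
 */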
3184
3185static void cifs_swap_deactivate(struct file *file)
3186{
3187 struct cifsFileInfo *cfile = file->private_data;
3188
3189 cifs_dbg(FYI, "swap deactivate\n");
3190
3191 /* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3192
3193 if (cfile)
3194 cfile->swapfile = false;
3195
3196	/* do we need to unpin (or unlock) the file? */
3197}
3198
3199/**
3200 * cifs_swap_rw - SMB3 address space operation for swap I/O
3201 * @iocb: target I/O control block
3202 * @iter: I/O buffer
3203 *
3204 * Perform IO to the swap-file. This is much like direct IO.
3205 */
3206static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3207{
3208 ssize_t ret;
3209
3210 if (iov_iter_rw(iter) == READ)
3211 ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3212 else
3213 ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3214 if (ret < 0)
3215 return ret;
3216 return 0;
3217}
3218
3219const struct address_space_operations cifs_addr_ops = {
3220 .read_folio = netfs_read_folio,
3221 .readahead = netfs_readahead,
3222 .writepages = netfs_writepages,
3223 .dirty_folio = netfs_dirty_folio,
3224 .release_folio = netfs_release_folio,
3225 .direct_IO = noop_direct_IO,
3226 .invalidate_folio = netfs_invalidate_folio,
3227 .migrate_folio = filemap_migrate_folio,
3228	/*
3229	 * TODO: investigate whether an is_dirty_writeback helper would be
3230	 * useful here and add one if needed
3231	 */
3232 .swap_activate = cifs_swap_activate,
3233 .swap_deactivate = cifs_swap_deactivate,
3234 .swap_rw = cifs_swap_rw,
3235};
3236
3237/*
3238 * Readahead requires the server to support a buffer large enough to
3239 * contain the header plus one complete page of data. Otherwise, we
3240 * leave readahead out of the address space operations.
3241 */
3242const struct address_space_operations cifs_addr_ops_smallbuf = {
3243 .read_folio = netfs_read_folio,
3244 .writepages = netfs_writepages,
3245 .dirty_folio = netfs_dirty_folio,
3246 .release_folio = netfs_release_folio,
3247 .invalidate_folio = netfs_invalidate_folio,
3248 .migrate_folio = filemap_migrate_folio,
3249};