/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 * Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/highmem.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

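/*
 * Allocate a mid (multiplex id) entry to track an outstanding request on
 * @server. The entry is returned in MID_REQUEST_ALLOCATED state with a
 * default callback that wakes the allocating task.
 */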
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	if (temp == NULL)
		return temp;
	else {
		memset(temp, 0, sizeof(struct mid_q_entry));
		temp->mid = get_mid(smb_buffer);
		temp->pid = current->pid;
		temp->command = cpu_to_le16(smb_buffer->Command);
		cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
		/* when mid allocated can be before when sent */
		temp->when_alloc = jiffies;
		temp->server = server;

		/*
		 * The default is for the mid to be synchronous, so the
		 * default callback just wakes up the current task.
		 */
		temp->callback = cifs_wake_up_task;
		temp->callback_data = current;
	}

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

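/*
 * Tear down a mid: release its response buffer (large or small) and
 * return the entry to the mempool. Under CONFIG_CIFS_STATS2 this also
 * logs commands that took longer than a second to complete.
 */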
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}

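/*
 * Unlink a mid from the pending queue and free it. The caller must not
 * touch the mid after this returns.
 */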
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @iov: Pointer to array of kvecs
 * @n_vec: length of kvec array
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
	      size_t *sent)
{
	int rc = 0;
	int i = 0;
	struct msghdr smb_msg;
	unsigned int remaining;
	size_t first_vec = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	remaining = 0;
	for (i = 0; i < n_vec; i++)
		remaining += iov[i].iov_len;

	i = 0;
	while (remaining) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, remaining);
		if (rc == -EAGAIN) {
			i++;
			if (i >= 14 || (!server->noblocksnd && (i > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				rc = -EAGAIN;
				break;
			}
			msleep(1 << i);
			continue;
		}

		if (rc < 0)
			break;

		/* send was at least partially successful */
		*sent += rc;

		if (rc == remaining) {
			remaining = 0;
			break;
		}

		if (rc > remaining) {
			cifs_dbg(VFS, "sent %d requested %u\n", rc, remaining);
			break;
		}

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		remaining -= rc;

		/* the line below resets i */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}

		i = 0; /* in case we get ENOSPC on the next send */
		rc = 0;
	}
	return rc;
}

/**
 * cifs_rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
 * @rqst: pointer to smb_rqst
 * @idx: index into the array of the page
 * @iov: pointer to struct kvec that will hold the result
 *
 * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
 * The page will be kmapped and the address placed into iov_base. The length
 * is then set to the full page size, or to rq_tailsz for the last page.
 */
void
cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
		       struct kvec *iov)
{
	/*
	 * FIXME: We could avoid this kmap altogether if we used
	 * kernel_sendpage instead of kernel_sendmsg. That will only
	 * work if signing is disabled though as sendpage inlines the
	 * page directly into the fraglist. If userspace modifies the
	 * page after we calculate the signature, then the server will
	 * reject it and may break the connection. kernel_sendmsg does
	 * an extra copy of the data and avoids that issue.
	 */
	iov->iov_base = kmap(rqst->rq_pages[idx]);

	/* if last page, don't send beyond this offset into page */
	if (idx == (rqst->rq_npages - 1))
		iov->iov_len = rqst->rq_tailsz;
	else
		iov->iov_len = rqst->rq_pagesz;
}

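/*
 * Total on-the-wire length of a request: the kvec array plus any
 * trailing page array.
 */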
static unsigned long
rqst_len(struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov = rqst->rq_iov;
	unsigned long buflen = 0;

	/* total up iov array first */
	for (i = 0; i < rqst->rq_nvec; i++)
		buflen += iov[i].iov_len;

	/* add in the page array if there is one */
	if (rqst->rq_npages) {
		buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
		buflen += rqst->rq_tailsz;
	}

	return buflen;
}

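/*
 * Send a single smb_rqst: cork the socket, push the kvec array and then
 * each page, and uncork. A partial send leaves the connection in an
 * unrecoverable state, so the session is flagged for reconnect.
 */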
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned long send_length;
	unsigned int i;
	size_t total_len = 0, sent;
	struct socket *ssocket = server->ssocket;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	/* sanity check send length */
	send_length = rqst_len(rqst);
	if (send_length != smb_buf_length + 4) {
		WARN(1, "Send length mismatch (send_length=%lu smb_buf_length=%u)\n",
		     send_length, smb_buf_length);
		return -EIO;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	rc = smb_send_kvec(server, iov, n_vec, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		struct kvec p_iov;

		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
		rc = smb_send_kvec(server, &p_iov, 1, &sent);
		kunmap(rqst->rq_pages[i]);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	return smb_send_rqst(server, &rqst);
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;

	return smb_sendv(server, &iov, 1);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

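/*
 * Validate session state, then allocate a mid for @in_buf and queue it
 * on the server's pending_mid_q. Only session setup, negotiate and
 * logoff commands may pass while a session is being set up or torn down.
 */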
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* only logoff may go out while the session is being torn down */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

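/*
 * Sleep (freezable, killable) until the mid leaves MID_REQUEST_SUBMITTED,
 * i.e. until a response arrives or the connection is torn down.
 */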
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	rc = wait_for_free_request(server, timeout, optype);
	if (rc)
		return rc;

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits(server, 1, optype);
		wake_up(&server->request_q);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, rqst);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);

	if (rc < 0)
		server->sequence_number -= 2;
	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	cifs_delete_mid(mid);
	add_credits(server, 1, optype);
	wake_up(&server->request_q);
	return rc;
}

/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

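/*
 * Map the final state of a mid to an errno and free the entry. A mid in
 * any state other than MID_RESPONSE_RECEIVED is an error and is removed
 * from the pending queue here.
 */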
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
		server->ops->send_cancel(server, buf, mid) : 0;
}

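/*
 * Verify the signature of a received response (when signing is active)
 * and map the SMB status code to a POSIX error.
 */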
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov;
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = &iov,
					 .rq_nvec = 1 };

		iov.iov_base = mid->resp_buf;
		iov.iov_len = len;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

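/*
 * Send a request built from a kvec array and wait synchronously for the
 * response. On success, iov[0] is rewritten to point at the response
 * buffer and *resp_buf_type records which buffer pool it came from.
 */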
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	char *buf = iov[0].iov_base;
	unsigned int credits = 1;
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER; /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(buf);
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(buf);
		return -ENOENT;
	}

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc) {
		cifs_small_buf_release(buf);
		return rc;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, &rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(buf);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(buf);
		goto out;
	}

	if (timeout == CIFS_ASYNC_OP) {
		cifs_small_buf_release(buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(buf);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(FYI, "Bad MID state?\n");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	iov[0].iov_base = buf;
	iov[0].iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}

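/*
 * Synchronous send/receive for legacy smb_hdr-based callers: the raw
 * response frame is copied into @out_buf and its length returned via
 * @pbytes_returned.
 */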
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, in_buf, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
// SPDX-License-Identifier: LGPL-2.1
/*
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

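/*
 * Allocate and initialize a refcounted mid for @smb_buffer. The entry is
 * returned in MID_REQUEST_ALLOCATED state; the creator task is pinned so
 * the default wake-up callback stays valid.
 */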
static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "%s: null TCP session\n", __func__);
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&mid_count);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

static void __release_mid(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&mid_count);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}

void release_mid(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;

	spin_lock(&server->mid_lock);
	kref_put(&mid->refcount, __release_mid);
	spin_unlock(&server->mid_lock);
}

void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&mid->server->mid_lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&mid->server->mid_lock);

	release_mid(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
						ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

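/*
 * Compute the on-the-wire length of @rqst: the kvec array (skipping the
 * 4-byte RFC1002 header vector for SMB2+) plus any trailing page array.
 */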
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
	    rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

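/*
 * Send one or more compounded requests over the socket (or via SMB
 * Direct when RDMA is in use). Signals are blocked for the duration of
 * the send so that a request is never left partially transmitted by an
 * interrupted syscall.
 */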
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg = {};
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		return -ERESTARTSYS;
	}

	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */
	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (!is_smb1(server)) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, ITER_SOURCE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */
	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		cifs_signal_cifsd_for_reconnect(server, false);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
				rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

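/*
 * Reserve @num_credits credits before sending, blocking up to @timeout
 * milliseconds (forever if negative). Non-blocking ops such as oplock
 * breaks bypass the wait entirely.
 */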
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_nblk_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
			 __func__, 1, scredits);

		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
							  server->conn_id, server->hostname, scredits,
							  num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			spin_unlock(&server->req_lock);

			spin_lock(&server->srv_lock);
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->srv_lock);
				return -ENOENT;
			}
			spin_unlock(&server->srv_lock);

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			spin_lock(&server->req_lock);
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->conn_id, server->hostname,
						scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_waitff_credits(server->CurrentMid,
						  server->conn_id, server->hostname, scredits,
						  -(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				 __func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}

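/*
 * Like wait_for_free_request() but for a compound chain needing @num
 * credits at once. Fails fast with -EDEADLK when nothing is in flight,
 * since no response could ever replenish the missing credits.
 */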
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
							server->conn_id, server->hostname, scredits,
							num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
				 __func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_NEW) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->ses_status == SES_EXITING) {
		/* only logoff may go out while the session is being torn down */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&ses->ses_lock);

	*ppmidQ = alloc_mid(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&ses->server->mid_lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&ses->server->mid_lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_state(server->response_q,
				 midQ->mid_state != MID_REQUEST_SUBMITTED,
				 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = alloc_mid(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		release_mid(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

793/*
794 * Send a SMB request and set the callback function in the mid to handle
795 * the result. Caller is responsible for dealing with timeouts.
796 */
797int
798cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
799 mid_receive_t *receive, mid_callback_t *callback,
800 mid_handle_t *handle, void *cbdata, const int flags,
801 const struct cifs_credits *exist_credits)
802{
803 int rc;
804 struct mid_q_entry *mid;
805 struct cifs_credits credits = { .value = 0, .instance = 0 };
806 unsigned int instance;
807 int optype;
808
809 optype = flags & CIFS_OP_MASK;
810
811 if ((flags & CIFS_HAS_CREDITS) == 0) {
812 rc = wait_for_free_request(server, flags, &instance);
813 if (rc)
814 return rc;
815 credits.value = 1;
816 credits.instance = instance;
817 } else
818 instance = exist_credits->instance;
819
820 cifs_server_lock(server);
821
822 /*
823 * We can't use credits obtained from the previous session to send this
824 * request. Check if there were reconnects after we obtained credits and
825 * return -EAGAIN in such cases to let callers handle it.
826 */
827 if (instance != server->reconnect_instance) {
828 cifs_server_unlock(server);
829 add_credits_and_wake_if(server, &credits, optype);
830 return -EAGAIN;
831 }
832
833 mid = server->ops->setup_async_request(server, rqst);
834 if (IS_ERR(mid)) {
835 cifs_server_unlock(server);
836 add_credits_and_wake_if(server, &credits, optype);
837 return PTR_ERR(mid);
838 }
839
840 mid->receive = receive;
841 mid->callback = callback;
842 mid->callback_data = cbdata;
843 mid->handle = handle;
844 mid->mid_state = MID_REQUEST_SUBMITTED;
845
846 /* put it on the pending_mid_q */
847 spin_lock(&server->mid_lock);
848 list_add_tail(&mid->qhead, &server->pending_mid_q);
849 spin_unlock(&server->mid_lock);
850
851 /*
852 * Need to store the time in mid before calling I/O. For call_async,
853 * I/O response may come back and free the mid entry on another thread.
854 */
855 cifs_save_when_sent(mid);
856 cifs_in_send_inc(server);
857 rc = smb_send_rqst(server, 1, rqst, flags);
858 cifs_in_send_dec(server);
859
860 if (rc < 0) {
861 revert_current_mid(server, mid->credits);
862 server->sequence_number -= 2;
863 delete_mid(mid);
864 }
865
866 cifs_server_unlock(server);
867
868 if (rc == 0)
869 return 0;
870
871 add_credits_and_wake_if(server, &credits, optype);
872 return rc;
873}
874
875/*
876 *
877 * Send an SMB Request. No response info (other than return code)
878 * needs to be parsed.
879 *
880 * flags indicate the type of request buffer and how long to wait
881 * and whether to log NT STATUS code (error) before mapping it to POSIX error
882 *
883 */
884int
885SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
886 char *in_buf, int flags)
887{
888 int rc;
889 struct kvec iov[1];
890 struct kvec rsp_iov;
891 int resp_buf_type;
892
893 iov[0].iov_base = in_buf;
894 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
895 flags |= CIFS_NO_RSP_BUF;
896 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
897 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
898
899 return rc;
900}
901
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&server->mid_lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&server->mid_lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&server->mid_lock);

	release_mid(mid);
	return rc;
}

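/*
 * Ask the server to cancel an outstanding request. Not every dialect
 * implements a cancel operation, so this is a no-op (returning 0) when
 * the server ops table provides no send_cancel method.
 */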
static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

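/*
 * Validate a received SMB response: dump the start of the frame for
 * debugging, verify the signature when signing is in use, and map any
 * error in the response header to a POSIX return code.
 */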
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* verify the signature if the server signed the response */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}

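/*
 * Allocate and sign a mid for a synchronous SMB1 request. The first kvec
 * must contain only the 4-byte RFC1001 length and must be immediately
 * followed in memory by the SMB header in the second kvec.
 */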
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

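/*
 * Credit accounting for one part of a compound chain: collect the credits
 * granted by the server in this response and add them back to the server
 * so that waiting requests can proceed.
 */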
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

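/*
 * Callback installed on a mid whose waiter has given up (e.g. on a
 * signal): account the credits as usual, then free the mid, since no
 * thread is left to reap it.
 */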
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	release_mid(mid);
}

/*
 * Return a channel of @ses that can be used to send regular requests;
 * this is the master channel when no extra channels have been set up.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;

	if (!ses)
		return NULL;

	/* round robin */
	index = (uint)atomic_inc_return(&ses->chan_seq);

	spin_lock(&ses->chan_lock);
	index %= ses->chan_count;
	spin_unlock(&ses->chan_lock);

	return ses->chans[index].server;
}

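/*
 * Core synchronous send/receive path. Sends up to MAX_COMPOUND requests
 * as one compound chain (num_rqst == 1 for a simple request), waits for
 * all of the responses, and hands them back to the caller in resp_iov.
 * Credits are obtained up front for every part of the chain and are
 * returned to the server if the requests never make it onto the wire.
 */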
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;	/* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available. This approach can
	 * still get stuck waiting for credits if the server stops granting
	 * credits to the outstanding requests while the client is completely
	 * idle and generating no other requests. That case is handled by the
	 * eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

1095
1096 for (i = 0; i < num_rqst; i++) {
1097 credits[i].value = 1;
1098 credits[i].instance = instance;
1099 }
1100
1101 /*
1102 * Make sure that we sign in the same order that we send on this socket
1103 * and avoid races inside tcp sendmsg code that could cause corruption
1104 * of smb data.
1105 */
1106
1107 cifs_server_lock(server);
1108
	/*
	 * All parts of the compound chain must use credits obtained from the
	 * same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

1122
1123 for (i = 0; i < num_rqst; i++) {
1124 midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
1125 if (IS_ERR(midQ[i])) {
1126 revert_current_mid(server, i);
1127 for (j = 0; j < i; j++)
1128 delete_mid(midQ[j]);
1129 cifs_server_unlock(server);
1130
1131 /* Update # of requests on wire to server */
1132 for (j = 0; j < num_rqst; j++)
1133 add_credits(server, &credits[j], optype);
1134 return PTR_ERR(midQ[i]);
1135 }
1136
1137 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
1138 midQ[i]->optype = optype;
1139 /*
1140 * Invoke callback for every part of the compound chain
1141 * to calculate credits properly. Wake up this thread only when
1142 * the last element is received.
1143 */
1144 if (i < num_rqst - 1)
1145 midQ[i]->callback = cifs_compound_callback;
1146 else
1147 midQ[i]->callback = cifs_compound_last_callback;
1148 }
1149 cifs_in_send_inc(server);
1150 rc = smb_send_rqst(server, num_rqst, rqst, flags);
1151 cifs_in_send_dec(server);
1152
1153 for (i = 0; i < num_rqst; i++)
1154 cifs_save_when_sent(midQ[i]);
1155
	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason, or if this is an oplock break
	 * that we will not receive a response to, return the credits.
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establishment.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;
	}

	/*
	 * Compounding is never used during session establishment.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a no-op callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}

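/*
 * Convenience wrapper around compound_send_recv() for the common case of
 * a single (non-compounded) request.
 */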
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}

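/*
 * SMB1 entry point that takes a caller-built kvec array in which the
 * first kvec begins with the 4-byte RFC1001 length. The length is split
 * out into its own kvec here, as the transport below expects.
 */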
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is an RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

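/*
 * Synchronous send/receive for a complete, pre-built SMB1 frame. The
 * response is copied into out_buf and its RFC1001 length is returned in
 * *pbytes_returned.
 */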
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */
	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/*
	 * We just modify the current in_buf to change the type of lock from
	 * LOCKING_ANDX_SHARED_LOCK or LOCKING_ANDX_EXCLUSIVE_LOCK to
	 * LOCKING_ANDX_CANCEL_LOCK.
	 */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

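/*
 * Variant of SendReceive() for blocking SMB1 lock requests, which can
 * legitimately remain outstanding indefinitely. The wait is therefore
 * interruptible; on a signal we first cancel the lock on the server
 * (NT_CANCEL for POSIX locks, LOCKINGX_CANCEL_LOCK for Windows locks)
 * and restart the system call once the pending response arrives.
 */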
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */
	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock. We send an NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * Windows lock. We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * If we get -ENOLCK back, the lock may have already
			 * been removed. Don't exit in this case.
			 */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart the system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}

/*
 * Discard any remaining data in the current SMB frame by reading it off
 * the socket in bounded chunks.
 */
int
cifs_discard_remaining_data(struct TCP_Server_Info *server)
{
	unsigned int rfclen = server->pdu_size;
	int remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
		server->total_read;

	while (remaining > 0) {
		int length;

		length = cifs_discard_from_socket(server,
				min_t(size_t, remaining,
				      CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
		if (length < 0)
			return length;
		server->total_read += length;
		remaining -= length;
	}

	return 0;
}

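/*
 * Drain whatever is left of the current frame from the socket, dequeue
 * the mid, and hand it the small buffer (which holds the response
 * header) so that its callback can examine the result.
 */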
static int
__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		     bool malformed)
{
	int length;

	length = cifs_discard_remaining_data(server);
	dequeue_mid(mid, malformed);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}

static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;

	return __cifs_readv_discard(server, mid, rdata->result);
}

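/*
 * Receive handler for read responses. Reads the rest of the READ_RSP
 * header off the socket, validates the data offset and length against
 * the frame size, and then reads the payload straight into the pages
 * supplied by the caller, avoiding an extra copy through a buffer.
 */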
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_readdata *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
		 __func__, mid->mid, rdata->offset, rdata->bytes);

	/*
	 * Read the rest of the READ_RSP header (sans Data array), or whatever
	 * we can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
							HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	length = rdata->read_into_pages(server, rdata, data_len);
	if (length < 0)
		return length;

	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}