/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 * Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

extern mempool_t *cifs_mid_poolp;

static void
wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

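/*
 * Allocate and initialize a mid (multiplex id) queue entry tracking one
 * request/response exchange with the server. Note that the caller is
 * responsible for queueing the entry on the server's pending_mid_q; see
 * allocate_mid() and cifs_call_async() below.
 */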
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cERROR(1, "Null TCP session in AllocMidQEntry");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	if (temp == NULL)
		return temp;
	else {
		memset(temp, 0, sizeof(struct mid_q_entry));
		temp->mid = smb_buffer->Mid;	/* always LE */
		temp->pid = current->pid;
		temp->command = smb_buffer->Command;
		cFYI(1, "For smb_command %d", temp->command);
		/* do_gettimeofday(&temp->when_sent); */ /* easier to use jiffies */
		/* when mid allocated can be before when sent */
		temp->when_alloc = jiffies;

		/*
		 * The default is for the mid to be synchronous, so the
		 * default callback just wakes up the current task.
		 */
		temp->callback = wake_up_task;
		temp->callback_data = current;
	}

	atomic_inc(&midCount);
	temp->midState = MID_REQUEST_ALLOCATED;
	return temp;
}

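/*
 * Tear down a mid entry: release its response buffer back to the large
 * or small buf pool, drop the global mid count, and return the entry to
 * the mempool. With CONFIG_CIFS_STATS2, responses that took longer than
 * a second are also logged when the CIFS_TIMER debug flag is set
 * (blocking lock commands are exempt since they legitimately wait).
 */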
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	unsigned long now;
#endif
	midEntry->midState = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->largeBuf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) &&
		    (midEntry->command != SMB_COM_LOCKING_ANDX)) {
			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %d",
			       midEntry->command, midEntry->mid);
			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}

static void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

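/*
 * Our basic "send data to server" function. Writes the iovec array to
 * the server socket, retrying partial sends and transient -ENOSPC or
 * -EAGAIN results for roughly 15 seconds before giving up and marking
 * the session for reconnect. Must be called with srv_mutex held so that
 * SMBs go out on the wire in the same order they were signed.
 */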
static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	int rc = 0;
	int i = 0;
	struct msghdr smb_msg;
	struct smb_hdr *smb_buffer = iov[0].iov_base;
	unsigned int len = iov[0].iov_len;
	unsigned int total_len;
	int first_vec = 0;
	unsigned int smb_buf_length = be32_to_cpu(smb_buffer->smb_buf_length);
	struct socket *ssocket = server->ssocket;

	if (ssocket == NULL)
		return -ENOTSOCK; /* BB eventually add reconnect code here */

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	total_len = 0;
	for (i = 0; i < n_vec; i++)
		total_len += iov[i].iov_len;

	cFYI(1, "Sending smb: total_len %d", total_len);
	dump_smb(smb_buffer, len);

	i = 0;
	while (total_len) {
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, total_len);
		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
			i++;
			/*
			 * If this is a blocking send, we try 3 times, since
			 * each can block for 5 seconds. For a nonblocking
			 * send we have to try more times, but wait
			 * increasing amounts of time to allow the socket to
			 * clear. The overall time we wait in either case to
			 * send on the socket is about 15 seconds. Similarly
			 * we wait up to 15 seconds in SendReceive[2] for the
			 * server to send a response back for most types of
			 * requests (except SMB writes past end of file,
			 * which can be slow, and blocking lock operations).
			 * NFS waits slightly longer than CIFS, but the
			 * shorter wait detects nonresponsive servers sooner,
			 * and 15 seconds is more than enough time for modern
			 * networks to send a packet. In most cases if we
			 * fail to send after the retries we will kill the
			 * socket and reconnect, which may clear the network
			 * problem.
			 */
			if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
				cERROR(1, "sends on sock %p stuck for 15 seconds",
				       ssocket);
				rc = -EAGAIN;
				break;
			}
			msleep(1 << i);
			continue;
		}
		if (rc < 0)
			break;

		if (rc == total_len) {
			total_len = 0;
			break;
		} else if (rc > total_len) {
			cERROR(1, "sent %d requested %d", rc, total_len);
			break;
		}
		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cERROR(1, "tcp sent no data");
			msleep(500);
			continue;
		}
		total_len -= rc;
		/* advance first_vec past any iovecs that were fully sent */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}
		i = 0; /* in case we get ENOSPC on the next send */
	}

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cFYI(1, "partial send (%d remaining), terminating session",
		     total_len);
		/* If we have only sent part of an SMB then the next SMB
		   could be taken as the remainder of this one. We need
		   to kill the socket so the server throws away the partial
		   SMB */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cERROR(1, "Error %d sending data on socket to server", rc);
	else
		rc = 0;

	/* Don't want to modify the buffer as a side effect of this call. */
	smb_buffer->smb_buf_length = cpu_to_be32(smb_buf_length);

	return rc;
}

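/*
 * Convenience wrapper around smb_sendv() for a single contiguous SMB.
 * The length passed in excludes the 4 byte RFC1001 header, which is why
 * the kvec length adds it back. A typical synchronous caller looks like
 *
 *	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
 *
 * as in SendReceive() below.
 */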
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;

	return smb_sendv(server, &iov, 1);
}

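/*
 * Throttle the number of simultaneous requests on the wire. Async
 * requests (e.g. oplock breaks) are never delayed, and blocking lock
 * requests are not counted against the limit since they may block on
 * the server indefinitely; everything else sleeps on request_q until
 * inFlight drops below cifs_max_pending.
 */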
static int wait_for_free_request(struct TCP_Server_Info *server,
				 const int long_op)
{
	if (long_op == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		atomic_inc(&server->inFlight);
		return 0;
	}

	spin_lock(&GlobalMid_Lock);
	while (1) {
		if (atomic_read(&server->inFlight) >= cifs_max_pending) {
			spin_unlock(&GlobalMid_Lock);
			cifs_num_waiters_inc(server);
			wait_event(server->request_q,
				   atomic_read(&server->inFlight)
				   < cifs_max_pending);
			cifs_num_waiters_dec(server);
			spin_lock(&GlobalMid_Lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&GlobalMid_Lock);
				return -ENOENT;
			}

			/* can not count locking commands against total
			   as they are allowed to block on server */

			/* update # of requests on the wire to server */
			if (long_op != CIFS_BLOCKING_OP)
				atomic_inc(&server->inFlight);
			spin_unlock(&GlobalMid_Lock);
			break;
		}
	}
	return 0;
}

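/*
 * Allocate a mid for this request and queue it on pending_mid_q, after
 * checking that the tcp and SMB sessions are in a state where sending
 * makes sense (-EAGAIN tells the caller to retry after reconnect).
 */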
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cFYI(1, "tcp session dead - return to caller to retry");
		return -EAGAIN;
	}

	if (ses->status != CifsGood) {
		/* check if SMB session is bad because we are setting it up */
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}
	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

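/*
 * Sleep until the demultiplex thread moves the mid out of the
 * MID_REQUEST_SUBMITTED state (response received, retry needed, or
 * shutdown). A fatal signal aborts the wait with -ERESTARTSYS.
 */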
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_killable(server->response_q,
				    midQ->midState != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
		unsigned int nvec, mid_callback_t *callback, void *cbdata,
		bool ignore_pend)
{
	int rc;
	struct mid_q_entry *mid;
	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;

	rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
	if (rc)
		return rc;

	/* enable signing if server requires it */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mutex_lock(&server->srv_mutex);
	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL) {
		mutex_unlock(&server->srv_mutex);
		atomic_dec(&server->inFlight);
		wake_up(&server->request_q);
		return -ENOMEM;
	}

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out_err;
	}

	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->midState = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_sendv(server, iov, nvec);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);
	mutex_unlock(&server->srv_mutex);

	if (rc)
		goto out_err;

	return rc;
out_err:
	delete_mid(mid);
	atomic_dec(&server->inFlight);
	wake_up(&server->request_q);
	return rc;
}

/*
 * Send an SMB request. No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait, and
 * whether to log the NT STATUS code (error) before mapping it to a
 * POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 struct smb_hdr *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	int resp_buf_type;

	iov[0].iov_base = (char *)in_buf;
	iov[0].iov_len = be32_to_cpu(in_buf->smb_buf_length) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
	cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);

	return rc;
}

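/*
 * Map the final state of a mid to an error code and free the entry.
 * Note that in the invalid-state (default) case the mid is still on
 * pending_mid_q, so it is unlinked under GlobalMid_Lock before
 * DeleteMidQEntry() is called on it.
 */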
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
	     mid->mid, mid->midState);

	spin_lock(&GlobalMid_Lock);
	switch (mid->midState) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
		       mid->mid, mid->midState);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

/*
 * An NT cancel request header looks just like the original request except:
 *
 * The Command is SMB_COM_NT_CANCEL
 * The WordCount is zeroed out
 * The ByteCount is zeroed out
 *
 * This function mangles an existing request buffer into a
 * SMB_COM_NT_CANCEL request and then sends it.
 */
static int
send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
	       struct mid_q_entry *mid)
{
	int rc = 0;

	/* -4 for RFC1001 length and +2 for BCC field */
	in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
	in_buf->Command = SMB_COM_NT_CANCEL;
	in_buf->WordCount = 0;
	put_bcc(0, in_buf);

	mutex_lock(&server->srv_mutex);
	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}
	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	mutex_unlock(&server->srv_mutex);

	cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
	     in_buf->Mid, rc);

	return rc;
}

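/*
 * Sanity check a received response: dump the first bytes for debugging,
 * verify the signature when the server has signing enabled or required,
 * and map the status code in the SMB header to a POSIX error.
 */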
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	dump_smb(mid->resp_buf,
		 min_t(u32, 92, be32_to_cpu(mid->resp_buf->smb_buf_length)));

	/* verify the signature if the server negotiated signing */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
		/* FIXME: add code to kill session */
		if (cifs_verify_signature(mid->resp_buf, server,
					  mid->sequence_number + 1) != 0)
			cERROR(1, "Unexpected SMB signature");
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

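/*
 * Send a request built as an iovec array and wait synchronously for the
 * response. On success iov[0] is rewritten to point at the response
 * buffer and *pRespBufType records whether that buffer came from the
 * large or small buf pool, so the caller knows how to release it. The
 * small buf holding the request header is always consumed here,
 * whichever path is taken.
 */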
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
	     const int flags)
{
	int rc = 0;
	int long_op;
	struct mid_q_entry *midQ;
	struct smb_hdr *in_buf = iov[0].iov_base;

	long_op = flags & CIFS_TIMEOUT_MASK;

	*pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(in_buf);
		cERROR(1, "Null session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(in_buf);
		return -ENOENT;
	}

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	rc = wait_for_free_request(ses->server, long_op);
	if (rc) {
		cifs_small_buf_release(in_buf);
		return rc;
	}

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(in_buf);
		/* Update # of requests on wire to server */
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}
	rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(in_buf);
		goto out;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(in_buf);
		goto out;
	}

	if (long_op == CIFS_ASYNC_OP) {
		cifs_small_buf_release(in_buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_nt_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->midState == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(in_buf);
			atomic_dec(&ses->server->inFlight);
			wake_up(&ses->server->request_q);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(in_buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cFYI(1, "Bad MID state?");
		goto out;
	}

	iov[0].iov_base = (char *)midQ->resp_buf;
	iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4;
	if (midQ->largeBuf)
		*pRespBufType = CIFS_LARGE_BUFFER;
	else
		*pRespBufType = CIFS_SMALL_BUFFER;

	rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	delete_mid(midQ);
	atomic_dec(&ses->server->inFlight);
	wake_up(&ses->server->request_q);

	return rc;
}

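/*
 * Synchronous send/receive for callers that use a single preallocated
 * request buffer and want the response copied into out_buf rather than
 * returned by reference. On success *pbytes_returned is set to the
 * response length, excluding the 4 byte RFC1001 header.
 */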
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int long_op)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
		       be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, long_op);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (long_op == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_nt_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->midState == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			atomic_dec(&ses->server->inFlight);
			wake_up(&ses->server->request_q);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	atomic_dec(&ses->server->inFlight);
	wake_up(&ses->server->request_q);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK | LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = GetNextMid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

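/*
 * Variant of SendReceive() for blocking lock requests, which may block
 * on the server indefinitely and so are not counted against the
 * in-flight limit. If the wait for the reply is interrupted by a signal
 * we ask the server to cancel the lock (NT_CANCEL for POSIX locks sent
 * via SMB_COM_TRANSACTION2, LOCKINGX_CANCEL_LOCK otherwise), and once
 * the response arrives a lock denied with -EACCES is turned into
 * -ERESTARTSYS so the system call is restarted.
 */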
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
		       be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->midState == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_nt_cancel(ses->server, in_buf, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_nt_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->midState == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
1/*
2 * fs/cifs/transport.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org) 2006.
7 *
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/fs.h>
24#include <linux/list.h>
25#include <linux/gfp.h>
26#include <linux/wait.h>
27#include <linux/net.h>
28#include <linux/delay.h>
29#include <linux/freezer.h>
30#include <linux/tcp.h>
31#include <linux/bvec.h>
32#include <linux/highmem.h>
33#include <linux/uaccess.h>
34#include <asm/processor.h>
35#include <linux/mempool.h>
36#include <linux/sched/signal.h>
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_debug.h"
41#include "smb2proto.h"
42#include "smbdirect.h"
43
44/* Max number of iovectors we can use off the stack when sending requests. */
45#define CIFS_MAX_IOV_SIZE 8
46
47void
48cifs_wake_up_task(struct mid_q_entry *mid)
49{
50 wake_up_process(mid->callback_data);
51}
52
53struct mid_q_entry *
54AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
55{
56 struct mid_q_entry *temp;
57
58 if (server == NULL) {
59 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
60 return NULL;
61 }
62
63 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
64 memset(temp, 0, sizeof(struct mid_q_entry));
65 kref_init(&temp->refcount);
66 temp->mid = get_mid(smb_buffer);
67 temp->pid = current->pid;
68 temp->command = cpu_to_le16(smb_buffer->Command);
69 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
70 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
71 /* when mid allocated can be before when sent */
72 temp->when_alloc = jiffies;
73 temp->server = server;
74
75 /*
76 * The default is for the mid to be synchronous, so the
77 * default callback just wakes up the current task.
78 */
79 temp->callback = cifs_wake_up_task;
80 temp->callback_data = current;
81
82 atomic_inc(&midCount);
83 temp->mid_state = MID_REQUEST_ALLOCATED;
84 return temp;
85}
86
87static void _cifs_mid_q_entry_release(struct kref *refcount)
88{
89 struct mid_q_entry *midEntry =
90 container_of(refcount, struct mid_q_entry, refcount);
91#ifdef CONFIG_CIFS_STATS2
92 __le16 command = midEntry->server->vals->lock_cmd;
93 __u16 smb_cmd = le16_to_cpu(midEntry->command);
94 unsigned long now;
95 unsigned long roundtrip_time;
96 struct TCP_Server_Info *server = midEntry->server;
97#endif
98 midEntry->mid_state = MID_FREE;
99 atomic_dec(&midCount);
100 if (midEntry->large_buf)
101 cifs_buf_release(midEntry->resp_buf);
102 else
103 cifs_small_buf_release(midEntry->resp_buf);
104#ifdef CONFIG_CIFS_STATS2
105 now = jiffies;
106 if (now < midEntry->when_alloc)
107 cifs_server_dbg(VFS, "invalid mid allocation time\n");
108 roundtrip_time = now - midEntry->when_alloc;
109
110 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
111 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
112 server->slowest_cmd[smb_cmd] = roundtrip_time;
113 server->fastest_cmd[smb_cmd] = roundtrip_time;
114 } else {
115 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
116 server->slowest_cmd[smb_cmd] = roundtrip_time;
117 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
118 server->fastest_cmd[smb_cmd] = roundtrip_time;
119 }
120 cifs_stats_inc(&server->num_cmds[smb_cmd]);
121 server->time_per_cmd[smb_cmd] += roundtrip_time;
122 }
123 /*
124 * commands taking longer than one second (default) can be indications
125 * that something is wrong, unless it is quite a slow link or a very
126 * busy server. Note that this calc is unlikely or impossible to wrap
127 * as long as slow_rsp_threshold is not set way above recommended max
128 * value (32767 ie 9 hours) and is generally harmless even if wrong
129 * since only affects debug counters - so leaving the calc as simple
130 * comparison rather than doing multiple conversions and overflow
131 * checks
132 */
133 if ((slow_rsp_threshold != 0) &&
134 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
135 (midEntry->command != command)) {
136 /*
137 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
138 * NB: le16_to_cpu returns unsigned so can not be negative below
139 */
140 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
141 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
142
143 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
144 midEntry->when_sent, midEntry->when_received);
145 if (cifsFYI & CIFS_TIMER) {
146 pr_debug(" CIFS slow rsp: cmd %d mid %llu",
147 midEntry->command, midEntry->mid);
148 cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
149 now - midEntry->when_alloc,
150 now - midEntry->when_sent,
151 now - midEntry->when_received);
152 }
153 }
154#endif
155
156 mempool_free(midEntry, cifs_mid_poolp);
157}
158
159void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
160{
161 spin_lock(&GlobalMid_Lock);
162 kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
163 spin_unlock(&GlobalMid_Lock);
164}
165
166void DeleteMidQEntry(struct mid_q_entry *midEntry)
167{
168 cifs_mid_q_entry_release(midEntry);
169}
170
171void
172cifs_delete_mid(struct mid_q_entry *mid)
173{
174 spin_lock(&GlobalMid_Lock);
175 if (!(mid->mid_flags & MID_DELETED)) {
176 list_del_init(&mid->qhead);
177 mid->mid_flags |= MID_DELETED;
178 }
179 spin_unlock(&GlobalMid_Lock);
180
181 DeleteMidQEntry(mid);
182}
183
184/*
185 * smb_send_kvec - send an array of kvecs to the server
186 * @server: Server to send the data to
187 * @smb_msg: Message to send
188 * @sent: amount of data sent on socket is stored here
189 *
190 * Our basic "send data to server" function. Should be called with srv_mutex
191 * held. The caller is responsible for handling the results.
192 */
193static int
194smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
195 size_t *sent)
196{
197 int rc = 0;
198 int retries = 0;
199 struct socket *ssocket = server->ssocket;
200
201 *sent = 0;
202
203 smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
204 smb_msg->msg_namelen = sizeof(struct sockaddr);
205 smb_msg->msg_control = NULL;
206 smb_msg->msg_controllen = 0;
207 if (server->noblocksnd)
208 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
209 else
210 smb_msg->msg_flags = MSG_NOSIGNAL;
211
212 while (msg_data_left(smb_msg)) {
213 /*
214 * If blocking send, we try 3 times, since each can block
215 * for 5 seconds. For nonblocking we have to try more
216 * but wait increasing amounts of time allowing time for
217 * socket to clear. The overall time we wait in either
218 * case to send on the socket is about 15 seconds.
219 * Similarly we wait for 15 seconds for a response from
220 * the server in SendReceive[2] for the server to send
221 * a response back for most types of requests (except
222 * SMB Write past end of file which can be slow, and
223 * blocking lock operations). NFS waits slightly longer
224 * than CIFS, but this can make it take longer for
225 * nonresponsive servers to be detected and 15 seconds
226 * is more than enough time for modern networks to
227 * send a packet. In most cases if we fail to send
228 * after the retries we will kill the socket and
229 * reconnect which may clear the network problem.
230 */
231 rc = sock_sendmsg(ssocket, smb_msg);
232 if (rc == -EAGAIN) {
233 retries++;
234 if (retries >= 14 ||
235 (!server->noblocksnd && (retries > 2))) {
236 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
237 ssocket);
238 return -EAGAIN;
239 }
240 msleep(1 << retries);
241 continue;
242 }
243
244 if (rc < 0)
245 return rc;
246
247 if (rc == 0) {
248 /* should never happen, letting socket clear before
249 retrying is our only obvious option here */
250 cifs_server_dbg(VFS, "tcp sent no data\n");
251 msleep(500);
252 continue;
253 }
254
255 /* send was at least partially successful */
256 *sent += rc;
257 retries = 0; /* in case we get ENOSPC on the next send */
258 }
259 return 0;
260}
261
262unsigned long
263smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
264{
265 unsigned int i;
266 struct kvec *iov;
267 int nvec;
268 unsigned long buflen = 0;
269
270 if (server->vals->header_preamble_size == 0 &&
271 rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
272 iov = &rqst->rq_iov[1];
273 nvec = rqst->rq_nvec - 1;
274 } else {
275 iov = rqst->rq_iov;
276 nvec = rqst->rq_nvec;
277 }
278
279 /* total up iov array first */
280 for (i = 0; i < nvec; i++)
281 buflen += iov[i].iov_len;
282
283 /*
284 * Add in the page array if there is one. The caller needs to make
285 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
286 * multiple pages ends at page boundary, rq_tailsz needs to be set to
287 * PAGE_SIZE.
288 */
289 if (rqst->rq_npages) {
290 if (rqst->rq_npages == 1)
291 buflen += rqst->rq_tailsz;
292 else {
293 /*
294 * If there is more than one page, calculate the
295 * buffer length based on rq_offset and rq_tailsz
296 */
297 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
298 rqst->rq_offset;
299 buflen += rqst->rq_tailsz;
300 }
301 }
302
303 return buflen;
304}
305
306static int
307__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
308 struct smb_rqst *rqst)
309{
310 int rc = 0;
311 struct kvec *iov;
312 int n_vec;
313 unsigned int send_length = 0;
314 unsigned int i, j;
315 sigset_t mask, oldmask;
316 size_t total_len = 0, sent, size;
317 struct socket *ssocket = server->ssocket;
318 struct msghdr smb_msg;
319 int val = 1;
320 __be32 rfc1002_marker;
321
322 if (cifs_rdma_enabled(server) && server->smbd_conn) {
323 rc = smbd_send(server, num_rqst, rqst);
324 goto smbd_done;
325 }
326
327 if (ssocket == NULL)
328 return -EAGAIN;
329
330 if (signal_pending(current)) {
331 cifs_dbg(FYI, "signal is pending before sending any data\n");
332 return -EINTR;
333 }
334
335 /* cork the socket */
336 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
337 (char *)&val, sizeof(val));
338
339 for (j = 0; j < num_rqst; j++)
340 send_length += smb_rqst_len(server, &rqst[j]);
341 rfc1002_marker = cpu_to_be32(send_length);
342
343 /*
344 * We should not allow signals to interrupt the network send because
345 * any partial send will cause session reconnects thus increasing
346 * latency of system calls and overload a server with unnecessary
347 * requests.
348 */
349
350 sigfillset(&mask);
351 sigprocmask(SIG_BLOCK, &mask, &oldmask);
352
353 /* Generate a rfc1002 marker for SMB2+ */
354 if (server->vals->header_preamble_size == 0) {
355 struct kvec hiov = {
356 .iov_base = &rfc1002_marker,
357 .iov_len = 4
358 };
359 iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
360 rc = smb_send_kvec(server, &smb_msg, &sent);
361 if (rc < 0)
362 goto unmask;
363
364 total_len += sent;
365 send_length += 4;
366 }
367
368 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
369
370 for (j = 0; j < num_rqst; j++) {
371 iov = rqst[j].rq_iov;
372 n_vec = rqst[j].rq_nvec;
373
374 size = 0;
375 for (i = 0; i < n_vec; i++) {
376 dump_smb(iov[i].iov_base, iov[i].iov_len);
377 size += iov[i].iov_len;
378 }
379
380 iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
381
382 rc = smb_send_kvec(server, &smb_msg, &sent);
383 if (rc < 0)
384 goto unmask;
385
386 total_len += sent;
387
388 /* now walk the page array and send each page in it */
389 for (i = 0; i < rqst[j].rq_npages; i++) {
390 struct bio_vec bvec;
391
392 bvec.bv_page = rqst[j].rq_pages[i];
393 rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
394 &bvec.bv_offset);
395
396 iov_iter_bvec(&smb_msg.msg_iter, WRITE,
397 &bvec, 1, bvec.bv_len);
398 rc = smb_send_kvec(server, &smb_msg, &sent);
399 if (rc < 0)
400 break;
401
402 total_len += sent;
403 }
404 }
405
406unmask:
407 sigprocmask(SIG_SETMASK, &oldmask, NULL);
408
409 /*
410 * If signal is pending but we have already sent the whole packet to
411 * the server we need to return success status to allow a corresponding
412 * mid entry to be kept in the pending requests queue thus allowing
413 * to handle responses from the server by the client.
414 *
415 * If only part of the packet has been sent there is no need to hide
416 * interrupt because the session will be reconnected anyway, so there
417 * won't be any response from the server to handle.
418 */
419
420 if (signal_pending(current) && (total_len != send_length)) {
421 cifs_dbg(FYI, "signal is pending after attempt to send\n");
422 rc = -EINTR;
423 }
424
425 /* uncork it */
426 val = 0;
427 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
428 (char *)&val, sizeof(val));
429
430 if ((total_len > 0) && (total_len != send_length)) {
431 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
432 send_length, total_len);
433 /*
434 * If we have only sent part of an SMB then the next SMB could
435 * be taken as the remainder of this one. We need to kill the
436 * socket so the server throws away the partial SMB
437 */
438 server->tcpStatus = CifsNeedReconnect;
439 trace_smb3_partial_send_reconnect(server->CurrentMid,
440 server->hostname);
441 }
442smbd_done:
443 if (rc < 0 && rc != -EINTR)
444 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
445 rc);
446 else if (rc > 0)
447 rc = 0;
448
449 return rc;
450}
451
452static int
453smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
454 struct smb_rqst *rqst, int flags)
455{
456 struct kvec iov;
457 struct smb2_transform_hdr tr_hdr;
458 struct smb_rqst cur_rqst[MAX_COMPOUND];
459 int rc;
460
461 if (!(flags & CIFS_TRANSFORM_REQ))
462 return __smb_send_rqst(server, num_rqst, rqst);
463
464 if (num_rqst > MAX_COMPOUND - 1)
465 return -ENOMEM;
466
467 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
468 memset(&iov, 0, sizeof(iov));
469 memset(&tr_hdr, 0, sizeof(tr_hdr));
470
471 iov.iov_base = &tr_hdr;
472 iov.iov_len = sizeof(tr_hdr);
473 cur_rqst[0].rq_iov = &iov;
474 cur_rqst[0].rq_nvec = 1;
475
476 if (!server->ops->init_transform_rq) {
477 cifs_server_dbg(VFS, "Encryption requested but transform "
478 "callback is missing\n");
479 return -EIO;
480 }
481
482 rc = server->ops->init_transform_rq(server, num_rqst + 1,
483 &cur_rqst[0], rqst);
484 if (rc)
485 return rc;
486
487 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
488 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
489 return rc;
490}
491
492int
493smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
494 unsigned int smb_buf_length)
495{
496 struct kvec iov[2];
497 struct smb_rqst rqst = { .rq_iov = iov,
498 .rq_nvec = 2 };
499
500 iov[0].iov_base = smb_buffer;
501 iov[0].iov_len = 4;
502 iov[1].iov_base = (char *)smb_buffer + 4;
503 iov[1].iov_len = smb_buf_length;
504
505 return __smb_send_rqst(server, 1, &rqst);
506}
507
508static int
509wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
510 const int timeout, const int flags,
511 unsigned int *instance)
512{
513 int rc;
514 int *credits;
515 int optype;
516 long int t;
517
518 if (timeout < 0)
519 t = MAX_JIFFY_OFFSET;
520 else
521 t = msecs_to_jiffies(timeout);
522
523 optype = flags & CIFS_OP_MASK;
524
525 *instance = 0;
526
527 credits = server->ops->get_credits_field(server, optype);
528 /* Since an echo is already inflight, no need to wait to send another */
529 if (*credits <= 0 && optype == CIFS_ECHO_OP)
530 return -EAGAIN;
531
532 spin_lock(&server->req_lock);
533 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
534 /* oplock breaks must not be held up */
535 server->in_flight++;
536 if (server->in_flight > server->max_in_flight)
537 server->max_in_flight = server->in_flight;
538 *credits -= 1;
539 *instance = server->reconnect_instance;
540 spin_unlock(&server->req_lock);
541 return 0;
542 }
543
544 while (1) {
545 if (*credits < num_credits) {
546 spin_unlock(&server->req_lock);
547 cifs_num_waiters_inc(server);
548 rc = wait_event_killable_timeout(server->request_q,
549 has_credits(server, credits, num_credits), t);
550 cifs_num_waiters_dec(server);
551 if (!rc) {
552 trace_smb3_credit_timeout(server->CurrentMid,
553 server->hostname, num_credits);
554 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
555 timeout);
556 return -ENOTSUPP;
557 }
558 if (rc == -ERESTARTSYS)
559 return -ERESTARTSYS;
560 spin_lock(&server->req_lock);
561 } else {
562 if (server->tcpStatus == CifsExiting) {
563 spin_unlock(&server->req_lock);
564 return -ENOENT;
565 }
566
567 /*
568 * For normal commands, reserve the last MAX_COMPOUND
569 * credits to compound requests.
570 * Otherwise these compounds could be permanently
571 * starved for credits by single-credit requests.
572 *
573 * To prevent spinning CPU, block this thread until
574 * there are >MAX_COMPOUND credits available.
575 * But only do this is we already have a lot of
576 * credits in flight to avoid triggering this check
577 * for servers that are slow to hand out credits on
578 * new sessions.
579 */
580 if (!optype && num_credits == 1 &&
581 server->in_flight > 2 * MAX_COMPOUND &&
582 *credits <= MAX_COMPOUND) {
583 spin_unlock(&server->req_lock);
584 cifs_num_waiters_inc(server);
585 rc = wait_event_killable_timeout(
586 server->request_q,
587 has_credits(server, credits,
588 MAX_COMPOUND + 1),
589 t);
590 cifs_num_waiters_dec(server);
591 if (!rc) {
592 trace_smb3_credit_timeout(
593 server->CurrentMid,
594 server->hostname, num_credits);
595 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
596 timeout);
597 return -ENOTSUPP;
598 }
599 if (rc == -ERESTARTSYS)
600 return -ERESTARTSYS;
601 spin_lock(&server->req_lock);
602 continue;
603 }
604
605 /*
606 * Can not count locking commands against total
607 * as they are allowed to block on server.
608 */
609
610 /* update # of requests on the wire to server */
611 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
612 *credits -= num_credits;
613 server->in_flight += num_credits;
614 if (server->in_flight > server->max_in_flight)
615 server->max_in_flight = server->in_flight;
616 *instance = server->reconnect_instance;
617 }
618 spin_unlock(&server->req_lock);
619 break;
620 }
621 }
622 return 0;
623}
624
625static int
626wait_for_free_request(struct TCP_Server_Info *server, const int flags,
627 unsigned int *instance)
628{
629 return wait_for_free_credits(server, 1, -1, flags,
630 instance);
631}
632
633static int
634wait_for_compound_request(struct TCP_Server_Info *server, int num,
635 const int flags, unsigned int *instance)
636{
637 int *credits;
638
639 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
640
641 spin_lock(&server->req_lock);
642 if (*credits < num) {
643 /*
644 * Return immediately if not too many requests in flight since
645 * we will likely be stuck on waiting for credits.
646 */
647 if (server->in_flight < num - *credits) {
648 spin_unlock(&server->req_lock);
649 return -ENOTSUPP;
650 }
651 }
652 spin_unlock(&server->req_lock);
653
654 return wait_for_free_credits(server, num, 60000, flags,
655 instance);
656}
657
658int
659cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
660 unsigned int *num, struct cifs_credits *credits)
661{
662 *num = size;
663 credits->value = 0;
664 credits->instance = server->reconnect_instance;
665 return 0;
666}
667
668static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
669 struct mid_q_entry **ppmidQ)
670{
671 if (ses->server->tcpStatus == CifsExiting) {
672 return -ENOENT;
673 }
674
675 if (ses->server->tcpStatus == CifsNeedReconnect) {
676 cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
677 return -EAGAIN;
678 }
679
680 if (ses->status == CifsNew) {
681 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
682 (in_buf->Command != SMB_COM_NEGOTIATE))
683 return -EAGAIN;
684 /* else ok - we are setting up session */
685 }
686
687 if (ses->status == CifsExiting) {
688 /* check if SMB session is bad because we are setting it up */
689 if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
690 return -EAGAIN;
691 /* else ok - we are shutting down session */
692 }
693
694 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
695 if (*ppmidQ == NULL)
696 return -ENOMEM;
697 spin_lock(&GlobalMid_Lock);
698 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
699 spin_unlock(&GlobalMid_Lock);
700 return 0;
701}
702
703static int
704wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
705{
706 int error;
707
708 error = wait_event_freezekillable_unsafe(server->response_q,
709 midQ->mid_state != MID_REQUEST_SUBMITTED);
710 if (error < 0)
711 return -ERESTARTSYS;
712
713 return 0;
714}
715
716struct mid_q_entry *
717cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
718{
719 int rc;
720 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
721 struct mid_q_entry *mid;
722
723 if (rqst->rq_iov[0].iov_len != 4 ||
724 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
725 return ERR_PTR(-EIO);
726
727 /* enable signing if server requires it */
728 if (server->sign)
729 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
730
731 mid = AllocMidQEntry(hdr, server);
732 if (mid == NULL)
733 return ERR_PTR(-ENOMEM);
734
735 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
736 if (rc) {
737 DeleteMidQEntry(mid);
738 return ERR_PTR(rc);
739 }
740
741 return mid;
742}
743
744/*
745 * Send a SMB request and set the callback function in the mid to handle
746 * the result. Caller is responsible for dealing with timeouts.
747 */
748int
749cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
750 mid_receive_t *receive, mid_callback_t *callback,
751 mid_handle_t *handle, void *cbdata, const int flags,
752 const struct cifs_credits *exist_credits)
753{
754 int rc;
755 struct mid_q_entry *mid;
756 struct cifs_credits credits = { .value = 0, .instance = 0 };
757 unsigned int instance;
758 int optype;
759
760 optype = flags & CIFS_OP_MASK;
761
762 if ((flags & CIFS_HAS_CREDITS) == 0) {
763 rc = wait_for_free_request(server, flags, &instance);
764 if (rc)
765 return rc;
766 credits.value = 1;
767 credits.instance = instance;
768 } else
769 instance = exist_credits->instance;
770
771 mutex_lock(&server->srv_mutex);
772
773 /*
774 * We can't use credits obtained from the previous session to send this
775 * request. Check if there were reconnects after we obtained credits and
776 * return -EAGAIN in such cases to let callers handle it.
777 */
778 if (instance != server->reconnect_instance) {
779 mutex_unlock(&server->srv_mutex);
780 add_credits_and_wake_if(server, &credits, optype);
781 return -EAGAIN;
782 }
783
784 mid = server->ops->setup_async_request(server, rqst);
785 if (IS_ERR(mid)) {
786 mutex_unlock(&server->srv_mutex);
787 add_credits_and_wake_if(server, &credits, optype);
788 return PTR_ERR(mid);
789 }
790
791 mid->receive = receive;
792 mid->callback = callback;
793 mid->callback_data = cbdata;
794 mid->handle = handle;
795 mid->mid_state = MID_REQUEST_SUBMITTED;
796
797 /* put it on the pending_mid_q */
798 spin_lock(&GlobalMid_Lock);
799 list_add_tail(&mid->qhead, &server->pending_mid_q);
800 spin_unlock(&GlobalMid_Lock);
801
802 /*
803 * Need to store the time in mid before calling I/O. For call_async,
804 * I/O response may come back and free the mid entry on another thread.
805 */
806 cifs_save_when_sent(mid);
807 cifs_in_send_inc(server);
808 rc = smb_send_rqst(server, 1, rqst, flags);
809 cifs_in_send_dec(server);
810
811 if (rc < 0) {
812 revert_current_mid(server, mid->credits);
813 server->sequence_number -= 2;
814 cifs_delete_mid(mid);
815 }
816
817 mutex_unlock(&server->srv_mutex);
818
819 if (rc == 0)
820 return 0;
821
822 add_credits_and_wake_if(server, &credits, optype);
823 return rc;
824}
825
826/*
827 *
828 * Send an SMB Request. No response info (other than return code)
829 * needs to be parsed.
830 *
831 * flags indicate the type of request buffer and how long to wait
832 * and whether to log NT STATUS code (error) before mapping it to POSIX error
833 *
834 */
835int
836SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
837 char *in_buf, int flags)
838{
839 int rc;
840 struct kvec iov[1];
841 struct kvec rsp_iov;
842 int resp_buf_type;
843
844 iov[0].iov_base = in_buf;
845 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
846 flags |= CIFS_NO_RSP_BUF;
847 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
848 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
849
850 return rc;
851}
852
853static int
854cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
855{
856 int rc = 0;
857
858 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
859 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
860
861 spin_lock(&GlobalMid_Lock);
862 switch (mid->mid_state) {
863 case MID_RESPONSE_RECEIVED:
864 spin_unlock(&GlobalMid_Lock);
865 return rc;
866 case MID_RETRY_NEEDED:
867 rc = -EAGAIN;
868 break;
869 case MID_RESPONSE_MALFORMED:
870 rc = -EIO;
871 break;
872 case MID_SHUTDOWN:
873 rc = -EHOSTDOWN;
874 break;
875 default:
876 if (!(mid->mid_flags & MID_DELETED)) {
877 list_del_init(&mid->qhead);
878 mid->mid_flags |= MID_DELETED;
879 }
880 cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
881 __func__, mid->mid, mid->mid_state);
882 rc = -EIO;
883 }
884 spin_unlock(&GlobalMid_Lock);
885
886 DeleteMidQEntry(mid);
887 return rc;
888}
889
890static inline int
891send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
892 struct mid_q_entry *mid)
893{
894 return server->ops->send_cancel ?
895 server->ops->send_cancel(server, rqst, mid) : 0;
896}
897
898int
899cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
900 bool log_error)
901{
902 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
903
904 dump_smb(mid->resp_buf, min_t(u32, 92, len));
905
906 /* convert the length into a more usable form */
907 if (server->sign) {
908 struct kvec iov[2];
909 int rc = 0;
910 struct smb_rqst rqst = { .rq_iov = iov,
911 .rq_nvec = 2 };
912
913 iov[0].iov_base = mid->resp_buf;
914 iov[0].iov_len = 4;
915 iov[1].iov_base = (char *)mid->resp_buf + 4;
916 iov[1].iov_len = len - 4;
917 /* FIXME: add code to kill session */
918 rc = cifs_verify_signature(&rqst, server,
919 mid->sequence_number);
920 if (rc)
921 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
922 rc);
923 }
924
925 /* BB special case reconnect tid and uid here? */
926 return map_smb_to_linux_error(mid->resp_buf, log_error);
927}
928
929struct mid_q_entry *
930cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
931{
932 int rc;
933 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
934 struct mid_q_entry *mid;
935
936 if (rqst->rq_iov[0].iov_len != 4 ||
937 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
938 return ERR_PTR(-EIO);
939
940 rc = allocate_mid(ses, hdr, &mid);
941 if (rc)
942 return ERR_PTR(rc);
943 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
944 if (rc) {
945 cifs_delete_mid(mid);
946 return ERR_PTR(rc);
947 }
948 return mid;
949}
950
951static void
952cifs_compound_callback(struct mid_q_entry *mid)
953{
954 struct TCP_Server_Info *server = mid->server;
955 struct cifs_credits credits;
956
957 credits.value = server->ops->get_credits(mid);
958 credits.instance = server->reconnect_instance;
959
960 add_credits(server, &credits, mid->optype);
961}
962
963static void
964cifs_compound_last_callback(struct mid_q_entry *mid)
965{
966 cifs_compound_callback(mid);
967 cifs_wake_up_task(mid);
968}
969
970static void
971cifs_cancelled_callback(struct mid_q_entry *mid)
972{
973 cifs_compound_callback(mid);
974 DeleteMidQEntry(mid);
975}
976
977int
978compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
979 const int flags, const int num_rqst, struct smb_rqst *rqst,
980 int *resp_buf_type, struct kvec *resp_iov)
981{
982 int i, j, optype, rc = 0;
983 struct mid_q_entry *midQ[MAX_COMPOUND];
984 bool cancelled_mid[MAX_COMPOUND] = {false};
985 struct cifs_credits credits[MAX_COMPOUND] = {
986 { .value = 0, .instance = 0 }
987 };
988 unsigned int instance;
989 char *buf;
990 struct TCP_Server_Info *server;
991
992 optype = flags & CIFS_OP_MASK;
993
994 for (i = 0; i < num_rqst; i++)
995 resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
996
997 if ((ses == NULL) || (ses->server == NULL)) {
998 cifs_dbg(VFS, "Null session\n");
999 return -EIO;
1000 }
1001
1002 server = ses->server;
1003 if (server->tcpStatus == CifsExiting)
1004 return -ENOENT;
1005
1006 /*
1007 * Wait for all the requests to become available.
1008 * This approach still leaves the possibility to be stuck waiting for
1009 * credits if the server doesn't grant credits to the outstanding
1010 * requests and if the client is completely idle, not generating any
1011 * other requests.
1012 * This can be handled by the eventual session reconnect.
1013 */
1014 rc = wait_for_compound_request(server, num_rqst, flags,
1015 &instance);
1016 if (rc)
1017 return rc;
1018
1019 for (i = 0; i < num_rqst; i++) {
1020 credits[i].value = 1;
1021 credits[i].instance = instance;
1022 }
1023
1024 /*
1025 * Make sure that we sign in the same order that we send on this socket
1026 * and avoid races inside tcp sendmsg code that could cause corruption
1027 * of smb data.
1028 */
1029
1030 mutex_lock(&server->srv_mutex);
1031
1032 /*
1033 * All the parts of the compound chain belong obtained credits from the
1034 * same session. We can not use credits obtained from the previous
1035 * session to send this request. Check if there were reconnects after
1036 * we obtained credits and return -EAGAIN in such cases to let callers
1037 * handle it.
1038 */
1039 if (instance != server->reconnect_instance) {
1040 mutex_unlock(&server->srv_mutex);
1041 for (j = 0; j < num_rqst; j++)
1042 add_credits(server, &credits[j], optype);
1043 return -EAGAIN;
1044 }

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
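	/* Bracket the socket send so the in-flight send counter stays accurate. */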
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

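	/*
	 * A failed send still consumed mid values and two signing sequence
	 * numbers (one for the request, one for the expected response);
	 * roll both back so the next attempt stays in sync with the server.
	 */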
	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason, or this is an oplock break that
	 * we will not receive a response to, return the credits.
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establishment.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
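	/*
	 * If any wait was interrupted, cancel every mid that is still
	 * outstanding. A response may yet arrive, so ownership of each
	 * cancelled mid passes to cifs_cancelled_callback, which returns
	 * its credits and frees it once the demultiplex thread is done.
	 */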
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
					midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled so it is not freed below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
				      server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so the buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;
	}

	/*
	 * Compounding is never used during session establishment.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using callbacks that will not wake
	 * this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}

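/*
 * Send a single request and wait for its response: a thin wrapper around
 * compound_send_recv() with a chain length of one.
 */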
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

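/*
 * Build an smb_rqst from a caller-supplied kvec array, prepending an
 * extra kvec for the 4-byte RFC1001 length field, then send the request
 * synchronously via cifs_send_recv().
 */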
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;
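	/*
	 * new_iov[0] now addresses just the 4-byte length field, while
	 * new_iov[1] has been advanced past it to the SMB header proper.
	 */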

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

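/*
 * Synchronous send/receive for a single pre-built SMB buffer (legacy
 * SMB1 path): allocate a mid, sign and send the buffer, then wait for
 * the response and copy it back to the caller.
 */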
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

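/*
 * Like SendReceive(), but for blocking SMB1 lock requests that may wait
 * indefinitely on the server: the wait is interruptible, and a signal
 * triggers a cancel request so the server releases the blocked lock.
 */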
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((server->tcpStatus == CifsGood) ||
	     (server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* received frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}