Linux v3.1: fs/cifs/transport.c
  1/*
  2 *   fs/cifs/transport.c
  3 *
  4 *   Copyright (C) International Business Machines  Corp., 2002,2008
  5 *   Author(s): Steve French (sfrench@us.ibm.com)
  6 *   Jeremy Allison (jra@samba.org) 2006.
  7 *
  8 *   This library is free software; you can redistribute it and/or modify
  9 *   it under the terms of the GNU Lesser General Public License as published
 10 *   by the Free Software Foundation; either version 2.1 of the License, or
 11 *   (at your option) any later version.
 12 *
 13 *   This library is distributed in the hope that it will be useful,
 14 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 15 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 16 *   the GNU Lesser General Public License for more details.
 17 *
 18 *   You should have received a copy of the GNU Lesser General Public License
 19 *   along with this library; if not, write to the Free Software
 20 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 21 */
 22
 23#include <linux/fs.h>
 24#include <linux/list.h>
 25#include <linux/gfp.h>
 26#include <linux/wait.h>
 27#include <linux/net.h>
 28#include <linux/delay.h>
 29#include <asm/uaccess.h>
 30#include <asm/processor.h>
 31#include <linux/mempool.h>
 32#include "cifspdu.h"
 33#include "cifsglob.h"
 34#include "cifsproto.h"
 35#include "cifs_debug.h"
 36
 37extern mempool_t *cifs_mid_poolp;
 38
 39static void
 40wake_up_task(struct mid_q_entry *mid)
 41{
 42	wake_up_process(mid->callback_data);
 43}
 44
 45struct mid_q_entry *
 46AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 47{
 48	struct mid_q_entry *temp;
 49
 50	if (server == NULL) {
 51		cERROR(1, "Null TCP session in AllocMidQEntry");
 52		return NULL;
 53	}
 54
 55	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
 56	if (temp == NULL)
 57		return temp;
 58	else {
 59		memset(temp, 0, sizeof(struct mid_q_entry));
 60		temp->mid = smb_buffer->Mid;	/* always LE */
 61		temp->pid = current->pid;
 62		temp->command = smb_buffer->Command;
 63		cFYI(1, "For smb_command %d", temp->command);
 64	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
 65		/* when mid allocated can be before when sent */
 66		temp->when_alloc = jiffies;
 67
 68		/*
 69		 * The default is for the mid to be synchronous, so the
 70		 * default callback just wakes up the current task.
 71		 */
 72		temp->callback = wake_up_task;
 73		temp->callback_data = current;
 74	}
 75
 76	atomic_inc(&midCount);
 77	temp->midState = MID_REQUEST_ALLOCATED;
 78	return temp;
 79}
 80
 81void
 82DeleteMidQEntry(struct mid_q_entry *midEntry)
 83{
 84#ifdef CONFIG_CIFS_STATS2
 85	unsigned long now;
 86#endif
 87	midEntry->midState = MID_FREE;
 88	atomic_dec(&midCount);
 89	if (midEntry->largeBuf)
 90		cifs_buf_release(midEntry->resp_buf);
 91	else
 92		cifs_small_buf_release(midEntry->resp_buf);
 93#ifdef CONFIG_CIFS_STATS2
 94	now = jiffies;
 95	/* commands taking longer than one second are indications that
 96	   something is wrong, unless it is quite a slow link or server */
 97	if ((now - midEntry->when_alloc) > HZ) {
 98		if ((cifsFYI & CIFS_TIMER) &&
 99		   (midEntry->command != SMB_COM_LOCKING_ANDX)) {
100			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %d",
101			       midEntry->command, midEntry->mid);
102			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
103			       now - midEntry->when_alloc,
104			       now - midEntry->when_sent,
105			       now - midEntry->when_received);
106		}
107	}
108#endif
109	mempool_free(midEntry, cifs_mid_poolp);
110}
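
/*
 * Note (not from the original file): in the CIFS_TIMER output above, "A:",
 * "S:" and "R:" are the jiffies elapsed since the mid was allocated
 * (when_alloc), sent (when_sent) and its response received (when_received).
 */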
111
112static void
113delete_mid(struct mid_q_entry *mid)
114{
115	spin_lock(&GlobalMid_Lock);
116	list_del(&mid->qhead);
117	spin_unlock(&GlobalMid_Lock);
118
119	DeleteMidQEntry(mid);
120}
121
122static int
123smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
124{
125	int rc = 0;
126	int i = 0;
127	struct msghdr smb_msg;
128	struct smb_hdr *smb_buffer = iov[0].iov_base;
129	unsigned int len = iov[0].iov_len;
130	unsigned int total_len;
131	int first_vec = 0;
132	unsigned int smb_buf_length = be32_to_cpu(smb_buffer->smb_buf_length);
133	struct socket *ssocket = server->ssocket;
134
135	if (ssocket == NULL)
136		return -ENOTSOCK; /* BB eventually add reconnect code here */
137
138	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
139	smb_msg.msg_namelen = sizeof(struct sockaddr);
140	smb_msg.msg_control = NULL;
141	smb_msg.msg_controllen = 0;
142	if (server->noblocksnd)
143		smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
144	else
145		smb_msg.msg_flags = MSG_NOSIGNAL;
146
147	total_len = 0;
148	for (i = 0; i < n_vec; i++)
149		total_len += iov[i].iov_len;
150
151	cFYI(1, "Sending smb:  total_len %d", total_len);
152	dump_smb(smb_buffer, len);
153
154	i = 0;
155	while (total_len) {
156		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
157				    n_vec - first_vec, total_len);
158		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
159			i++;
160			/* if blocking send we try 3 times, since each can block
161			   for 5 seconds. For nonblocking  we have to try more
162			   but wait increasing amounts of time allowing time for
163			   socket to clear.  The overall time we wait in either
164			   case to send on the socket is about 15 seconds.
165			   Similarly we wait for 15 seconds for
166			   a response from the server in SendReceive[2]
167			   for the server to send a response back for
168			   most types of requests (except SMB Write
169			   past end of file which can be slow, and
170			   blocking lock operations). NFS waits slightly longer
171			   than CIFS, but this can make it take longer for
172			   nonresponsive servers to be detected and 15 seconds
173			   is more than enough time for modern networks to
174			   send a packet.  In most cases if we fail to send
175			   after the retries we will kill the socket and
176			   reconnect which may clear the network problem.
177			*/
178			if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
179				cERROR(1, "sends on sock %p stuck for 15 seconds",
180				    ssocket);
181				rc = -EAGAIN;
182				break;
183			}
184			msleep(1 << i);
185			continue;
186		}
187		if (rc < 0)
188			break;
189
190		if (rc == total_len) {
191			total_len = 0;
192			break;
193		} else if (rc > total_len) {
194			cERROR(1, "sent %d requested %d", rc, total_len);
195			break;
196		}
197		if (rc == 0) {
198			/* should never happen, letting socket clear before
199			   retrying is our only obvious option here */
200			cERROR(1, "tcp sent no data");
201			msleep(500);
202			continue;
203		}
204		total_len -= rc;
205		/* the line below resets i */
206		for (i = first_vec; i < n_vec; i++) {
207			if (iov[i].iov_len) {
208				if (rc > iov[i].iov_len) {
209					rc -= iov[i].iov_len;
210					iov[i].iov_len = 0;
211				} else {
212					iov[i].iov_base += rc;
213					iov[i].iov_len -= rc;
214					first_vec = i;
215					break;
216				}
217			}
218		}
219		i = 0; /* in case we get ENOSPC on the next send */
220	}
221
222	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
223		cFYI(1, "partial send (%d remaining), terminating session",
224			total_len);
225		/* If we have only sent part of an SMB then the next SMB
226		   could be taken as the remainder of this one.  We need
227		   to kill the socket so the server throws away the partial
228		   SMB */
229		server->tcpStatus = CifsNeedReconnect;
230	}
231
232	if (rc < 0 && rc != -EINTR)
233		cERROR(1, "Error %d sending data on socket to server", rc);
234	else
235		rc = 0;
236
237	/* Don't want to modify the buffer as a
238	   side effect of this call. */
239	smb_buffer->smb_buf_length = cpu_to_be32(smb_buf_length);
240
241	return rc;
242}
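
/*
 * Illustrative sketch, not part of this file: on -ENOSPC/-EAGAIN the
 * nonblocking-send path above backs off exponentially, sleeping 2^n ms after
 * the n-th short send and giving up once n reaches 14, i.e. after roughly
 * 2 + 4 + ... + 8192 ms (about 16 s), which is the "about 15 seconds" the
 * comment refers to. A minimal stand-alone restatement of that policy (the
 * try_send() hook is hypothetical):
 */
static int send_with_backoff(int (*try_send)(void *arg), void *arg)
{
	int i = 0, rc;

	for (;;) {
		rc = try_send(arg);		/* hypothetical transmit hook */
		if (rc != -EAGAIN && rc != -ENOSPC)
			return rc;		/* sent, or a hard error */
		if (++i >= 14)
			return -EAGAIN;		/* stuck for ~15 seconds */
		msleep(1 << i);			/* 2 ms, 4 ms, 8 ms, ... */
	}
}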
243
244int
245smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
246	 unsigned int smb_buf_length)
247{
248	struct kvec iov;
249
250	iov.iov_base = smb_buffer;
251	iov.iov_len = smb_buf_length + 4;
252
253	return smb_sendv(server, &iov, 1);
254}
255
256static int wait_for_free_request(struct TCP_Server_Info *server,
257				 const int long_op)
258{
259	if (long_op == CIFS_ASYNC_OP) {
260		/* oplock breaks must not be held up */
261		atomic_inc(&server->inFlight);
262		return 0;
263	}
264
265	spin_lock(&GlobalMid_Lock);
266	while (1) {
267		if (atomic_read(&server->inFlight) >= cifs_max_pending) {
268			spin_unlock(&GlobalMid_Lock);
269			cifs_num_waiters_inc(server);
270			wait_event(server->request_q,
271				   atomic_read(&server->inFlight)
272				     < cifs_max_pending);
273			cifs_num_waiters_dec(server);
274			spin_lock(&GlobalMid_Lock);
275		} else {
276			if (server->tcpStatus == CifsExiting) {
277				spin_unlock(&GlobalMid_Lock);
278				return -ENOENT;
279			}
280
281			/* can not count locking commands against total
282			   as they are allowed to block on server */
283
284			/* update # of requests on the wire to server */
285			if (long_op != CIFS_BLOCKING_OP)
286				atomic_inc(&server->inFlight);
287			spin_unlock(&GlobalMid_Lock);
288			break;
289		}
290	}
291	return 0;
292}
293
294static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
295			struct mid_q_entry **ppmidQ)
296{
297	if (ses->server->tcpStatus == CifsExiting) {
298		return -ENOENT;
299	}
300
301	if (ses->server->tcpStatus == CifsNeedReconnect) {
302		cFYI(1, "tcp session dead - return to caller to retry");
303		return -EAGAIN;
304	}
305
306	if (ses->status != CifsGood) {
307		/* check if SMB session is bad because we are setting it up */
308		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
309			(in_buf->Command != SMB_COM_NEGOTIATE))
310			return -EAGAIN;
311		/* else ok - we are setting up session */
312	}
313	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
314	if (*ppmidQ == NULL)
315		return -ENOMEM;
316	spin_lock(&GlobalMid_Lock);
317	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
318	spin_unlock(&GlobalMid_Lock);
319	return 0;
320}
321
322static int
323wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
324{
325	int error;
326
327	error = wait_event_killable(server->response_q,
328				    midQ->midState != MID_REQUEST_SUBMITTED);
329	if (error < 0)
330		return -ERESTARTSYS;
331
332	return 0;
333}
334
335
336/*
337 * Send a SMB request and set the callback function in the mid to handle
338 * the result. Caller is responsible for dealing with timeouts.
339 */
340int
341cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
342		unsigned int nvec, mid_callback_t *callback, void *cbdata,
343		bool ignore_pend)
344{
345	int rc;
346	struct mid_q_entry *mid;
347	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
348
349	rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
350	if (rc)
351		return rc;
352
353	/* enable signing if server requires it */
354	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
355		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
356
357	mutex_lock(&server->srv_mutex);
358	mid = AllocMidQEntry(hdr, server);
359	if (mid == NULL) {
360		mutex_unlock(&server->srv_mutex);
361		atomic_dec(&server->inFlight);
362		wake_up(&server->request_q);
363		return -ENOMEM;
364	}
365
366	/* put it on the pending_mid_q */
367	spin_lock(&GlobalMid_Lock);
368	list_add_tail(&mid->qhead, &server->pending_mid_q);
369	spin_unlock(&GlobalMid_Lock);
370
371	rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
372	if (rc) {
373		mutex_unlock(&server->srv_mutex);
374		goto out_err;
375	}
376
377	mid->callback = callback;
378	mid->callback_data = cbdata;
379	mid->midState = MID_REQUEST_SUBMITTED;
380
381	cifs_in_send_inc(server);
382	rc = smb_sendv(server, iov, nvec);
383	cifs_in_send_dec(server);
384	cifs_save_when_sent(mid);
385	mutex_unlock(&server->srv_mutex);
386
387	if (rc)
388		goto out_err;
389
390	return rc;
391out_err:
392	delete_mid(mid);
393	atomic_dec(&server->inFlight);
394	wake_up(&server->request_q);
395	return rc;
396}
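
/*
 * Illustrative sketch, not part of this file: a typical caller of
 * cifs_call_async() supplies a completion callback that consumes the mid and
 * then releases the in-flight slot itself, mirroring the error path above,
 * because on success cifs_call_async() returns before the response arrives.
 * The context structure and function names below are hypothetical:
 */
struct my_async_ctx {
	struct TCP_Server_Info *server;
	/* ... per-request result fields ... */
};

static void my_async_callback(struct mid_q_entry *mid)
{
	struct my_async_ctx *ctx = mid->callback_data;
	struct TCP_Server_Info *server = ctx->server;

	/* examine mid->midState and mid->resp_buf here, then clean up */
	DeleteMidQEntry(mid);
	atomic_dec(&server->inFlight);
	wake_up(&server->request_q);
}

/* submission: rc = cifs_call_async(server, iov, nvec, my_async_callback, ctx, false); */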
397
398/*
399 *
400 * Send an SMB Request.  No response info (other than return code)
401 * needs to be parsed.
402 *
403 * flags indicate the type of request buffer and how long to wait
404 * and whether to log NT STATUS code (error) before mapping it to POSIX error
405 *
406 */
407int
408SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
409		struct smb_hdr *in_buf, int flags)
410{
411	int rc;
412	struct kvec iov[1];
413	int resp_buf_type;
414
415	iov[0].iov_base = (char *)in_buf;
416	iov[0].iov_len = be32_to_cpu(in_buf->smb_buf_length) + 4;
417	flags |= CIFS_NO_RESP;
418	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
419	cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);
420
421	return rc;
422}
423
424static int
425cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
426{
427	int rc = 0;
428
429	cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
430		mid->mid, mid->midState);
431
432	spin_lock(&GlobalMid_Lock);
433	switch (mid->midState) {
434	case MID_RESPONSE_RECEIVED:
435		spin_unlock(&GlobalMid_Lock);
436		return rc;
437	case MID_RETRY_NEEDED:
438		rc = -EAGAIN;
439		break;
440	case MID_RESPONSE_MALFORMED:
441		rc = -EIO;
442		break;
443	case MID_SHUTDOWN:
444		rc = -EHOSTDOWN;
445		break;
446	default:
447		list_del_init(&mid->qhead);
448		cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
449			mid->mid, mid->midState);
450		rc = -EIO;
451	}
452	spin_unlock(&GlobalMid_Lock);
453
454	DeleteMidQEntry(mid);
455	return rc;
456}
457
458/*
459 * An NT cancel request header looks just like the original request except:
460 *
461 * The Command is SMB_COM_NT_CANCEL
462 * The WordCount is zeroed out
463 * The ByteCount is zeroed out
464 *
465 * This function mangles an existing request buffer into a
466 * SMB_COM_NT_CANCEL request and then sends it.
467 */
468static int
469send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
470		struct mid_q_entry *mid)
471{
472	int rc = 0;
473
474	/* -4 for RFC1001 length and +2 for BCC field */
475	in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4  + 2);
476	in_buf->Command = SMB_COM_NT_CANCEL;
477	in_buf->WordCount = 0;
478	put_bcc(0, in_buf);
479
480	mutex_lock(&server->srv_mutex);
481	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
482	if (rc) {
483		mutex_unlock(&server->srv_mutex);
484		return rc;
485	}
486	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
487	mutex_unlock(&server->srv_mutex);
488
489	cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
490		in_buf->Mid, rc);
491
492	return rc;
493}
494
495int
496cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
497		   bool log_error)
498{
499	dump_smb(mid->resp_buf,
500		 min_t(u32, 92, be32_to_cpu(mid->resp_buf->smb_buf_length)));
501
502	/* convert the length into a more usable form */
503	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
504		/* FIXME: add code to kill session */
505		if (cifs_verify_signature(mid->resp_buf, server,
506					  mid->sequence_number + 1) != 0)
507			cERROR(1, "Unexpected SMB signature");
508	}
509
510	/* BB special case reconnect tid and uid here? */
511	return map_smb_to_linux_error(mid->resp_buf, log_error);
512}
513
514int
515SendReceive2(const unsigned int xid, struct cifs_ses *ses,
516	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
517	     const int flags)
518{
519	int rc = 0;
520	int long_op;
521	struct mid_q_entry *midQ;
522	struct smb_hdr *in_buf = iov[0].iov_base;
523
524	long_op = flags & CIFS_TIMEOUT_MASK;
525
526	*pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */
527
528	if ((ses == NULL) || (ses->server == NULL)) {
529		cifs_small_buf_release(in_buf);
530		cERROR(1, "Null session");
531		return -EIO;
532	}
533
534	if (ses->server->tcpStatus == CifsExiting) {
535		cifs_small_buf_release(in_buf);
536		return -ENOENT;
537	}
538
539	/* Ensure that we do not send more than 50 overlapping requests
540	   to the same server. We may make this configurable later or
541	   use ses->maxReq */
542
543	rc = wait_for_free_request(ses->server, long_op);
544	if (rc) {
545		cifs_small_buf_release(in_buf);
546		return rc;
547	}
548
549	/* make sure that we sign in the same order that we send on this socket
550	   and avoid races inside tcp sendmsg code that could cause corruption
551	   of smb data */
552
553	mutex_lock(&ses->server->srv_mutex);
554
555	rc = allocate_mid(ses, in_buf, &midQ);
556	if (rc) {
557		mutex_unlock(&ses->server->srv_mutex);
558		cifs_small_buf_release(in_buf);
559		/* Update # of requests on wire to server */
560		atomic_dec(&ses->server->inFlight);
561		wake_up(&ses->server->request_q);
562		return rc;
563	}
564	rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
565	if (rc) {
566		mutex_unlock(&ses->server->srv_mutex);
567		cifs_small_buf_release(in_buf);
568		goto out;
569	}
570
571	midQ->midState = MID_REQUEST_SUBMITTED;
572	cifs_in_send_inc(ses->server);
573	rc = smb_sendv(ses->server, iov, n_vec);
574	cifs_in_send_dec(ses->server);
575	cifs_save_when_sent(midQ);
576
577	mutex_unlock(&ses->server->srv_mutex);
578
579	if (rc < 0) {
580		cifs_small_buf_release(in_buf);
581		goto out;
582	}
583
584	if (long_op == CIFS_ASYNC_OP) {
585		cifs_small_buf_release(in_buf);
586		goto out;
587	}
588
589	rc = wait_for_response(ses->server, midQ);
590	if (rc != 0) {
591		send_nt_cancel(ses->server, in_buf, midQ);
592		spin_lock(&GlobalMid_Lock);
593		if (midQ->midState == MID_REQUEST_SUBMITTED) {
594			midQ->callback = DeleteMidQEntry;
595			spin_unlock(&GlobalMid_Lock);
596			cifs_small_buf_release(in_buf);
597			atomic_dec(&ses->server->inFlight);
598			wake_up(&ses->server->request_q);
599			return rc;
600		}
601		spin_unlock(&GlobalMid_Lock);
602	}
603
604	cifs_small_buf_release(in_buf);
605
606	rc = cifs_sync_mid_result(midQ, ses->server);
607	if (rc != 0) {
608		atomic_dec(&ses->server->inFlight);
609		wake_up(&ses->server->request_q);
610		return rc;
611	}
612
613	if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) {
614		rc = -EIO;
615		cFYI(1, "Bad MID state?");
616		goto out;
617	}
618
619	iov[0].iov_base = (char *)midQ->resp_buf;
620	iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4;
621	if (midQ->largeBuf)
622		*pRespBufType = CIFS_LARGE_BUFFER;
623	else
624		*pRespBufType = CIFS_SMALL_BUFFER;
625
626	rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);
627
628	/* mark it so buf will not be freed by delete_mid */
629	if ((flags & CIFS_NO_RESP) == 0)
630		midQ->resp_buf = NULL;
631out:
632	delete_mid(midQ);
633	atomic_dec(&ses->server->inFlight);
634	wake_up(&ses->server->request_q);
635
636	return rc;
637}
638
639int
640SendReceive(const unsigned int xid, struct cifs_ses *ses,
641	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
642	    int *pbytes_returned, const int long_op)
643{
644	int rc = 0;
645	struct mid_q_entry *midQ;
646
647	if (ses == NULL) {
648		cERROR(1, "Null smb session");
649		return -EIO;
650	}
651	if (ses->server == NULL) {
652		cERROR(1, "Null tcp session");
653		return -EIO;
654	}
655
656	if (ses->server->tcpStatus == CifsExiting)
657		return -ENOENT;
658
659	/* Ensure that we do not send more than 50 overlapping requests
660	   to the same server. We may make this configurable later or
661	   use ses->maxReq */
662
663	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
664			MAX_CIFS_HDR_SIZE - 4) {
665		cERROR(1, "Illegal length, greater than maximum frame, %d",
666			   be32_to_cpu(in_buf->smb_buf_length));
667		return -EIO;
668	}
669
670	rc = wait_for_free_request(ses->server, long_op);
671	if (rc)
672		return rc;
673
674	/* make sure that we sign in the same order that we send on this socket
675	   and avoid races inside tcp sendmsg code that could cause corruption
676	   of smb data */
677
678	mutex_lock(&ses->server->srv_mutex);
679
680	rc = allocate_mid(ses, in_buf, &midQ);
681	if (rc) {
682		mutex_unlock(&ses->server->srv_mutex);
683		/* Update # of requests on wire to server */
684		atomic_dec(&ses->server->inFlight);
685		wake_up(&ses->server->request_q);
686		return rc;
687	}
688
689	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
690	if (rc) {
691		mutex_unlock(&ses->server->srv_mutex);
692		goto out;
693	}
694
695	midQ->midState = MID_REQUEST_SUBMITTED;
696
697	cifs_in_send_inc(ses->server);
698	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
699	cifs_in_send_dec(ses->server);
700	cifs_save_when_sent(midQ);
701	mutex_unlock(&ses->server->srv_mutex);
702
703	if (rc < 0)
704		goto out;
705
706	if (long_op == CIFS_ASYNC_OP)
707		goto out;
708
709	rc = wait_for_response(ses->server, midQ);
710	if (rc != 0) {
711		send_nt_cancel(ses->server, in_buf, midQ);
712		spin_lock(&GlobalMid_Lock);
713		if (midQ->midState == MID_REQUEST_SUBMITTED) {
714			/* no longer considered to be "in-flight" */
715			midQ->callback = DeleteMidQEntry;
716			spin_unlock(&GlobalMid_Lock);
717			atomic_dec(&ses->server->inFlight);
718			wake_up(&ses->server->request_q);
719			return rc;
720		}
721		spin_unlock(&GlobalMid_Lock);
722	}
723
724	rc = cifs_sync_mid_result(midQ, ses->server);
725	if (rc != 0) {
726		atomic_dec(&ses->server->inFlight);
727		wake_up(&ses->server->request_q);
728		return rc;
729	}
730
731	if (!midQ->resp_buf || !out_buf ||
732	    midQ->midState != MID_RESPONSE_RECEIVED) {
733		rc = -EIO;
734		cERROR(1, "Bad MID state?");
735		goto out;
736	}
737
738	*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
739	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
740	rc = cifs_check_receive(midQ, ses->server, 0);
741out:
742	delete_mid(midQ);
743	atomic_dec(&ses->server->inFlight);
744	wake_up(&ses->server->request_q);
745
746	return rc;
747}
748
749/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
750   blocking lock to return. */
751
752static int
753send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
754			struct smb_hdr *in_buf,
755			struct smb_hdr *out_buf)
756{
757	int bytes_returned;
758	struct cifs_ses *ses = tcon->ses;
759	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
760
761	/* We just modify the current in_buf to change
762	   the type of lock from LOCKING_ANDX_SHARED_LOCK
763	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
764	   LOCKING_ANDX_CANCEL_LOCK. */
765
766	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
767	pSMB->Timeout = 0;
768	pSMB->hdr.Mid = GetNextMid(ses->server);
769
770	return SendReceive(xid, ses, in_buf, out_buf,
771			&bytes_returned, 0);
772}
773
774int
775SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
776	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
777	    int *pbytes_returned)
778{
779	int rc = 0;
780	int rstart = 0;
781	struct mid_q_entry *midQ;
782	struct cifs_ses *ses;
783
784	if (tcon == NULL || tcon->ses == NULL) {
785		cERROR(1, "Null smb session");
786		return -EIO;
787	}
788	ses = tcon->ses;
789
790	if (ses->server == NULL) {
791		cERROR(1, "Null tcp session");
792		return -EIO;
793	}
794
795	if (ses->server->tcpStatus == CifsExiting)
796		return -ENOENT;
797
798	/* Ensure that we do not send more than 50 overlapping requests
799	   to the same server. We may make this configurable later or
800	   use ses->maxReq */
801
802	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
803			MAX_CIFS_HDR_SIZE - 4) {
804		cERROR(1, "Illegal length, greater than maximum frame, %d",
805			   be32_to_cpu(in_buf->smb_buf_length));
806		return -EIO;
807	}
808
809	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
810	if (rc)
811		return rc;
812
813	/* make sure that we sign in the same order that we send on this socket
814	   and avoid races inside tcp sendmsg code that could cause corruption
815	   of smb data */
816
817	mutex_lock(&ses->server->srv_mutex);
818
819	rc = allocate_mid(ses, in_buf, &midQ);
820	if (rc) {
821		mutex_unlock(&ses->server->srv_mutex);
822		return rc;
823	}
824
825	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
826	if (rc) {
827		delete_mid(midQ);
828		mutex_unlock(&ses->server->srv_mutex);
829		return rc;
830	}
831
832	midQ->midState = MID_REQUEST_SUBMITTED;
833	cifs_in_send_inc(ses->server);
834	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
835	cifs_in_send_dec(ses->server);
836	cifs_save_when_sent(midQ);
837	mutex_unlock(&ses->server->srv_mutex);
838
839	if (rc < 0) {
840		delete_mid(midQ);
841		return rc;
842	}
843
844	/* Wait for a reply - allow signals to interrupt. */
845	rc = wait_event_interruptible(ses->server->response_q,
846		(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
847		((ses->server->tcpStatus != CifsGood) &&
848		 (ses->server->tcpStatus != CifsNew)));
849
850	/* Were we interrupted by a signal ? */
851	if ((rc == -ERESTARTSYS) &&
852		(midQ->midState == MID_REQUEST_SUBMITTED) &&
853		((ses->server->tcpStatus == CifsGood) ||
854		 (ses->server->tcpStatus == CifsNew))) {
855
856		if (in_buf->Command == SMB_COM_TRANSACTION2) {
857			/* POSIX lock. We send a NT_CANCEL SMB to cause the
858			   blocking lock to return. */
859			rc = send_nt_cancel(ses->server, in_buf, midQ);
860			if (rc) {
861				delete_mid(midQ);
862				return rc;
863			}
864		} else {
865			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
866			   to cause the blocking lock to return. */
867
868			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
869
870			/* If we get -ENOLCK back the lock may have
871			   already been removed. Don't exit in this case. */
872			if (rc && rc != -ENOLCK) {
873				delete_mid(midQ);
874				return rc;
875			}
876		}
877
878		rc = wait_for_response(ses->server, midQ);
879		if (rc) {
880			send_nt_cancel(ses->server, in_buf, midQ);
881			spin_lock(&GlobalMid_Lock);
882			if (midQ->midState == MID_REQUEST_SUBMITTED) {
883				/* no longer considered to be "in-flight" */
884				midQ->callback = DeleteMidQEntry;
885				spin_unlock(&GlobalMid_Lock);
886				return rc;
887			}
888			spin_unlock(&GlobalMid_Lock);
889		}
890
891		/* We got the response - restart system call. */
892		rstart = 1;
893	}
894
895	rc = cifs_sync_mid_result(midQ, ses->server);
896	if (rc != 0)
897		return rc;
898
899	/* rcvd frame is ok */
900	if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) {
901		rc = -EIO;
902		cERROR(1, "Bad MID state?");
903		goto out;
904	}
905
906	*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
907	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
908	rc = cifs_check_receive(midQ, ses->server, 0);
909out:
910	delete_mid(midQ);
911	if (rstart && rc == -EACCES)
912		return -ERESTARTSYS;
913	return rc;
914}
Linux v3.5.6: fs/cifs/transport.c
  1/*
  2 *   fs/cifs/transport.c
  3 *
  4 *   Copyright (C) International Business Machines  Corp., 2002,2008
  5 *   Author(s): Steve French (sfrench@us.ibm.com)
  6 *   Jeremy Allison (jra@samba.org) 2006.
  7 *
  8 *   This library is free software; you can redistribute it and/or modify
  9 *   it under the terms of the GNU Lesser General Public License as published
 10 *   by the Free Software Foundation; either version 2.1 of the License, or
 11 *   (at your option) any later version.
 12 *
 13 *   This library is distributed in the hope that it will be useful,
 14 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 15 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 16 *   the GNU Lesser General Public License for more details.
 17 *
 18 *   You should have received a copy of the GNU Lesser General Public License
 19 *   along with this library; if not, write to the Free Software
 20 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 21 */
 22
 23#include <linux/fs.h>
 24#include <linux/list.h>
 25#include <linux/gfp.h>
 26#include <linux/wait.h>
 27#include <linux/net.h>
 28#include <linux/delay.h>
 29#include <linux/freezer.h>
 30#include <asm/uaccess.h>
 31#include <asm/processor.h>
 32#include <linux/mempool.h>
 33#include "cifspdu.h"
 34#include "cifsglob.h"
 35#include "cifsproto.h"
 36#include "cifs_debug.h"
 37
 38extern mempool_t *cifs_mid_poolp;
 39
 40static void
 41wake_up_task(struct mid_q_entry *mid)
 42{
 43	wake_up_process(mid->callback_data);
 44}
 45
 46struct mid_q_entry *
 47AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 48{
 49	struct mid_q_entry *temp;
 50
 51	if (server == NULL) {
 52		cERROR(1, "Null TCP session in AllocMidQEntry");
 53		return NULL;
 54	}
 55
 56	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
 57	if (temp == NULL)
 58		return temp;
 59	else {
 60		memset(temp, 0, sizeof(struct mid_q_entry));
 61		temp->mid = smb_buffer->Mid;	/* always LE */
 62		temp->pid = current->pid;
 63		temp->command = cpu_to_le16(smb_buffer->Command);
 64		cFYI(1, "For smb_command %d", smb_buffer->Command);
 65	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
 66		/* when mid allocated can be before when sent */
 67		temp->when_alloc = jiffies;
 68
 69		/*
 70		 * The default is for the mid to be synchronous, so the
 71		 * default callback just wakes up the current task.
 72		 */
 73		temp->callback = wake_up_task;
 74		temp->callback_data = current;
 75	}
 76
 77	atomic_inc(&midCount);
 78	temp->mid_state = MID_REQUEST_ALLOCATED;
 79	return temp;
 80}
 81
 82void
 83DeleteMidQEntry(struct mid_q_entry *midEntry)
 84{
 85#ifdef CONFIG_CIFS_STATS2
 86	unsigned long now;
 87#endif
 88	midEntry->mid_state = MID_FREE;
 89	atomic_dec(&midCount);
 90	if (midEntry->large_buf)
 91		cifs_buf_release(midEntry->resp_buf);
 92	else
 93		cifs_small_buf_release(midEntry->resp_buf);
 94#ifdef CONFIG_CIFS_STATS2
 95	now = jiffies;
 96	/* commands taking longer than one second are indications that
 97	   something is wrong, unless it is quite a slow link or server */
 98	if ((now - midEntry->when_alloc) > HZ) {
 99		if ((cifsFYI & CIFS_TIMER) &&
100		    (midEntry->command != cpu_to_le16(SMB_COM_LOCKING_ANDX))) {
101			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
102			       midEntry->command, midEntry->mid);
103			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
104			       now - midEntry->when_alloc,
105			       now - midEntry->when_sent,
106			       now - midEntry->when_received);
107		}
108	}
109#endif
110	mempool_free(midEntry, cifs_mid_poolp);
111}
112
113static void
114delete_mid(struct mid_q_entry *mid)
115{
116	spin_lock(&GlobalMid_Lock);
117	list_del(&mid->qhead);
118	spin_unlock(&GlobalMid_Lock);
119
120	DeleteMidQEntry(mid);
121}
122
123static int
124smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
125{
126	int rc = 0;
127	int i = 0;
128	struct msghdr smb_msg;
129	__be32 *buf_len = (__be32 *)(iov[0].iov_base);
130	unsigned int len = iov[0].iov_len;
131	unsigned int total_len;
132	int first_vec = 0;
133	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
134	struct socket *ssocket = server->ssocket;
135
136	if (ssocket == NULL)
137		return -ENOTSOCK; /* BB eventually add reconnect code here */
138
139	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
140	smb_msg.msg_namelen = sizeof(struct sockaddr);
141	smb_msg.msg_control = NULL;
142	smb_msg.msg_controllen = 0;
143	if (server->noblocksnd)
144		smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
145	else
146		smb_msg.msg_flags = MSG_NOSIGNAL;
147
148	total_len = 0;
149	for (i = 0; i < n_vec; i++)
150		total_len += iov[i].iov_len;
151
152	cFYI(1, "Sending smb:  total_len %d", total_len);
153	dump_smb(iov[0].iov_base, len);
154
155	i = 0;
156	while (total_len) {
157		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
158				    n_vec - first_vec, total_len);
159		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
160			i++;
161			/*
162			 * If blocking send we try 3 times, since each can block
163			 * for 5 seconds. For nonblocking  we have to try more
164			 * but wait increasing amounts of time allowing time for
165			 * socket to clear.  The overall time we wait in either
166			 * case to send on the socket is about 15 seconds.
167			 * Similarly we wait for 15 seconds for a response from
168			 * the server in SendReceive[2] for the server to send
169			 * a response back for most types of requests (except
170			 * SMB Write past end of file which can be slow, and
171			 * blocking lock operations). NFS waits slightly longer
172			 * than CIFS, but this can make it take longer for
173			 * nonresponsive servers to be detected and 15 seconds
174			 * is more than enough time for modern networks to
175			 * send a packet.  In most cases if we fail to send
176			 * after the retries we will kill the socket and
177			 * reconnect which may clear the network problem.
178			 */
179			if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
180				cERROR(1, "sends on sock %p stuck for 15 seconds",
181				    ssocket);
182				rc = -EAGAIN;
183				break;
184			}
185			msleep(1 << i);
186			continue;
187		}
188		if (rc < 0)
189			break;
190
191		if (rc == total_len) {
192			total_len = 0;
193			break;
194		} else if (rc > total_len) {
195			cERROR(1, "sent %d requested %d", rc, total_len);
196			break;
197		}
198		if (rc == 0) {
199			/* should never happen, letting socket clear before
200			   retrying is our only obvious option here */
201			cERROR(1, "tcp sent no data");
202			msleep(500);
203			continue;
204		}
205		total_len -= rc;
206		/* the line below resets i */
207		for (i = first_vec; i < n_vec; i++) {
208			if (iov[i].iov_len) {
209				if (rc > iov[i].iov_len) {
210					rc -= iov[i].iov_len;
211					iov[i].iov_len = 0;
212				} else {
213					iov[i].iov_base += rc;
214					iov[i].iov_len -= rc;
215					first_vec = i;
216					break;
217				}
218			}
219		}
220		i = 0; /* in case we get ENOSPC on the next send */
221	}
222
223	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
224		cFYI(1, "partial send (%d remaining), terminating session",
225			total_len);
226		/* If we have only sent part of an SMB then the next SMB
227		   could be taken as the remainder of this one.  We need
228		   to kill the socket so the server throws away the partial
229		   SMB */
230		server->tcpStatus = CifsNeedReconnect;
231	}
232
233	if (rc < 0 && rc != -EINTR)
234		cERROR(1, "Error %d sending data on socket to server", rc);
235	else
236		rc = 0;
237
238	/* Don't want to modify the buffer as a side effect of this call. */
239	*buf_len = cpu_to_be32(smb_buf_length);
240
241	return rc;
242}
243
244int
245smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
246	 unsigned int smb_buf_length)
247{
248	struct kvec iov;
249
250	iov.iov_base = smb_buffer;
251	iov.iov_len = smb_buf_length + 4;
252
253	return smb_sendv(server, &iov, 1);
254}
255
256static int
257wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
258		      int *credits)
259{
260	int rc;
261
262	spin_lock(&server->req_lock);
263	if (optype == CIFS_ASYNC_OP) {
264		/* oplock breaks must not be held up */
265		server->in_flight++;
266		*credits -= 1;
267		spin_unlock(&server->req_lock);
268		return 0;
269	}
270
271	while (1) {
272		if (*credits <= 0) {
273			spin_unlock(&server->req_lock);
274			cifs_num_waiters_inc(server);
275			rc = wait_event_killable(server->request_q,
276						 has_credits(server, credits));
277			cifs_num_waiters_dec(server);
278			if (rc)
279				return rc;
280			spin_lock(&server->req_lock);
281		} else {
282			if (server->tcpStatus == CifsExiting) {
283				spin_unlock(&server->req_lock);
284				return -ENOENT;
285			}
286
287			/*
288			 * Can not count locking commands against total
289			 * as they are allowed to block on server.
290			 */
291
292			/* update # of requests on the wire to server */
293			if (optype != CIFS_BLOCKING_OP) {
294				*credits -= 1;
295				server->in_flight++;
296			}
297			spin_unlock(&server->req_lock);
298			break;
299		}
300	}
301	return 0;
302}
303
304static int
305wait_for_free_request(struct TCP_Server_Info *server, const int optype)
306{
307	return wait_for_free_credits(server, optype,
308				     server->ops->get_credits_field(server));
309}
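
/*
 * Illustrative sketch, not part of this file: has_credits() is defined
 * elsewhere in the CIFS code; a minimal version consistent with the req_lock
 * usage in wait_for_free_credits() above would look like this (the name is
 * suffixed to mark it as a sketch):
 */
static inline int
has_credits_sketch(struct TCP_Server_Info *server, int *credits)
{
	int num;

	spin_lock(&server->req_lock);
	num = *credits;
	spin_unlock(&server->req_lock);
	return num > 0;
}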
310
311static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
312			struct mid_q_entry **ppmidQ)
313{
314	if (ses->server->tcpStatus == CifsExiting) {
315		return -ENOENT;
316	}
317
318	if (ses->server->tcpStatus == CifsNeedReconnect) {
319		cFYI(1, "tcp session dead - return to caller to retry");
320		return -EAGAIN;
321	}
322
323	if (ses->status != CifsGood) {
324		/* check if SMB session is bad because we are setting it up */
325		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
326			(in_buf->Command != SMB_COM_NEGOTIATE))
327			return -EAGAIN;
328		/* else ok - we are setting up session */
329	}
330	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
331	if (*ppmidQ == NULL)
332		return -ENOMEM;
333	spin_lock(&GlobalMid_Lock);
334	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
335	spin_unlock(&GlobalMid_Lock);
336	return 0;
337}
338
339static int
340wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
341{
342	int error;
343
344	error = wait_event_freezekillable(server->response_q,
345				    midQ->mid_state != MID_REQUEST_SUBMITTED);
346	if (error < 0)
347		return -ERESTARTSYS;
348
349	return 0;
350}
351
352static int
353cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
354			 unsigned int nvec, struct mid_q_entry **ret_mid)
355{
356	int rc;
357	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
358	struct mid_q_entry *mid;
359
360	/* enable signing if server requires it */
361	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
362		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
363
364	mid = AllocMidQEntry(hdr, server);
365	if (mid == NULL)
366		return -ENOMEM;
367
368	rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
369	if (rc) {
370		DeleteMidQEntry(mid);
371		return rc;
372	}
373
374	*ret_mid = mid;
375	return 0;
376}
377
378/*
379 * Send a SMB request and set the callback function in the mid to handle
380 * the result. Caller is responsible for dealing with timeouts.
381 */
382int
383cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
384		unsigned int nvec, mid_receive_t *receive,
385		mid_callback_t *callback, void *cbdata, bool ignore_pend)
386{
387	int rc;
388	struct mid_q_entry *mid;
389
390	rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
391	if (rc)
392		return rc;
393
394	mutex_lock(&server->srv_mutex);
395	rc = cifs_setup_async_request(server, iov, nvec, &mid);
396	if (rc) {
397		mutex_unlock(&server->srv_mutex);
398		add_credits(server, 1);
399		wake_up(&server->request_q);
400		return rc;
401	}
402
403	mid->receive = receive;
404	mid->callback = callback;
405	mid->callback_data = cbdata;
406	mid->mid_state = MID_REQUEST_SUBMITTED;
407
408	/* put it on the pending_mid_q */
409	spin_lock(&GlobalMid_Lock);
410	list_add_tail(&mid->qhead, &server->pending_mid_q);
411	spin_unlock(&GlobalMid_Lock);
412
413
414	cifs_in_send_inc(server);
415	rc = smb_sendv(server, iov, nvec);
416	cifs_in_send_dec(server);
417	cifs_save_when_sent(mid);
418	mutex_unlock(&server->srv_mutex);
419
420	if (rc == 0)
421		return 0;
422
423	delete_mid(mid);
424	add_credits(server, 1);
425	wake_up(&server->request_q);
426	return rc;
427}
428
429/*
430 *
431 * Send an SMB Request.  No response info (other than return code)
432 * needs to be parsed.
433 *
434 * flags indicate the type of request buffer and how long to wait
435 * and whether to log NT STATUS code (error) before mapping it to POSIX error
436 *
437 */
438int
439SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
440		 char *in_buf, int flags)
441{
442	int rc;
443	struct kvec iov[1];
444	int resp_buf_type;
445
446	iov[0].iov_base = in_buf;
447	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
448	flags |= CIFS_NO_RESP;
449	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
450	cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);
451
452	return rc;
453}
454
455static int
456cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
457{
458	int rc = 0;
459
460	cFYI(1, "%s: cmd=%d mid=%llu state=%d", __func__,
461	     le16_to_cpu(mid->command), mid->mid, mid->mid_state);
462
463	spin_lock(&GlobalMid_Lock);
464	switch (mid->mid_state) {
465	case MID_RESPONSE_RECEIVED:
466		spin_unlock(&GlobalMid_Lock);
467		return rc;
468	case MID_RETRY_NEEDED:
469		rc = -EAGAIN;
470		break;
471	case MID_RESPONSE_MALFORMED:
472		rc = -EIO;
473		break;
474	case MID_SHUTDOWN:
475		rc = -EHOSTDOWN;
476		break;
477	default:
478		list_del_init(&mid->qhead);
479		cERROR(1, "%s: invalid mid state mid=%llu state=%d", __func__,
480		       mid->mid, mid->mid_state);
481		rc = -EIO;
482	}
483	spin_unlock(&GlobalMid_Lock);
484
485	DeleteMidQEntry(mid);
486	return rc;
487}
488
489static inline int
490send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
491{
492	return server->ops->send_cancel ?
493				server->ops->send_cancel(server, buf, mid) : 0;
494}
495
496int
497cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
498		   bool log_error)
499{
500	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
501
502	dump_smb(mid->resp_buf, min_t(u32, 92, len));
503
504	/* convert the length into a more usable form */
505	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
506		struct kvec iov;
507
508		iov.iov_base = mid->resp_buf;
509		iov.iov_len = len;
510		/* FIXME: add code to kill session */
511		if (cifs_verify_signature(&iov, 1, server,
512					  mid->sequence_number + 1) != 0)
513			cERROR(1, "Unexpected SMB signature");
514	}
515
516	/* BB special case reconnect tid and uid here? */
517	return map_smb_to_linux_error(mid->resp_buf, log_error);
518}
519
520int
521cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
522		   unsigned int nvec, struct mid_q_entry **ret_mid)
523{
524	int rc;
525	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
526	struct mid_q_entry *mid;
527
528	rc = allocate_mid(ses, hdr, &mid);
529	if (rc)
530		return rc;
531	rc = cifs_sign_smb2(iov, nvec, ses->server, &mid->sequence_number);
532	if (rc)
533		delete_mid(mid);
534	*ret_mid = mid;
535	return rc;
536}
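
/*
 * Illustrative sketch, not part of this file: in this version the transport
 * dispatches through a per-dialect operations table; SendReceive2() below
 * calls ses->server->ops->setup_request() and ops->check_receive(). For SMB1
 * those hooks are expected to point back at the helpers defined above,
 * roughly as follows (the table below is an abbreviated, assumed layout):
 */
static struct smb_version_operations smb1_ops_sketch = {
	.setup_request	= cifs_setup_request,	/* build, sign and queue the mid */
	.check_receive	= cifs_check_receive,	/* verify signature, map to errno */
	/* .send_cancel and .get_credits_field are wired up similarly */
};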
537
538int
539SendReceive2(const unsigned int xid, struct cifs_ses *ses,
540	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
541	     const int flags)
542{
543	int rc = 0;
544	int long_op;
545	struct mid_q_entry *midQ;
546	char *buf = iov[0].iov_base;
547
548	long_op = flags & CIFS_TIMEOUT_MASK;
549
550	*pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */
551
552	if ((ses == NULL) || (ses->server == NULL)) {
553		cifs_small_buf_release(buf);
554		cERROR(1, "Null session");
555		return -EIO;
556	}
557
558	if (ses->server->tcpStatus == CifsExiting) {
559		cifs_small_buf_release(buf);
560		return -ENOENT;
561	}
562
563	/*
564	 * Ensure that we do not send more than 50 overlapping requests
565	 * to the same server. We may make this configurable later or
566	 * use ses->maxReq.
567	 */
568
569	rc = wait_for_free_request(ses->server, long_op);
570	if (rc) {
571		cifs_small_buf_release(buf);
572		return rc;
573	}
574
575	/*
576	 * Make sure that we sign in the same order that we send on this socket
577	 * and avoid races inside tcp sendmsg code that could cause corruption
578	 * of smb data.
579	 */
580
581	mutex_lock(&ses->server->srv_mutex);
582
583	rc = ses->server->ops->setup_request(ses, iov, n_vec, &midQ);
584	if (rc) {
585		mutex_unlock(&ses->server->srv_mutex);
586		cifs_small_buf_release(buf);
587		/* Update # of requests on wire to server */
588		add_credits(ses->server, 1);
589		return rc;
590	}
591
592	midQ->mid_state = MID_REQUEST_SUBMITTED;
593	cifs_in_send_inc(ses->server);
594	rc = smb_sendv(ses->server, iov, n_vec);
595	cifs_in_send_dec(ses->server);
596	cifs_save_when_sent(midQ);
597
598	mutex_unlock(&ses->server->srv_mutex);
599
600	if (rc < 0) {
601		cifs_small_buf_release(buf);
602		goto out;
603	}
604
605	if (long_op == CIFS_ASYNC_OP) {
606		cifs_small_buf_release(buf);
607		goto out;
608	}
609
610	rc = wait_for_response(ses->server, midQ);
611	if (rc != 0) {
612		send_cancel(ses->server, buf, midQ);
613		spin_lock(&GlobalMid_Lock);
614		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
615			midQ->callback = DeleteMidQEntry;
616			spin_unlock(&GlobalMid_Lock);
617			cifs_small_buf_release(buf);
618			add_credits(ses->server, 1);
619			return rc;
620		}
621		spin_unlock(&GlobalMid_Lock);
622	}
623
624	cifs_small_buf_release(buf);
625
626	rc = cifs_sync_mid_result(midQ, ses->server);
627	if (rc != 0) {
628		add_credits(ses->server, 1);
629		return rc;
630	}
631
632	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
633		rc = -EIO;
634		cFYI(1, "Bad MID state?");
635		goto out;
636	}
637
638	buf = (char *)midQ->resp_buf;
639	iov[0].iov_base = buf;
640	iov[0].iov_len = get_rfc1002_length(buf) + 4;
641	if (midQ->large_buf)
642		*pRespBufType = CIFS_LARGE_BUFFER;
643	else
644		*pRespBufType = CIFS_SMALL_BUFFER;
645
646	rc = ses->server->ops->check_receive(midQ, ses->server,
647					     flags & CIFS_LOG_ERROR);
648
649	/* mark it so buf will not be freed by delete_mid */
650	if ((flags & CIFS_NO_RESP) == 0)
651		midQ->resp_buf = NULL;
652out:
653	delete_mid(midQ);
654	add_credits(ses->server, 1);
655
656	return rc;
657}
658
659int
660SendReceive(const unsigned int xid, struct cifs_ses *ses,
661	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
662	    int *pbytes_returned, const int long_op)
663{
664	int rc = 0;
665	struct mid_q_entry *midQ;
666
667	if (ses == NULL) {
668		cERROR(1, "Null smb session");
669		return -EIO;
670	}
671	if (ses->server == NULL) {
672		cERROR(1, "Null tcp session");
673		return -EIO;
674	}
675
676	if (ses->server->tcpStatus == CifsExiting)
677		return -ENOENT;
678
679	/* Ensure that we do not send more than 50 overlapping requests
680	   to the same server. We may make this configurable later or
681	   use ses->maxReq */
682
683	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
684			MAX_CIFS_HDR_SIZE - 4) {
685		cERROR(1, "Illegal length, greater than maximum frame, %d",
686			   be32_to_cpu(in_buf->smb_buf_length));
687		return -EIO;
688	}
689
690	rc = wait_for_free_request(ses->server, long_op);
691	if (rc)
692		return rc;
693
694	/* make sure that we sign in the same order that we send on this socket
695	   and avoid races inside tcp sendmsg code that could cause corruption
696	   of smb data */
697
698	mutex_lock(&ses->server->srv_mutex);
699
700	rc = allocate_mid(ses, in_buf, &midQ);
701	if (rc) {
702		mutex_unlock(&ses->server->srv_mutex);
703		/* Update # of requests on wire to server */
704		add_credits(ses->server, 1);
705		return rc;
706	}
707
708	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
709	if (rc) {
710		mutex_unlock(&ses->server->srv_mutex);
711		goto out;
712	}
713
714	midQ->mid_state = MID_REQUEST_SUBMITTED;
715
716	cifs_in_send_inc(ses->server);
717	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
718	cifs_in_send_dec(ses->server);
719	cifs_save_when_sent(midQ);
720	mutex_unlock(&ses->server->srv_mutex);
721
722	if (rc < 0)
723		goto out;
724
725	if (long_op == CIFS_ASYNC_OP)
726		goto out;
727
728	rc = wait_for_response(ses->server, midQ);
729	if (rc != 0) {
730		send_cancel(ses->server, in_buf, midQ);
731		spin_lock(&GlobalMid_Lock);
732		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
733			/* no longer considered to be "in-flight" */
734			midQ->callback = DeleteMidQEntry;
735			spin_unlock(&GlobalMid_Lock);
736			add_credits(ses->server, 1);
737			return rc;
738		}
739		spin_unlock(&GlobalMid_Lock);
740	}
741
742	rc = cifs_sync_mid_result(midQ, ses->server);
743	if (rc != 0) {
744		add_credits(ses->server, 1);
745		return rc;
746	}
747
748	if (!midQ->resp_buf || !out_buf ||
749	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
750		rc = -EIO;
751		cERROR(1, "Bad MID state?");
752		goto out;
753	}
754
755	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
756	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
757	rc = cifs_check_receive(midQ, ses->server, 0);
758out:
759	delete_mid(midQ);
760	add_credits(ses->server, 1);
761
762	return rc;
763}
764
765/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
766   blocking lock to return. */
767
768static int
769send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
770			struct smb_hdr *in_buf,
771			struct smb_hdr *out_buf)
772{
773	int bytes_returned;
774	struct cifs_ses *ses = tcon->ses;
775	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
776
777	/* We just modify the current in_buf to change
778	   the type of lock from LOCKING_ANDX_SHARED_LOCK
779	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
780	   LOCKING_ANDX_CANCEL_LOCK. */
781
782	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
783	pSMB->Timeout = 0;
784	pSMB->hdr.Mid = get_next_mid(ses->server);
785
786	return SendReceive(xid, ses, in_buf, out_buf,
787			&bytes_returned, 0);
788}
789
790int
791SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
792	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
793	    int *pbytes_returned)
794{
795	int rc = 0;
796	int rstart = 0;
797	struct mid_q_entry *midQ;
798	struct cifs_ses *ses;
799
800	if (tcon == NULL || tcon->ses == NULL) {
801		cERROR(1, "Null smb session");
802		return -EIO;
803	}
804	ses = tcon->ses;
805
806	if (ses->server == NULL) {
807		cERROR(1, "Null tcp session");
808		return -EIO;
809	}
810
811	if (ses->server->tcpStatus == CifsExiting)
812		return -ENOENT;
813
814	/* Ensure that we do not send more than 50 overlapping requests
815	   to the same server. We may make this configurable later or
816	   use ses->maxReq */
817
818	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
819			MAX_CIFS_HDR_SIZE - 4) {
820		cERROR(1, "Illegal length, greater than maximum frame, %d",
821			   be32_to_cpu(in_buf->smb_buf_length));
822		return -EIO;
823	}
824
825	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
826	if (rc)
827		return rc;
828
829	/* make sure that we sign in the same order that we send on this socket
830	   and avoid races inside tcp sendmsg code that could cause corruption
831	   of smb data */
832
833	mutex_lock(&ses->server->srv_mutex);
834
835	rc = allocate_mid(ses, in_buf, &midQ);
836	if (rc) {
837		mutex_unlock(&ses->server->srv_mutex);
838		return rc;
839	}
840
841	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
842	if (rc) {
843		delete_mid(midQ);
844		mutex_unlock(&ses->server->srv_mutex);
845		return rc;
846	}
847
848	midQ->mid_state = MID_REQUEST_SUBMITTED;
849	cifs_in_send_inc(ses->server);
850	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
851	cifs_in_send_dec(ses->server);
852	cifs_save_when_sent(midQ);
853	mutex_unlock(&ses->server->srv_mutex);
854
855	if (rc < 0) {
856		delete_mid(midQ);
857		return rc;
858	}
859
860	/* Wait for a reply - allow signals to interrupt. */
861	rc = wait_event_interruptible(ses->server->response_q,
862		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
863		((ses->server->tcpStatus != CifsGood) &&
864		 (ses->server->tcpStatus != CifsNew)));
865
866	/* Were we interrupted by a signal ? */
867	if ((rc == -ERESTARTSYS) &&
868		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
869		((ses->server->tcpStatus == CifsGood) ||
870		 (ses->server->tcpStatus == CifsNew))) {
871
872		if (in_buf->Command == SMB_COM_TRANSACTION2) {
873			/* POSIX lock. We send a NT_CANCEL SMB to cause the
874			   blocking lock to return. */
875			rc = send_cancel(ses->server, in_buf, midQ);
876			if (rc) {
877				delete_mid(midQ);
878				return rc;
879			}
880		} else {
881			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
882			   to cause the blocking lock to return. */
883
884			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
885
886			/* If we get -ENOLCK back the lock may have
887			   already been removed. Don't exit in this case. */
888			if (rc && rc != -ENOLCK) {
889				delete_mid(midQ);
890				return rc;
891			}
892		}
893
894		rc = wait_for_response(ses->server, midQ);
895		if (rc) {
896			send_cancel(ses->server, in_buf, midQ);
897			spin_lock(&GlobalMid_Lock);
898			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
899				/* no longer considered to be "in-flight" */
900				midQ->callback = DeleteMidQEntry;
901				spin_unlock(&GlobalMid_Lock);
902				return rc;
903			}
904			spin_unlock(&GlobalMid_Lock);
905		}
906
907		/* We got the response - restart system call. */
908		rstart = 1;
909	}
910
911	rc = cifs_sync_mid_result(midQ, ses->server);
912	if (rc != 0)
913		return rc;
914
915	/* rcvd frame is ok */
916	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
917		rc = -EIO;
918		cERROR(1, "Bad MID state?");
919		goto out;
920	}
921
922	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
923	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
924	rc = cifs_check_receive(midQ, ses->server, 0);
925out:
926	delete_mid(midQ);
927	if (rstart && rc == -EACCES)
928		return -ERESTARTSYS;
929	return rc;
930}