v4.6
   1/*
   2 *   fs/cifs/transport.c
   3 *
   4 *   Copyright (C) International Business Machines  Corp., 2002,2008
   5 *   Author(s): Steve French (sfrench@us.ibm.com)
   6 *   Jeremy Allison (jra@samba.org) 2006.
   7 *
   8 *   This library is free software; you can redistribute it and/or modify
   9 *   it under the terms of the GNU Lesser General Public License as published
  10 *   by the Free Software Foundation; either version 2.1 of the License, or
  11 *   (at your option) any later version.
  12 *
  13 *   This library is distributed in the hope that it will be useful,
  14 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
  16 *   the GNU Lesser General Public License for more details.
  17 *
  18 *   You should have received a copy of the GNU Lesser General Public License
  19 *   along with this library; if not, write to the Free Software
  20 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  21 */
  22
  23#include <linux/fs.h>
  24#include <linux/list.h>
  25#include <linux/gfp.h>
  26#include <linux/wait.h>
  27#include <linux/net.h>
  28#include <linux/delay.h>
  29#include <linux/freezer.h>
  30#include <linux/tcp.h>
  31#include <linux/highmem.h>
  32#include <asm/uaccess.h>
  33#include <asm/processor.h>
  34#include <linux/mempool.h>
  35#include "cifspdu.h"
  36#include "cifsglob.h"
  37#include "cifsproto.h"
  38#include "cifs_debug.h"
  39
  40void
  41cifs_wake_up_task(struct mid_q_entry *mid)
  42{
  43	wake_up_process(mid->callback_data);
  44}
  45
  46struct mid_q_entry *
  47AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
  48{
  49	struct mid_q_entry *temp;
  50
  51	if (server == NULL) {
  52		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
  53		return NULL;
  54	}
  55
  56	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
  57	if (temp == NULL)
  58		return temp;
  59	else {
  60		memset(temp, 0, sizeof(struct mid_q_entry));
  61		temp->mid = get_mid(smb_buffer);
  62		temp->pid = current->pid;
  63		temp->command = cpu_to_le16(smb_buffer->Command);
  64		cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
  65	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
  66		/* when mid allocated can be before when sent */
  67		temp->when_alloc = jiffies;
  68		temp->server = server;
  69
  70		/*
  71		 * The default is for the mid to be synchronous, so the
  72		 * default callback just wakes up the current task.
  73		 */
  74		temp->callback = cifs_wake_up_task;
  75		temp->callback_data = current;
  76	}
  77
  78	atomic_inc(&midCount);
  79	temp->mid_state = MID_REQUEST_ALLOCATED;
  80	return temp;
  81}
  82
  83void
  84DeleteMidQEntry(struct mid_q_entry *midEntry)
  85{
  86#ifdef CONFIG_CIFS_STATS2
  87	__le16 command = midEntry->server->vals->lock_cmd;
  88	unsigned long now;
  89#endif
  90	midEntry->mid_state = MID_FREE;
  91	atomic_dec(&midCount);
  92	if (midEntry->large_buf)
  93		cifs_buf_release(midEntry->resp_buf);
  94	else
  95		cifs_small_buf_release(midEntry->resp_buf);
  96#ifdef CONFIG_CIFS_STATS2
  97	now = jiffies;
  98	/* commands taking longer than one second are indications that
  99	   something is wrong, unless it is quite a slow link or server */
 100	if ((now - midEntry->when_alloc) > HZ) {
 101		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
 102			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
 103			       midEntry->command, midEntry->mid);
 104			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
 105			       now - midEntry->when_alloc,
 106			       now - midEntry->when_sent,
 107			       now - midEntry->when_received);
 108		}
 109	}
 110#endif
 111	mempool_free(midEntry, cifs_mid_poolp);
 112}
 113
 114void
 115cifs_delete_mid(struct mid_q_entry *mid)
 116{
 117	spin_lock(&GlobalMid_Lock);
 118	list_del(&mid->qhead);
 119	spin_unlock(&GlobalMid_Lock);
 120
 121	DeleteMidQEntry(mid);
 122}
 123
 124/*
 125 * smb_send_kvec - send an array of kvecs to the server
 126 * @server:	Server to send the data to
 127 * @iov:	Pointer to array of kvecs
 128 * @n_vec:	length of kvec array
 129 * @sent:	amount of data sent on socket is stored here
 130 *
 131 * Our basic "send data to server" function. Should be called with srv_mutex
 132 * held. The caller is responsible for handling the results.
 133 */
 134static int
 135smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
 136		size_t *sent)
 137{
 138	int rc = 0;
 139	int i = 0;
 140	struct msghdr smb_msg;
 141	unsigned int remaining;
 142	size_t first_vec = 0;
 143	struct socket *ssocket = server->ssocket;
 144
 145	*sent = 0;
 146
 147	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
 148	smb_msg.msg_namelen = sizeof(struct sockaddr);
 149	smb_msg.msg_control = NULL;
 150	smb_msg.msg_controllen = 0;
 151	if (server->noblocksnd)
 152		smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
 153	else
 154		smb_msg.msg_flags = MSG_NOSIGNAL;
 155
 156	remaining = 0;
 157	for (i = 0; i < n_vec; i++)
 158		remaining += iov[i].iov_len;
 159
 160	i = 0;
 161	while (remaining) {
 162		/*
 163		 * If blocking send, we try 3 times, since each can block
 164		 * for 5 seconds. For nonblocking we have to try more
 165		 * but wait increasing amounts of time allowing time for
 166		 * socket to clear.  The overall time we wait in either
 167		 * case to send on the socket is about 15 seconds.
 168		 * Similarly we wait for 15 seconds for a response from
 169		 * the server in SendReceive[2] for the server to send
 170		 * a response back for most types of requests (except
 171		 * SMB Write past end of file which can be slow, and
 172		 * blocking lock operations). NFS waits slightly longer
 173		 * than CIFS, but this can make it take longer for
 174		 * nonresponsive servers to be detected and 15 seconds
 175		 * is more than enough time for modern networks to
 176		 * send a packet.  In most cases if we fail to send
 177		 * after the retries we will kill the socket and
 178		 * reconnect which may clear the network problem.
 179		 */
 180		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
 181				    n_vec - first_vec, remaining);
 182		if (rc == -EAGAIN) {
 183			i++;
 184			if (i >= 14 || (!server->noblocksnd && (i > 2))) {
 185				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
 186					 ssocket);
 187				rc = -EAGAIN;
 188				break;
 189			}
 190			msleep(1 << i);
 191			continue;
 192		}
 193
 194		if (rc < 0)
 195			break;
 196
 197		/* send was at least partially successful */
 198		*sent += rc;
 199
 200		if (rc == remaining) {
 201			remaining = 0;
 202			break;
 203		}
 204
 205		if (rc > remaining) {
 206			cifs_dbg(VFS, "sent %d requested %d\n", rc, remaining);
 207			break;
 208		}
 209
 210		if (rc == 0) {
 211			/* should never happen, letting socket clear before
 212			   retrying is our only obvious option here */
 213			cifs_dbg(VFS, "tcp sent no data\n");
 214			msleep(500);
 215			continue;
 216		}
 217
 218		remaining -= rc;
 219
 220		/* the line below resets i */
 221		for (i = first_vec; i < n_vec; i++) {
 222			if (iov[i].iov_len) {
 223				if (rc > iov[i].iov_len) {
 224					rc -= iov[i].iov_len;
 225					iov[i].iov_len = 0;
 226				} else {
 227					iov[i].iov_base += rc;
 228					iov[i].iov_len -= rc;
 229					first_vec = i;
 230					break;
 231				}
 232			}
 233		}
 234
 235		i = 0; /* in case we get ENOSPC on the next send */
 236		rc = 0;
 237	}
 238	return rc;
 239}
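/*
 * [Editorial sketch, not part of the original file] The -EAGAIN branch
 * above sleeps 1 << i milliseconds per retry and gives up once i
 * reaches 14 (or after 3 tries for a blocking socket), so the worst
 * case for a nonblocking send is the sum of 1 << i for i = 1..13,
 * i.e. 16382 ms -- the "about 15 seconds" the comment refers to:
 */
static unsigned int worst_case_backoff_ms(void)
{
	unsigned int i, total_ms = 0;

	for (i = 1; i < 14; i++)	/* loop breaks before sleeping at i == 14 */
		total_ms += 1U << i;	/* mirrors msleep(1 << i) above */
	return total_ms;		/* 16382 ms, roughly 16 seconds */
}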
 240
 241/**
 242 * rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
 243 * @rqst: pointer to smb_rqst
 244 * @idx: index into the array of the page
 245 * @iov: pointer to struct kvec that will hold the result
 246 *
 247 * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
 248 * The page will be kmapped and the address placed into iov_base. The length
 249 * will then be adjusted according to the ptailoff.
 250 */
 251void
 252cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
 253			struct kvec *iov)
 254{
 255	/*
 256	 * FIXME: We could avoid this kmap altogether if we used
 257	 * kernel_sendpage instead of kernel_sendmsg. That will only
 258	 * work if signing is disabled though as sendpage inlines the
 259	 * page directly into the fraglist. If userspace modifies the
 260	 * page after we calculate the signature, then the server will
 261	 * reject it and may break the connection. kernel_sendmsg does
 262	 * an extra copy of the data and avoids that issue.
 263	 */
 264	iov->iov_base = kmap(rqst->rq_pages[idx]);
 265
 266	/* if last page, don't send beyond this offset into page */
 267	if (idx == (rqst->rq_npages - 1))
 268		iov->iov_len = rqst->rq_tailsz;
 269	else
 270		iov->iov_len = rqst->rq_pagesz;
 271}
 272
 273static unsigned long
 274rqst_len(struct smb_rqst *rqst)
 275{
 276	unsigned int i;
 277	struct kvec *iov = rqst->rq_iov;
 278	unsigned long buflen = 0;
 279
 280	/* total up iov array first */
 281	for (i = 0; i < rqst->rq_nvec; i++)
 282		buflen += iov[i].iov_len;
 283
 284	/* add in the page array if there is one */
 285	if (rqst->rq_npages) {
 286		buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
 287		buflen += rqst->rq_tailsz;
 288	}
 289
 290	return buflen;
 291}
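/*
 * [Editorial sketch, hypothetical numbers] rqst_len() treats the page
 * array as (npages - 1) full pages plus a possibly shorter tail page.
 * For two 512-byte kvecs and three 4096-byte pages with a 100-byte
 * tail, the wire length is 512 + 512 + 4096 * 2 + 100 = 9316 bytes:
 */
static unsigned long example_rqst_len(void)
{
	unsigned long kvec_len = 512 + 512;	/* hypothetical iov total */
	unsigned long npages = 3, pagesz = 4096, tailsz = 100;
	unsigned long buflen = kvec_len;

	if (npages)
		buflen += pagesz * (npages - 1) + tailsz;
	return buflen;				/* 9316 */
}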
 292
 293static int
 294smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 295{
 296	int rc;
 297	struct kvec *iov = rqst->rq_iov;
 298	int n_vec = rqst->rq_nvec;
 299	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
 300	unsigned long send_length;
 301	unsigned int i;
 302	size_t total_len = 0, sent;
 303	struct socket *ssocket = server->ssocket;
 304	int val = 1;
 305
 306	if (ssocket == NULL)
 307		return -ENOTSOCK;
 308
 309	/* sanity check send length */
 310	send_length = rqst_len(rqst);
 311	if (send_length != smb_buf_length + 4) {
 312		WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
 313			send_length, smb_buf_length);
 314		return -EIO;
 315	}
 316
 317	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
 318	dump_smb(iov[0].iov_base, iov[0].iov_len);
 319
 320	/* cork the socket */
 321	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
 322				(char *)&val, sizeof(val));
 323
 324	rc = smb_send_kvec(server, iov, n_vec, &sent);
 325	if (rc < 0)
 326		goto uncork;
 327
 328	total_len += sent;
 329
 330	/* now walk the page array and send each page in it */
 331	for (i = 0; i < rqst->rq_npages; i++) {
 332		struct kvec p_iov;
 333
 334		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
 335		rc = smb_send_kvec(server, &p_iov, 1, &sent);
 336		kunmap(rqst->rq_pages[i]);
 337		if (rc < 0)
 338			break;
 339
 340		total_len += sent;
 341	}
 342
 343uncork:
 344	/* uncork it */
 345	val = 0;
 346	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
 347				(char *)&val, sizeof(val));
 348
 349	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
 350		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
 351			 smb_buf_length + 4, total_len);
 352		/*
 353		 * If we have only sent part of an SMB then the next SMB could
 354		 * be taken as the remainder of this one. We need to kill the
 355		 * socket so the server throws away the partial SMB
 356		 */
 357		server->tcpStatus = CifsNeedReconnect;
 358	}
 359
 360	if (rc < 0 && rc != -EINTR)
 361		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
 362			 rc);
 363	else
 364		rc = 0;
 365
 366	return rc;
 367}
 368
 369static int
 370smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 371{
 372	struct smb_rqst rqst = { .rq_iov = iov,
 373				 .rq_nvec = n_vec };
 374
 375	return smb_send_rqst(server, &rqst);
 376}
 377
 378int
 379smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
 380	 unsigned int smb_buf_length)
 381{
 382	struct kvec iov;
 383
 384	iov.iov_base = smb_buffer;
 385	iov.iov_len = smb_buf_length + 4;
 386
 387	return smb_sendv(server, &iov, 1);
 388}
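/*
 * [Editorial sketch] smb_send() transmits smb_buf_length + 4 bytes
 * because every SMB PDU is preceded by a 4-byte RFC 1002 session
 * header: a type byte (zero for a session message) followed by the
 * payload length in big-endian byte order. A userspace illustration
 * of that framing (set_frame_length is a hypothetical helper):
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static void set_frame_length(unsigned char *frame, uint32_t smb_len)
{
	uint32_t marker = htonl(smb_len);	/* assumes smb_len < 2^24,
						   so the type byte is zero */

	memcpy(frame, &marker, 4);	/* SMB payload starts at frame + 4 */
}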
 389
 390static int
 391wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
 392		      int *credits)
 393{
 394	int rc;
 395
 396	spin_lock(&server->req_lock);
 397	if (timeout == CIFS_ASYNC_OP) {
 398		/* oplock breaks must not be held up */
 399		server->in_flight++;
 400		*credits -= 1;
 401		spin_unlock(&server->req_lock);
 402		return 0;
 403	}
 404
 405	while (1) {
 406		if (*credits <= 0) {
 407			spin_unlock(&server->req_lock);
 408			cifs_num_waiters_inc(server);
 409			rc = wait_event_killable(server->request_q,
 410						 has_credits(server, credits));
 411			cifs_num_waiters_dec(server);
 412			if (rc)
 413				return rc;
 414			spin_lock(&server->req_lock);
 415		} else {
 416			if (server->tcpStatus == CifsExiting) {
 417				spin_unlock(&server->req_lock);
 418				return -ENOENT;
 419			}
 420
 421			/*
 422			 * Can not count locking commands against total
 423			 * as they are allowed to block on server.
 424			 */
 425
 426			/* update # of requests on the wire to server */
 427			if (timeout != CIFS_BLOCKING_OP) {
 428				*credits -= 1;
 429				server->in_flight++;
 430			}
 431			spin_unlock(&server->req_lock);
 432			break;
 433		}
 434	}
 435	return 0;
 436}
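/*
 * [Editorial sketch] wait_for_free_credits() is in effect a counting
 * semaphore built from server->req_lock and the request_q wait queue:
 * take the lock, sleep while no credits remain, then consume one. A
 * rough userspace analogue with POSIX primitives (names hypothetical):
 */
#include <pthread.h>

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t request_q = PTHREAD_COND_INITIALIZER;
static int credits = 50;	/* cf. the "50 overlapping requests" comments below */
static int in_flight;

static void take_credit(void)
{
	pthread_mutex_lock(&req_lock);
	while (credits <= 0)	/* plays the role of wait_event_killable() */
		pthread_cond_wait(&request_q, &req_lock);
	credits--;
	in_flight++;
	pthread_mutex_unlock(&req_lock);
}

static void return_credit(void)	/* response path, cf. add_credits() */
{
	pthread_mutex_lock(&req_lock);
	credits++;
	in_flight--;
	pthread_mutex_unlock(&req_lock);
	pthread_cond_signal(&request_q);
}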
 437
 438static int
 439wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
 440		      const int optype)
 441{
 442	int *val;
 443
 444	val = server->ops->get_credits_field(server, optype);
 445	/* Since an echo is already inflight, no need to wait to send another */
 446	if (*val <= 0 && optype == CIFS_ECHO_OP)
 447		return -EAGAIN;
 448	return wait_for_free_credits(server, timeout, val);
 449}
 450
 451int
 452cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
 453		      unsigned int *num, unsigned int *credits)
 454{
 455	*num = size;
 456	*credits = 0;
 457	return 0;
 458}
 459
 460static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
 461			struct mid_q_entry **ppmidQ)
 462{
 463	if (ses->server->tcpStatus == CifsExiting) {
 464		return -ENOENT;
 465	}
 466
 467	if (ses->server->tcpStatus == CifsNeedReconnect) {
 468		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
 469		return -EAGAIN;
 470	}
 471
 472	if (ses->status == CifsNew) {
 473		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
 474			(in_buf->Command != SMB_COM_NEGOTIATE))
 475			return -EAGAIN;
 476		/* else ok - we are setting up session */
 477	}
 478
 479	if (ses->status == CifsExiting) {
 480		/* check if SMB session is bad because we are setting it up */
 481		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
 482			return -EAGAIN;
 483		/* else ok - we are shutting down session */
 484	}
 485
 486	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
 487	if (*ppmidQ == NULL)
 488		return -ENOMEM;
 489	spin_lock(&GlobalMid_Lock);
 490	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
 491	spin_unlock(&GlobalMid_Lock);
 492	return 0;
 493}
 494
 495static int
 496wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 497{
 498	int error;
 499
 500	error = wait_event_freezekillable_unsafe(server->response_q,
 501				    midQ->mid_state != MID_REQUEST_SUBMITTED);
 502	if (error < 0)
 503		return -ERESTARTSYS;
 504
 505	return 0;
 506}
 507
 508struct mid_q_entry *
 509cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 510{
 511	int rc;
 512	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
 513	struct mid_q_entry *mid;
 514
 515	/* enable signing if server requires it */
 516	if (server->sign)
 517		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 518
 519	mid = AllocMidQEntry(hdr, server);
 520	if (mid == NULL)
 521		return ERR_PTR(-ENOMEM);
 522
 523	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
 524	if (rc) {
 525		DeleteMidQEntry(mid);
 526		return ERR_PTR(rc);
 527	}
 528
 529	return mid;
 530}
 531
 532/*
 533 * Send a SMB request and set the callback function in the mid to handle
 534 * the result. Caller is responsible for dealing with timeouts.
 535 */
 536int
 537cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
 538		mid_receive_t *receive, mid_callback_t *callback,
 539		void *cbdata, const int flags)
 540{
 541	int rc, timeout, optype;
 542	struct mid_q_entry *mid;
 543	unsigned int credits = 0;
 544
 545	timeout = flags & CIFS_TIMEOUT_MASK;
 546	optype = flags & CIFS_OP_MASK;
 547
 548	if ((flags & CIFS_HAS_CREDITS) == 0) {
 549		rc = wait_for_free_request(server, timeout, optype);
 550		if (rc)
 551			return rc;
 552		credits = 1;
 553	}
 554
 555	mutex_lock(&server->srv_mutex);
 556	mid = server->ops->setup_async_request(server, rqst);
 557	if (IS_ERR(mid)) {
 558		mutex_unlock(&server->srv_mutex);
 559		add_credits_and_wake_if(server, credits, optype);
 560		return PTR_ERR(mid);
 561	}
 562
 563	mid->receive = receive;
 564	mid->callback = callback;
 565	mid->callback_data = cbdata;
 566	mid->mid_state = MID_REQUEST_SUBMITTED;
 567
 568	/* put it on the pending_mid_q */
 569	spin_lock(&GlobalMid_Lock);
 570	list_add_tail(&mid->qhead, &server->pending_mid_q);
 571	spin_unlock(&GlobalMid_Lock);
 572
 573
 574	cifs_in_send_inc(server);
 575	rc = smb_send_rqst(server, rqst);
 576	cifs_in_send_dec(server);
 577	cifs_save_when_sent(mid);
 578
 579	if (rc < 0) {
 580		server->sequence_number -= 2;
 581		cifs_delete_mid(mid);
 582	}
 583
 584	mutex_unlock(&server->srv_mutex);
 585
 586	if (rc == 0)
 587		return 0;
 588
 589	add_credits_and_wake_if(server, credits, optype);
 590	return rc;
 591}
 592
 593/*
 594 *
 595 * Send an SMB Request.  No response info (other than return code)
 596 * needs to be parsed.
 597 *
 598 * flags indicate the type of request buffer and how long to wait
 599 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 600 *
 601 */
 602int
 603SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
 604		 char *in_buf, int flags)
 605{
 606	int rc;
 607	struct kvec iov[1];
 608	int resp_buf_type;
 609
 610	iov[0].iov_base = in_buf;
 611	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
 612	flags |= CIFS_NO_RESP;
 613	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
 614	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
 615
 616	return rc;
 617}
 618
 619static int
 620cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
 621{
 622	int rc = 0;
 623
 624	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
 625		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
 626
 627	spin_lock(&GlobalMid_Lock);
 628	switch (mid->mid_state) {
 629	case MID_RESPONSE_RECEIVED:
 630		spin_unlock(&GlobalMid_Lock);
 631		return rc;
 632	case MID_RETRY_NEEDED:
 633		rc = -EAGAIN;
 634		break;
 635	case MID_RESPONSE_MALFORMED:
 636		rc = -EIO;
 637		break;
 638	case MID_SHUTDOWN:
 639		rc = -EHOSTDOWN;
 640		break;
 641	default:
 642		list_del_init(&mid->qhead);
 643		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
 644			 __func__, mid->mid, mid->mid_state);
 645		rc = -EIO;
 646	}
 647	spin_unlock(&GlobalMid_Lock);
 648
 649	mutex_lock(&server->srv_mutex);
 650	DeleteMidQEntry(mid);
 651	mutex_unlock(&server->srv_mutex);
 652	return rc;
 653}
 654
 655static inline int
 656send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
 657{
 658	return server->ops->send_cancel ?
 659				server->ops->send_cancel(server, buf, mid) : 0;
 660}
 661
 662int
 663cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
 664		   bool log_error)
 665{
 666	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
 667
 668	dump_smb(mid->resp_buf, min_t(u32, 92, len));
 669
 670	/* convert the length into a more usable form */
 671	if (server->sign) {
 672		struct kvec iov;
 673		int rc = 0;
 674		struct smb_rqst rqst = { .rq_iov = &iov,
 675					 .rq_nvec = 1 };
 676
 677		iov.iov_base = mid->resp_buf;
 678		iov.iov_len = len;
 679		/* FIXME: add code to kill session */
 680		rc = cifs_verify_signature(&rqst, server,
 681					   mid->sequence_number);
 682		if (rc)
 683			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
 684				 rc);
 685	}
 686
 687	/* BB special case reconnect tid and uid here? */
 688	return map_smb_to_linux_error(mid->resp_buf, log_error);
 689}
 690
 691struct mid_q_entry *
 692cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
 693{
 694	int rc;
 695	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
 696	struct mid_q_entry *mid;
 697
 698	rc = allocate_mid(ses, hdr, &mid);
 699	if (rc)
 700		return ERR_PTR(rc);
 701	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
 702	if (rc) {
 703		cifs_delete_mid(mid);
 704		return ERR_PTR(rc);
 705	}
 706	return mid;
 707}
 708
 709int
 710SendReceive2(const unsigned int xid, struct cifs_ses *ses,
 711	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
 712	     const int flags)
 713{
 714	int rc = 0;
 715	int timeout, optype;
 716	struct mid_q_entry *midQ;
 717	char *buf = iov[0].iov_base;
 718	unsigned int credits = 1;
 719	struct smb_rqst rqst = { .rq_iov = iov,
 720				 .rq_nvec = n_vec };
 721
 722	timeout = flags & CIFS_TIMEOUT_MASK;
 723	optype = flags & CIFS_OP_MASK;
 724
 725	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */
 726
 727	if ((ses == NULL) || (ses->server == NULL)) {
 728		cifs_small_buf_release(buf);
 729		cifs_dbg(VFS, "Null session\n");
 730		return -EIO;
 731	}
 732
 733	if (ses->server->tcpStatus == CifsExiting) {
 734		cifs_small_buf_release(buf);
 735		return -ENOENT;
 736	}
 737
 738	/*
 739	 * Ensure that we do not send more than 50 overlapping requests
 740	 * to the same server. We may make this configurable later or
 741	 * use ses->maxReq.
 742	 */
 743
 744	rc = wait_for_free_request(ses->server, timeout, optype);
 745	if (rc) {
 746		cifs_small_buf_release(buf);
 747		return rc;
 748	}
 749
 750	/*
 751	 * Make sure that we sign in the same order that we send on this socket
 752	 * and avoid races inside tcp sendmsg code that could cause corruption
 753	 * of smb data.
 754	 */
 755
 756	mutex_lock(&ses->server->srv_mutex);
 757
 758	midQ = ses->server->ops->setup_request(ses, &rqst);
 759	if (IS_ERR(midQ)) {
 760		mutex_unlock(&ses->server->srv_mutex);
 761		cifs_small_buf_release(buf);
 762		/* Update # of requests on wire to server */
 763		add_credits(ses->server, 1, optype);
 764		return PTR_ERR(midQ);
 765	}
 766
 767	midQ->mid_state = MID_REQUEST_SUBMITTED;
 768	cifs_in_send_inc(ses->server);
 769	rc = smb_sendv(ses->server, iov, n_vec);
 770	cifs_in_send_dec(ses->server);
 771	cifs_save_when_sent(midQ);
 772
 773	if (rc < 0)
 774		ses->server->sequence_number -= 2;
 775	mutex_unlock(&ses->server->srv_mutex);
 776
 777	if (rc < 0) {
 778		cifs_small_buf_release(buf);
 779		goto out;
 780	}
 781
 782	if (timeout == CIFS_ASYNC_OP) {
 783		cifs_small_buf_release(buf);
 784		goto out;
 785	}
 786
 787	rc = wait_for_response(ses->server, midQ);
 788	if (rc != 0) {
 789		send_cancel(ses->server, buf, midQ);
 790		spin_lock(&GlobalMid_Lock);
 791		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
 792			midQ->callback = DeleteMidQEntry;
 793			spin_unlock(&GlobalMid_Lock);
 794			cifs_small_buf_release(buf);
 795			add_credits(ses->server, 1, optype);
 796			return rc;
 797		}
 798		spin_unlock(&GlobalMid_Lock);
 799	}
 800
 801	cifs_small_buf_release(buf);
 802
 803	rc = cifs_sync_mid_result(midQ, ses->server);
 804	if (rc != 0) {
 805		add_credits(ses->server, 1, optype);
 806		return rc;
 807	}
 808
 809	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
 810		rc = -EIO;
 811		cifs_dbg(FYI, "Bad MID state?\n");
 812		goto out;
 813	}
 814
 815	buf = (char *)midQ->resp_buf;
 816	iov[0].iov_base = buf;
 817	iov[0].iov_len = get_rfc1002_length(buf) + 4;
 818	if (midQ->large_buf)
 819		*resp_buf_type = CIFS_LARGE_BUFFER;
 820	else
 821		*resp_buf_type = CIFS_SMALL_BUFFER;
 822
 823	credits = ses->server->ops->get_credits(midQ);
 824
 825	rc = ses->server->ops->check_receive(midQ, ses->server,
 826					     flags & CIFS_LOG_ERROR);
 827
 828	/* mark it so buf will not be freed by cifs_delete_mid */
 829	if ((flags & CIFS_NO_RESP) == 0)
 830		midQ->resp_buf = NULL;
 831out:
 832	cifs_delete_mid(midQ);
 833	add_credits(ses->server, credits, optype);
 834
 835	return rc;
 836}
 837
 838int
 839SendReceive(const unsigned int xid, struct cifs_ses *ses,
 840	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
 841	    int *pbytes_returned, const int timeout)
 842{
 843	int rc = 0;
 844	struct mid_q_entry *midQ;
 845
 846	if (ses == NULL) {
 847		cifs_dbg(VFS, "Null smb session\n");
 848		return -EIO;
 849	}
 850	if (ses->server == NULL) {
 851		cifs_dbg(VFS, "Null tcp session\n");
 852		return -EIO;
 853	}
 854
 855	if (ses->server->tcpStatus == CifsExiting)
 856		return -ENOENT;
 857
 858	/* Ensure that we do not send more than 50 overlapping requests
 859	   to the same server. We may make this configurable later or
 860	   use ses->maxReq */
 861
 862	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
 863			MAX_CIFS_HDR_SIZE - 4) {
 864		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
 865			 be32_to_cpu(in_buf->smb_buf_length));
 866		return -EIO;
 867	}
 868
 869	rc = wait_for_free_request(ses->server, timeout, 0);
 870	if (rc)
 871		return rc;
 872
 873	/* make sure that we sign in the same order that we send on this socket
 874	   and avoid races inside tcp sendmsg code that could cause corruption
 875	   of smb data */
 876
 877	mutex_lock(&ses->server->srv_mutex);
 878
 879	rc = allocate_mid(ses, in_buf, &midQ);
 880	if (rc) {
 881		mutex_unlock(&ses->server->srv_mutex);
 882		/* Update # of requests on wire to server */
 883		add_credits(ses->server, 1, 0);
 884		return rc;
 885	}
 886
 887	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
 888	if (rc) {
 889		mutex_unlock(&ses->server->srv_mutex);
 890		goto out;
 891	}
 892
 893	midQ->mid_state = MID_REQUEST_SUBMITTED;
 894
 895	cifs_in_send_inc(ses->server);
 896	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
 897	cifs_in_send_dec(ses->server);
 898	cifs_save_when_sent(midQ);
 899
 900	if (rc < 0)
 901		ses->server->sequence_number -= 2;
 902
 903	mutex_unlock(&ses->server->srv_mutex);
 904
 905	if (rc < 0)
 906		goto out;
 907
 908	if (timeout == CIFS_ASYNC_OP)
 909		goto out;
 910
 911	rc = wait_for_response(ses->server, midQ);
 912	if (rc != 0) {
 913		send_cancel(ses->server, in_buf, midQ);
 914		spin_lock(&GlobalMid_Lock);
 915		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
 916			/* no longer considered to be "in-flight" */
 917			midQ->callback = DeleteMidQEntry;
 918			spin_unlock(&GlobalMid_Lock);
 919			add_credits(ses->server, 1, 0);
 920			return rc;
 921		}
 922		spin_unlock(&GlobalMid_Lock);
 923	}
 924
 925	rc = cifs_sync_mid_result(midQ, ses->server);
 926	if (rc != 0) {
 927		add_credits(ses->server, 1, 0);
 928		return rc;
 929	}
 930
 931	if (!midQ->resp_buf || !out_buf ||
 932	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
 933		rc = -EIO;
 934		cifs_dbg(VFS, "Bad MID state?\n");
 935		goto out;
 936	}
 937
 938	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
 939	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
 940	rc = cifs_check_receive(midQ, ses->server, 0);
 941out:
 942	cifs_delete_mid(midQ);
 943	add_credits(ses->server, 1, 0);
 944
 945	return rc;
 946}
 947
 948/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 949   blocking lock to return. */
 950
 951static int
 952send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
 953			struct smb_hdr *in_buf,
 954			struct smb_hdr *out_buf)
 955{
 956	int bytes_returned;
 957	struct cifs_ses *ses = tcon->ses;
 958	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
 959
 960	/* We just modify the current in_buf to change
 961	   the type of lock from LOCKING_ANDX_SHARED_LOCK
 962	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
 963	   LOCKING_ANDX_CANCEL_LOCK. */
 964
 965	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
 966	pSMB->Timeout = 0;
 967	pSMB->hdr.Mid = get_next_mid(ses->server);
 968
 969	return SendReceive(xid, ses, in_buf, out_buf,
 970			&bytes_returned, 0);
 971}
 972
 973int
 974SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
 975	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
 976	    int *pbytes_returned)
 977{
 978	int rc = 0;
 979	int rstart = 0;
 980	struct mid_q_entry *midQ;
 981	struct cifs_ses *ses;
 982
 983	if (tcon == NULL || tcon->ses == NULL) {
 984		cifs_dbg(VFS, "Null smb session\n");
 985		return -EIO;
 986	}
 987	ses = tcon->ses;
 988
 989	if (ses->server == NULL) {
 990		cifs_dbg(VFS, "Null tcp session\n");
 991		return -EIO;
 992	}
 993
 994	if (ses->server->tcpStatus == CifsExiting)
 995		return -ENOENT;
 996
 997	/* Ensure that we do not send more than 50 overlapping requests
 998	   to the same server. We may make this configurable later or
 999	   use ses->maxReq */
1000
1001	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
1002			MAX_CIFS_HDR_SIZE - 4) {
1003		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
1004			 be32_to_cpu(in_buf->smb_buf_length));
1005		return -EIO;
1006	}
1007
1008	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
1009	if (rc)
1010		return rc;
1011
1012	/* make sure that we sign in the same order that we send on this socket
1013	   and avoid races inside tcp sendmsg code that could cause corruption
1014	   of smb data */
1015
1016	mutex_lock(&ses->server->srv_mutex);
1017
1018	rc = allocate_mid(ses, in_buf, &midQ);
1019	if (rc) {
1020		mutex_unlock(&ses->server->srv_mutex);
1021		return rc;
1022	}
1023
1024	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
1025	if (rc) {
1026		cifs_delete_mid(midQ);
1027		mutex_unlock(&ses->server->srv_mutex);
1028		return rc;
1029	}
1030
1031	midQ->mid_state = MID_REQUEST_SUBMITTED;
1032	cifs_in_send_inc(ses->server);
1033	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
1034	cifs_in_send_dec(ses->server);
1035	cifs_save_when_sent(midQ);
1036
1037	if (rc < 0)
1038		ses->server->sequence_number -= 2;
1039
1040	mutex_unlock(&ses->server->srv_mutex);
1041
1042	if (rc < 0) {
1043		cifs_delete_mid(midQ);
1044		return rc;
1045	}
1046
1047	/* Wait for a reply - allow signals to interrupt. */
1048	rc = wait_event_interruptible(ses->server->response_q,
1049		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
1050		((ses->server->tcpStatus != CifsGood) &&
1051		 (ses->server->tcpStatus != CifsNew)));
1052
1053	/* Were we interrupted by a signal ? */
1054	if ((rc == -ERESTARTSYS) &&
1055		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1056		((ses->server->tcpStatus == CifsGood) ||
1057		 (ses->server->tcpStatus == CifsNew))) {
1058
1059		if (in_buf->Command == SMB_COM_TRANSACTION2) {
1060			/* POSIX lock. We send a NT_CANCEL SMB to cause the
1061			   blocking lock to return. */
1062			rc = send_cancel(ses->server, in_buf, midQ);
1063			if (rc) {
1064				cifs_delete_mid(midQ);
1065				return rc;
1066			}
1067		} else {
1068			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1069			   to cause the blocking lock to return. */
1070
1071			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1072
1073			/* If we get -ENOLCK back the lock may have
1074			   already been removed. Don't exit in this case. */
1075			if (rc && rc != -ENOLCK) {
1076				cifs_delete_mid(midQ);
1077				return rc;
1078			}
1079		}
1080
1081		rc = wait_for_response(ses->server, midQ);
1082		if (rc) {
1083			send_cancel(ses->server, in_buf, midQ);
1084			spin_lock(&GlobalMid_Lock);
1085			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1086				/* no longer considered to be "in-flight" */
1087				midQ->callback = DeleteMidQEntry;
1088				spin_unlock(&GlobalMid_Lock);
1089				return rc;
1090			}
1091			spin_unlock(&GlobalMid_Lock);
1092		}
1093
1094		/* We got the response - restart system call. */
1095		rstart = 1;
1096	}
1097
1098	rc = cifs_sync_mid_result(midQ, ses->server);
1099	if (rc != 0)
1100		return rc;
1101
1102	/* rcvd frame is ok */
1103	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1104		rc = -EIO;
1105		cifs_dbg(VFS, "Bad MID state?\n");
1106		goto out;
1107	}
1108
1109	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1110	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1111	rc = cifs_check_receive(midQ, ses->server, 0);
1112out:
1113	cifs_delete_mid(midQ);
1114	if (rstart && rc == -EACCES)
1115		return -ERESTARTSYS;
1116	return rc;
1117}
v5.4
   1/*
   2 *   fs/cifs/transport.c
   3 *
   4 *   Copyright (C) International Business Machines  Corp., 2002,2008
   5 *   Author(s): Steve French (sfrench@us.ibm.com)
   6 *   Jeremy Allison (jra@samba.org) 2006.
   7 *
   8 *   This library is free software; you can redistribute it and/or modify
   9 *   it under the terms of the GNU Lesser General Public License as published
  10 *   by the Free Software Foundation; either version 2.1 of the License, or
  11 *   (at your option) any later version.
  12 *
  13 *   This library is distributed in the hope that it will be useful,
  14 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
  16 *   the GNU Lesser General Public License for more details.
  17 *
  18 *   You should have received a copy of the GNU Lesser General Public License
  19 *   along with this library; if not, write to the Free Software
  20 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  21 */
  22
  23#include <linux/fs.h>
  24#include <linux/list.h>
  25#include <linux/gfp.h>
  26#include <linux/wait.h>
  27#include <linux/net.h>
  28#include <linux/delay.h>
  29#include <linux/freezer.h>
  30#include <linux/tcp.h>
  31#include <linux/bvec.h>
  32#include <linux/highmem.h>
  33#include <linux/uaccess.h>
  34#include <asm/processor.h>
  35#include <linux/mempool.h>
  36#include <linux/sched/signal.h>
  37#include "cifspdu.h"
  38#include "cifsglob.h"
  39#include "cifsproto.h"
  40#include "cifs_debug.h"
  41#include "smb2proto.h"
  42#include "smbdirect.h"
  43
  44/* Max number of iovectors we can use off the stack when sending requests. */
  45#define CIFS_MAX_IOV_SIZE 8
  46
  47void
  48cifs_wake_up_task(struct mid_q_entry *mid)
  49{
  50	wake_up_process(mid->callback_data);
  51}
  52
  53struct mid_q_entry *
  54AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
  55{
  56	struct mid_q_entry *temp;
  57
  58	if (server == NULL) {
  59		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
  60		return NULL;
  61	}
  62
  63	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
  64	memset(temp, 0, sizeof(struct mid_q_entry));
  65	kref_init(&temp->refcount);
  66	temp->mid = get_mid(smb_buffer);
  67	temp->pid = current->pid;
  68	temp->command = cpu_to_le16(smb_buffer->Command);
  69	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
  70	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
  71	/* when mid allocated can be before when sent */
  72	temp->when_alloc = jiffies;
  73	temp->server = server;
  74
  75	/*
  76	 * The default is for the mid to be synchronous, so the
  77	 * default callback just wakes up the current task.
  78	 */
  79	temp->callback = cifs_wake_up_task;
  80	temp->callback_data = current;
  81
  82	atomic_inc(&midCount);
  83	temp->mid_state = MID_REQUEST_ALLOCATED;
  84	return temp;
  85}
  86
  87static void _cifs_mid_q_entry_release(struct kref *refcount)
  88{
  89	struct mid_q_entry *midEntry =
  90			container_of(refcount, struct mid_q_entry, refcount);
  91#ifdef CONFIG_CIFS_STATS2
  92	__le16 command = midEntry->server->vals->lock_cmd;
  93	__u16 smb_cmd = le16_to_cpu(midEntry->command);
  94	unsigned long now;
  95	unsigned long roundtrip_time;
  96	struct TCP_Server_Info *server = midEntry->server;
  97#endif
  98	midEntry->mid_state = MID_FREE;
  99	atomic_dec(&midCount);
 100	if (midEntry->large_buf)
 101		cifs_buf_release(midEntry->resp_buf);
 102	else
 103		cifs_small_buf_release(midEntry->resp_buf);
 104#ifdef CONFIG_CIFS_STATS2
 105	now = jiffies;
 106	if (now < midEntry->when_alloc)
 107		cifs_server_dbg(VFS, "invalid mid allocation time\n");
 108	roundtrip_time = now - midEntry->when_alloc;
 109
 110	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
 111		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
 112			server->slowest_cmd[smb_cmd] = roundtrip_time;
 113			server->fastest_cmd[smb_cmd] = roundtrip_time;
 114		} else {
 115			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
 116				server->slowest_cmd[smb_cmd] = roundtrip_time;
 117			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
 118				server->fastest_cmd[smb_cmd] = roundtrip_time;
 119		}
 120		cifs_stats_inc(&server->num_cmds[smb_cmd]);
 121		server->time_per_cmd[smb_cmd] += roundtrip_time;
 122	}
 123	/*
 124	 * commands taking longer than one second (default) can be indications
 125	 * that something is wrong, unless it is quite a slow link or a very
 126	 * busy server. Note that this calc is unlikely or impossible to wrap
 127	 * as long as slow_rsp_threshold is not set way above recommended max
 128	 * value (32767 ie 9 hours) and is generally harmless even if wrong
 129	 * since only affects debug counters - so leaving the calc as simple
 130	 * comparison rather than doing multiple conversions and overflow
 131	 * checks
 132	 */
 133	if ((slow_rsp_threshold != 0) &&
 134	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
 135	    (midEntry->command != command)) {
 136		/*
 137		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
 138		 * NB: le16_to_cpu returns unsigned so can not be negative below
 139		 */
 140		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
 141			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
 142
 143		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
 144			       midEntry->when_sent, midEntry->when_received);
 145		if (cifsFYI & CIFS_TIMER) {
 146			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
 147			       midEntry->command, midEntry->mid);
 148			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
 149			       now - midEntry->when_alloc,
 150			       now - midEntry->when_sent,
 151			       now - midEntry->when_received);
 152		}
 153	}
 154#endif
 155
 156	mempool_free(midEntry, cifs_mid_poolp);
 157}
 158
 159void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
 160{
 161	spin_lock(&GlobalMid_Lock);
 162	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
 163	spin_unlock(&GlobalMid_Lock);
 164}
 165
 166void DeleteMidQEntry(struct mid_q_entry *midEntry)
 167{
 168	cifs_mid_q_entry_release(midEntry);
 169}
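/*
 * [Editorial sketch] v5.4 replaces v4.6's direct free in
 * DeleteMidQEntry() with kref-based reference counting, since with
 * compounding the demultiplex thread and the issuing thread can hold
 * the same mid simultaneously. The pattern, reduced to its core:
 */
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref refcount;
	/* payload ... */
};

static void example_obj_release(struct kref *ref)
{
	struct example_obj *obj =
		container_of(ref, struct example_obj, refcount);

	kfree(obj);	/* runs only when the last reference is dropped */
}

/*
 * Allocation path:    kref_init(&obj->refcount);   holds one reference
 * Each extra holder:  kref_get(&obj->refcount);
 * Every holder later: kref_put(&obj->refcount, example_obj_release);
 */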
 170
 171void
 172cifs_delete_mid(struct mid_q_entry *mid)
 173{
 174	spin_lock(&GlobalMid_Lock);
 175	if (!(mid->mid_flags & MID_DELETED)) {
 176		list_del_init(&mid->qhead);
 177		mid->mid_flags |= MID_DELETED;
 178	}
 179	spin_unlock(&GlobalMid_Lock);
 180
 181	DeleteMidQEntry(mid);
 182}
 183
 184/*
 185 * smb_send_kvec - send an array of kvecs to the server
 186 * @server:	Server to send the data to
 187 * @smb_msg:	Message to send
 188 * @sent:	amount of data sent on socket is stored here
 189 *
 190 * Our basic "send data to server" function. Should be called with srv_mutex
 191 * held. The caller is responsible for handling the results.
 192 */
 193static int
 194smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
 195	      size_t *sent)
 196{
 197	int rc = 0;
 198	int retries = 0;
 199	struct socket *ssocket = server->ssocket;
 200
 201	*sent = 0;
 202
 203	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
 204	smb_msg->msg_namelen = sizeof(struct sockaddr);
 205	smb_msg->msg_control = NULL;
 206	smb_msg->msg_controllen = 0;
 207	if (server->noblocksnd)
 208		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
 209	else
 210		smb_msg->msg_flags = MSG_NOSIGNAL;
 211
 212	while (msg_data_left(smb_msg)) {
 213		/*
 214		 * If blocking send, we try 3 times, since each can block
 215		 * for 5 seconds. For nonblocking we have to try more
 216		 * but wait increasing amounts of time allowing time for
 217		 * socket to clear.  The overall time we wait in either
 218		 * case to send on the socket is about 15 seconds.
 219		 * Similarly we wait for 15 seconds for a response from
 220		 * the server in SendReceive[2] for the server to send
 221		 * a response back for most types of requests (except
 222		 * SMB Write past end of file which can be slow, and
 223		 * blocking lock operations). NFS waits slightly longer
 224		 * than CIFS, but this can make it take longer for
 225		 * nonresponsive servers to be detected and 15 seconds
 226		 * is more than enough time for modern networks to
 227		 * send a packet.  In most cases if we fail to send
 228		 * after the retries we will kill the socket and
 229		 * reconnect which may clear the network problem.
 230		 */
 231		rc = sock_sendmsg(ssocket, smb_msg);
 232		if (rc == -EAGAIN) {
 233			retries++;
 234			if (retries >= 14 ||
 235			    (!server->noblocksnd && (retries > 2))) {
 236				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
 237					 ssocket);
 238				return -EAGAIN;
 239			}
 240			msleep(1 << retries);
 241			continue;
 242		}
 243
 244		if (rc < 0)
 245			return rc;
 246
 247		if (rc == 0) {
 248			/* should never happen, letting socket clear before
 249			   retrying is our only obvious option here */
 250			cifs_server_dbg(VFS, "tcp sent no data\n");
 251			msleep(500);
 252			continue;
 253		}
 254
 255		/* send was at least partially successful */
 256		*sent += rc;
 257		retries = 0; /* in case we get ENOSPC on the next send */
 258	}
 259	return 0;
 260}
 261
 262unsigned long
 263smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 264{
 265	unsigned int i;
 266	struct kvec *iov;
 267	int nvec;
 268	unsigned long buflen = 0;
 269
 270	if (server->vals->header_preamble_size == 0 &&
 271	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
 272		iov = &rqst->rq_iov[1];
 273		nvec = rqst->rq_nvec - 1;
 274	} else {
 275		iov = rqst->rq_iov;
 276		nvec = rqst->rq_nvec;
 277	}
 278
 279	/* total up iov array first */
 280	for (i = 0; i < nvec; i++)
 281		buflen += iov[i].iov_len;
 282
 283	/*
 284	 * Add in the page array if there is one. The caller needs to make
 285	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
 286	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
 287	 * PAGE_SIZE.
 288	 */
 289	if (rqst->rq_npages) {
 290		if (rqst->rq_npages == 1)
 291			buflen += rqst->rq_tailsz;
 292		else {
 293			/*
 294			 * If there is more than one page, calculate the
 295			 * buffer length based on rq_offset and rq_tailsz
 296			 */
 297			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
 298					rqst->rq_offset;
 299			buflen += rqst->rq_tailsz;
 300		}
 301	}
 302
 303	return buflen;
 304}
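/*
 * [Editorial sketch, hypothetical numbers] Unlike v4.6's rqst_len(),
 * smb_rqst_len() honours rq_offset, so data may start mid-page. With
 * pagesz 4096, npages 3, offset 300 and tailsz 4096 (a buffer ending
 * on a page boundary), the page data totals 4096 * 2 - 300 + 4096 =
 * 11988 bytes:
 */
static unsigned long example_page_data_len(void)
{
	unsigned long pagesz = 4096, npages = 3;
	unsigned long offset = 300, tailsz = 4096;	/* hypothetical */

	if (npages == 1)
		return tailsz;
	return pagesz * (npages - 1) - offset + tailsz;	/* 11988 */
}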
 305
 306static int
 307__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
 308		struct smb_rqst *rqst)
 309{
 310	int rc = 0;
 311	struct kvec *iov;
 312	int n_vec;
 313	unsigned int send_length = 0;
 314	unsigned int i, j;
 315	sigset_t mask, oldmask;
 316	size_t total_len = 0, sent, size;
 317	struct socket *ssocket = server->ssocket;
 318	struct msghdr smb_msg;
 319	int val = 1;
 320	__be32 rfc1002_marker;
 321
 322	if (cifs_rdma_enabled(server) && server->smbd_conn) {
 323		rc = smbd_send(server, num_rqst, rqst);
 324		goto smbd_done;
 325	}
 326
 327	if (ssocket == NULL)
 328		return -EAGAIN;
 329
 330	if (signal_pending(current)) {
 331		cifs_dbg(FYI, "signal is pending before sending any data\n");
 332		return -EINTR;
 333	}
 334
 335	/* cork the socket */
 336	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
 337				(char *)&val, sizeof(val));
 338
 339	for (j = 0; j < num_rqst; j++)
 340		send_length += smb_rqst_len(server, &rqst[j]);
 341	rfc1002_marker = cpu_to_be32(send_length);
 342
 343	/*
 344	 * We should not allow signals to interrupt the network send because
 345	 * any partial send will cause session reconnects thus increasing
 346	 * latency of system calls and overloading a server with unnecessary
 347	 * requests.
 348	 */
 349
 350	sigfillset(&mask);
 351	sigprocmask(SIG_BLOCK, &mask, &oldmask);
 352
 353	/* Generate a rfc1002 marker for SMB2+ */
 354	if (server->vals->header_preamble_size == 0) {
 355		struct kvec hiov = {
 356			.iov_base = &rfc1002_marker,
 357			.iov_len  = 4
 358		};
 359		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
 360		rc = smb_send_kvec(server, &smb_msg, &sent);
 361		if (rc < 0)
 362			goto unmask;
 363
 364		total_len += sent;
 365		send_length += 4;
 366	}
 367
 368	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
 369
 370	for (j = 0; j < num_rqst; j++) {
 371		iov = rqst[j].rq_iov;
 372		n_vec = rqst[j].rq_nvec;
 373
 374		size = 0;
 375		for (i = 0; i < n_vec; i++) {
 376			dump_smb(iov[i].iov_base, iov[i].iov_len);
 377			size += iov[i].iov_len;
 378		}
 379
 380		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
 381
 382		rc = smb_send_kvec(server, &smb_msg, &sent);
 383		if (rc < 0)
 384			goto unmask;
 385
 386		total_len += sent;
 387
 388		/* now walk the page array and send each page in it */
 389		for (i = 0; i < rqst[j].rq_npages; i++) {
 390			struct bio_vec bvec;
 391
 392			bvec.bv_page = rqst[j].rq_pages[i];
 393			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
 394					     &bvec.bv_offset);
 395
 396			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
 397				      &bvec, 1, bvec.bv_len);
 398			rc = smb_send_kvec(server, &smb_msg, &sent);
 399			if (rc < 0)
 400				break;
 401
 402			total_len += sent;
 403		}
 404	}
 405
 406unmask:
 407	sigprocmask(SIG_SETMASK, &oldmask, NULL);
 408
 409	/*
 410	 * If signal is pending but we have already sent the whole packet to
 411	 * the server we need to return success status to allow a corresponding
 412	 * mid entry to be kept in the pending requests queue thus allowing
 413	 * to handle responses from the server by the client.
 414	 *
 415	 * If only part of the packet has been sent there is no need to hide
 416	 * interrupt because the session will be reconnected anyway, so there
 417	 * won't be any response from the server to handle.
 418	 */
 419
 420	if (signal_pending(current) && (total_len != send_length)) {
 421		cifs_dbg(FYI, "signal is pending after attempt to send\n");
 422		rc = -EINTR;
 423	}
 424
 425	/* uncork it */
 426	val = 0;
 427	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
 428				(char *)&val, sizeof(val));
 429
 430	if ((total_len > 0) && (total_len != send_length)) {
 431		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
 432			 send_length, total_len);
 433		/*
 434		 * If we have only sent part of an SMB then the next SMB could
 435		 * be taken as the remainder of this one. We need to kill the
 436		 * socket so the server throws away the partial SMB
 437		 */
 438		server->tcpStatus = CifsNeedReconnect;
 439		trace_smb3_partial_send_reconnect(server->CurrentMid,
 440						  server->hostname);
 441	}
 442smbd_done:
 443	if (rc < 0 && rc != -EINTR)
 444		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
 445			 rc);
 446	else if (rc > 0)
 447		rc = 0;
 448
 449	return rc;
 450}
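/*
 * [Editorial sketch] __smb_send_rqst() blocks every signal around the
 * socket writes so a delivery mid-send cannot leave a partial SMB on
 * the wire, then re-checks signal_pending() once the mask is restored.
 * The same pattern in plain POSIX (do_send is a hypothetical callback):
 */
#include <signal.h>

static int send_uninterrupted(int (*do_send)(void))
{
	sigset_t mask, oldmask;
	int rc;

	sigfillset(&mask);			/* block all signals ...      */
	sigprocmask(SIG_BLOCK, &mask, &oldmask);
	rc = do_send();				/* ... for the whole transfer */
	sigprocmask(SIG_SETMASK, &oldmask, NULL);	/* then restore */
	return rc;
}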
 451
 452static int
 453smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
 454	      struct smb_rqst *rqst, int flags)
 455{
 456	struct kvec iov;
 457	struct smb2_transform_hdr tr_hdr;
 458	struct smb_rqst cur_rqst[MAX_COMPOUND];
 459	int rc;
 460
 461	if (!(flags & CIFS_TRANSFORM_REQ))
 462		return __smb_send_rqst(server, num_rqst, rqst);
 463
 464	if (num_rqst > MAX_COMPOUND - 1)
 465		return -ENOMEM;
 466
 467	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
 468	memset(&iov, 0, sizeof(iov));
 469	memset(&tr_hdr, 0, sizeof(tr_hdr));
 470
 471	iov.iov_base = &tr_hdr;
 472	iov.iov_len = sizeof(tr_hdr);
 473	cur_rqst[0].rq_iov = &iov;
 474	cur_rqst[0].rq_nvec = 1;
 475
 476	if (!server->ops->init_transform_rq) {
 477		cifs_server_dbg(VFS, "Encryption requested but transform "
 478				"callback is missing\n");
 479		return -EIO;
 480	}
 481
 482	rc = server->ops->init_transform_rq(server, num_rqst + 1,
 483					    &cur_rqst[0], rqst);
 484	if (rc)
 485		return rc;
 486
 487	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
 488	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
 489	return rc;
 490}
 491
 492int
 493smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
 494	 unsigned int smb_buf_length)
 495{
 496	struct kvec iov[2];
 497	struct smb_rqst rqst = { .rq_iov = iov,
 498				 .rq_nvec = 2 };
 499
 500	iov[0].iov_base = smb_buffer;
 501	iov[0].iov_len = 4;
 502	iov[1].iov_base = (char *)smb_buffer + 4;
 503	iov[1].iov_len = smb_buf_length;
 504
 505	return __smb_send_rqst(server, 1, &rqst);
 506}
 507
 508static int
 509wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
 510		      const int timeout, const int flags,
 511		      unsigned int *instance)
 512{
 513	int rc;
 514	int *credits;
 515	int optype;
 516	long int t;
 517
 518	if (timeout < 0)
 519		t = MAX_JIFFY_OFFSET;
 520	else
 521		t = msecs_to_jiffies(timeout);
 522
 523	optype = flags & CIFS_OP_MASK;
 524
 525	*instance = 0;
 526
 527	credits = server->ops->get_credits_field(server, optype);
 528	/* Since an echo is already inflight, no need to wait to send another */
 529	if (*credits <= 0 && optype == CIFS_ECHO_OP)
 530		return -EAGAIN;
 531
 532	spin_lock(&server->req_lock);
 533	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
 534		/* oplock breaks must not be held up */
 535		server->in_flight++;
 536		if (server->in_flight > server->max_in_flight)
 537			server->max_in_flight = server->in_flight;
 538		*credits -= 1;
 539		*instance = server->reconnect_instance;
 540		spin_unlock(&server->req_lock);
 541		return 0;
 542	}
 543
 544	while (1) {
 545		if (*credits < num_credits) {
 546			spin_unlock(&server->req_lock);
 547			cifs_num_waiters_inc(server);
 548			rc = wait_event_killable_timeout(server->request_q,
 549				has_credits(server, credits, num_credits), t);
 550			cifs_num_waiters_dec(server);
 551			if (!rc) {
 552				trace_smb3_credit_timeout(server->CurrentMid,
 553					server->hostname, num_credits);
 554				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
 555					 timeout);
 556				return -ENOTSUPP;
 557			}
 558			if (rc == -ERESTARTSYS)
 559				return -ERESTARTSYS;
 560			spin_lock(&server->req_lock);
 561		} else {
 562			if (server->tcpStatus == CifsExiting) {
 563				spin_unlock(&server->req_lock);
 564				return -ENOENT;
 565			}
 566
 567			/*
 568			 * For normal commands, reserve the last MAX_COMPOUND
 569			 * credits to compound requests.
 570			 * Otherwise these compounds could be permanently
 571			 * starved for credits by single-credit requests.
 572			 *
 573			 * To prevent spinning CPU, block this thread until
 574			 * there are >MAX_COMPOUND credits available.
 575			 * But only do this if we already have a lot of
 576			 * credits in flight to avoid triggering this check
 577			 * for servers that are slow to hand out credits on
 578			 * new sessions.
 579			 */
 580			if (!optype && num_credits == 1 &&
 581			    server->in_flight > 2 * MAX_COMPOUND &&
 582			    *credits <= MAX_COMPOUND) {
 583				spin_unlock(&server->req_lock);
 584				cifs_num_waiters_inc(server);
 585				rc = wait_event_killable_timeout(
 586					server->request_q,
 587					has_credits(server, credits,
 588						    MAX_COMPOUND + 1),
 589					t);
 590				cifs_num_waiters_dec(server);
 591				if (!rc) {
 592					trace_smb3_credit_timeout(
 593						server->CurrentMid,
 594						server->hostname, num_credits);
 595					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
 596						 timeout);
 597					return -ENOTSUPP;
 598				}
 599				if (rc == -ERESTARTSYS)
 600					return -ERESTARTSYS;
 601				spin_lock(&server->req_lock);
 602				continue;
 603			}
 604
 605			/*
 606			 * Can not count locking commands against total
 607			 * as they are allowed to block on server.
 608			 */
 609
 610			/* update # of requests on the wire to server */
 611			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
 612				*credits -= num_credits;
 613				server->in_flight += num_credits;
 614				if (server->in_flight > server->max_in_flight)
 615					server->max_in_flight = server->in_flight;
 616				*instance = server->reconnect_instance;
 617			}
 618			spin_unlock(&server->req_lock);
 619			break;
 620		}
 621	}
 622	return 0;
 623}
 624
 625static int
 626wait_for_free_request(struct TCP_Server_Info *server, const int flags,
 627		      unsigned int *instance)
 628{
 629	return wait_for_free_credits(server, 1, -1, flags,
 630				     instance);
 631}
 632
 633static int
 634wait_for_compound_request(struct TCP_Server_Info *server, int num,
 635			  const int flags, unsigned int *instance)
 636{
 637	int *credits;
 638
 639	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
 640
 641	spin_lock(&server->req_lock);
 642	if (*credits < num) {
 643		/*
 644		 * Return immediately if not too many requests in flight since
 645		 * we will likely be stuck on waiting for credits.
 646		 */
 647		if (server->in_flight < num - *credits) {
 648			spin_unlock(&server->req_lock);
 649			return -ENOTSUPP;
 650		}
 651	}
 652	spin_unlock(&server->req_lock);
 653
 654	return wait_for_free_credits(server, num, 60000, flags,
 655				     instance);
 656}
 657
 658int
 659cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
 660		      unsigned int *num, struct cifs_credits *credits)
 661{
 662	*num = size;
 663	credits->value = 0;
 664	credits->instance = server->reconnect_instance;
 665	return 0;
 666}
 667
 668static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
 669			struct mid_q_entry **ppmidQ)
 670{
 671	if (ses->server->tcpStatus == CifsExiting) {
 672		return -ENOENT;
 673	}
 674
 675	if (ses->server->tcpStatus == CifsNeedReconnect) {
 676		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
 677		return -EAGAIN;
 678	}
 679
 680	if (ses->status == CifsNew) {
 681		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
 682			(in_buf->Command != SMB_COM_NEGOTIATE))
 683			return -EAGAIN;
 684		/* else ok - we are setting up session */
 685	}
 686
 687	if (ses->status == CifsExiting) {
 688		/* check if SMB session is bad because we are setting it up */
 689		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
 690			return -EAGAIN;
 691		/* else ok - we are shutting down session */
 692	}
 693
 694	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
 695	if (*ppmidQ == NULL)
 696		return -ENOMEM;
 697	spin_lock(&GlobalMid_Lock);
 698	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
 699	spin_unlock(&GlobalMid_Lock);
 700	return 0;
 701}
 702
 703static int
 704wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 705{
 706	int error;
 707
 708	error = wait_event_freezekillable_unsafe(server->response_q,
 709				    midQ->mid_state != MID_REQUEST_SUBMITTED);
 710	if (error < 0)
 711		return -ERESTARTSYS;
 712
 713	return 0;
 714}
 715
 716struct mid_q_entry *
 717cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 718{
 719	int rc;
 720	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
 721	struct mid_q_entry *mid;
 722
 723	if (rqst->rq_iov[0].iov_len != 4 ||
 724	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
 725		return ERR_PTR(-EIO);
 726
 727	/* enable signing if server requires it */
 728	if (server->sign)
 729		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 730
 731	mid = AllocMidQEntry(hdr, server);
 732	if (mid == NULL)
 733		return ERR_PTR(-ENOMEM);
 734
 735	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
 736	if (rc) {
 737		DeleteMidQEntry(mid);
 738		return ERR_PTR(rc);
 739	}
 740
 741	return mid;
 742}
 743
 744/*
 745  * Send an SMB request and set the callback function in the mid to handle
 746 * the result. Caller is responsible for dealing with timeouts.
 747 */
 748int
 749cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
 750		mid_receive_t *receive, mid_callback_t *callback,
 751		mid_handle_t *handle, void *cbdata, const int flags,
 752		const struct cifs_credits *exist_credits)
 753{
 754	int rc;
 755	struct mid_q_entry *mid;
 756	struct cifs_credits credits = { .value = 0, .instance = 0 };
 757	unsigned int instance;
 758	int optype;
 759
 760	optype = flags & CIFS_OP_MASK;
 761
 762	if ((flags & CIFS_HAS_CREDITS) == 0) {
 763		rc = wait_for_free_request(server, flags, &instance);
 764		if (rc)
 765			return rc;
 766		credits.value = 1;
 767		credits.instance = instance;
 768	} else
 769		instance = exist_credits->instance;
 770
 771	mutex_lock(&server->srv_mutex);
 772
 773	/*
 774	 * We can't use credits obtained from the previous session to send this
 775	 * request. Check if there were reconnects after we obtained credits and
 776	 * return -EAGAIN in such cases to let callers handle it.
 777	 */
 778	if (instance != server->reconnect_instance) {
 779		mutex_unlock(&server->srv_mutex);
 780		add_credits_and_wake_if(server, &credits, optype);
 781		return -EAGAIN;
 782	}
 783
 784	mid = server->ops->setup_async_request(server, rqst);
 785	if (IS_ERR(mid)) {
 786		mutex_unlock(&server->srv_mutex);
 787		add_credits_and_wake_if(server, &credits, optype);
 788		return PTR_ERR(mid);
 789	}
 790
 791	mid->receive = receive;
 792	mid->callback = callback;
 793	mid->callback_data = cbdata;
 794	mid->handle = handle;
 795	mid->mid_state = MID_REQUEST_SUBMITTED;
 796
 797	/* put it on the pending_mid_q */
 798	spin_lock(&GlobalMid_Lock);
 799	list_add_tail(&mid->qhead, &server->pending_mid_q);
 800	spin_unlock(&GlobalMid_Lock);
 801
 802	/*
 803	 * Need to store the time in mid before calling I/O. For call_async,
 804	 * I/O response may come back and free the mid entry on another thread.
 805	 */
 806	cifs_save_when_sent(mid);
 807	cifs_in_send_inc(server);
 808	rc = smb_send_rqst(server, 1, rqst, flags);
 809	cifs_in_send_dec(server);
 810
 811	if (rc < 0) {
 812		revert_current_mid(server, mid->credits);
 813		server->sequence_number -= 2;
 814		cifs_delete_mid(mid);
 815	}
 816
 817	mutex_unlock(&server->srv_mutex);
 818
 819	if (rc == 0)
 820		return 0;
 821
 822	add_credits_and_wake_if(server, &credits, optype);
 823	return rc;
 824}
 825
 826 /*
 827  * Send an SMB request; no response info (other than the return code)
 828  * needs to be parsed.
 829  *
 830  * flags indicate the type of request buffer, how long to wait, and
 831  * whether to log the NT STATUS code (error) before mapping it to a
 832  * POSIX error.
 833  *
 834  */
 835int
 836SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
 837		 char *in_buf, int flags)
 838{
 839	int rc;
 840	struct kvec iov[1];
 841	struct kvec rsp_iov;
 842	int resp_buf_type;
 843
 844	iov[0].iov_base = in_buf;
 845	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
 846	flags |= CIFS_NO_RSP_BUF;
 847	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
 848	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
 849
 850	return rc;
 851}
 852
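/*
 * Translate the final state of a mid into an errno for the synchronous
 * callers and release the mid; a mid left in an unexpected state is
 * unlinked from pending_mid_q here if nothing else has done so.
 */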
 853static int
 854cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
 855{
 856	int rc = 0;
 857
 858	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
 859		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
 860
 861	spin_lock(&GlobalMid_Lock);
 862	switch (mid->mid_state) {
 863	case MID_RESPONSE_RECEIVED:
 864		spin_unlock(&GlobalMid_Lock);
 865		return rc;
 866	case MID_RETRY_NEEDED:
 867		rc = -EAGAIN;
 868		break;
 869	case MID_RESPONSE_MALFORMED:
 870		rc = -EIO;
 871		break;
 872	case MID_SHUTDOWN:
 873		rc = -EHOSTDOWN;
 874		break;
 875	default:
 876		if (!(mid->mid_flags & MID_DELETED)) {
 877			list_del_init(&mid->qhead);
 878			mid->mid_flags |= MID_DELETED;
 879		}
 880		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
 881			 __func__, mid->mid, mid->mid_state);
 882		rc = -EIO;
 883	}
 884	spin_unlock(&GlobalMid_Lock);
 885
 886	DeleteMidQEntry(mid);
 887	return rc;
 888}
 889
 890static inline int
 891send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
 892	    struct mid_q_entry *mid)
 893{
 894	return server->ops->send_cancel ?
 895				server->ops->send_cancel(server, rqst, mid) : 0;
 896}
 897
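/*
 * Sanity check a received SMB1 response: dump the start of the frame,
 * verify the signature if signing is in use, and map the SMB status code
 * to a POSIX error.
 */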
 898int
 899cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
 900		   bool log_error)
 901{
 902	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
 903
 904	dump_smb(mid->resp_buf, min_t(u32, 92, len));
 905
 906	/* verify the signature if the server requires signing */
 907	if (server->sign) {
 908		struct kvec iov[2];
 909		int rc = 0;
 910		struct smb_rqst rqst = { .rq_iov = iov,
 911					 .rq_nvec = 2 };
 912
 913		iov[0].iov_base = mid->resp_buf;
 914		iov[0].iov_len = 4;
 915		iov[1].iov_base = (char *)mid->resp_buf + 4;
 916		iov[1].iov_len = len - 4;
 917		/* FIXME: add code to kill session */
 918		rc = cifs_verify_signature(&rqst, server,
 919					   mid->sequence_number);
 920		if (rc)
 921			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
 922				 rc);
 923	}
 924
 925	/* BB special case reconnect tid and uid here? */
 926	return map_smb_to_linux_error(mid->resp_buf, log_error);
 927}
 928
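/*
 * Synchronous counterpart of cifs_setup_async_request(): same iov layout
 * check and signing, but the mid is allocated and queued on pending_mid_q
 * via allocate_mid().
 */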
 929struct mid_q_entry *
 930cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
 931{
 932	int rc;
 933	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
 934	struct mid_q_entry *mid;
 935
 936	if (rqst->rq_iov[0].iov_len != 4 ||
 937	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
 938		return ERR_PTR(-EIO);
 939
 940	rc = allocate_mid(ses, hdr, &mid);
 941	if (rc)
 942		return ERR_PTR(rc);
 943	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
 944	if (rc) {
 945		cifs_delete_mid(mid);
 946		return ERR_PTR(rc);
 947	}
 948	return mid;
 949}
 950
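/*
 * A response to an interior part of a compound chain only returns the
 * credits granted by the server; only the last part's callback also wakes
 * the thread waiting in compound_send_recv().
 */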
 951static void
 952cifs_compound_callback(struct mid_q_entry *mid)
 953{
 954	struct TCP_Server_Info *server = mid->server;
 955	struct cifs_credits credits;
 956
 957	credits.value = server->ops->get_credits(mid);
 958	credits.instance = server->reconnect_instance;
 959
 960	add_credits(server, &credits, mid->optype);
 961}
 962
 963static void
 964cifs_compound_last_callback(struct mid_q_entry *mid)
 965{
 966	cifs_compound_callback(mid);
 967	cifs_wake_up_task(mid);
 968}
 969
 970static void
 971cifs_cancelled_callback(struct mid_q_entry *mid)
 972{
 973	cifs_compound_callback(mid);
 974	DeleteMidQEntry(mid);
 975}
 976
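/*
 * Send one or more requests as a single compound chain and wait for the
 * responses: reserve credits for every part up front, sign and send the
 * chain under srv_mutex so frames reach the socket in signing order, then
 * wait for each response, cancelling any mids still in flight if the wait
 * is interrupted.
 *
 * Illustrative call (a sketch, not taken from this file; the names are
 * hypothetical):
 *
 *	rc = compound_send_recv(xid, ses, flags, 2, rqst,
 *				resp_buf_type, rsp_iov);
 */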
 977int
 978compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
 979		   const int flags, const int num_rqst, struct smb_rqst *rqst,
 980		   int *resp_buf_type, struct kvec *resp_iov)
 981{
 982	int i, j, optype, rc = 0;
 983	struct mid_q_entry *midQ[MAX_COMPOUND];
 984	bool cancelled_mid[MAX_COMPOUND] = {false};
 985	struct cifs_credits credits[MAX_COMPOUND] = {
 986		{ .value = 0, .instance = 0 }
 987	};
 988	unsigned int instance;
 989	char *buf;
 990	struct TCP_Server_Info *server;
 991
 992	optype = flags & CIFS_OP_MASK;
 993
 994	for (i = 0; i < num_rqst; i++)
 995		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */
 996
 997	if ((ses == NULL) || (ses->server == NULL)) {
 998		cifs_dbg(VFS, "Null session\n");
 999		return -EIO;
1000	}
1001
1002	server = ses->server;
1003	if (server->tcpStatus == CifsExiting)
1004		return -ENOENT;
1005
1006	/*
1007	 * Wait for all the requests to become available.
1008	 * This approach still leaves the possibility of being stuck waiting for
1009	 * credits if the server doesn't grant credits to the outstanding
1010	 * requests and if the client is completely idle, not generating any
1011	 * other requests.
1012	 * This can be handled by the eventual session reconnect.
1013	 */
1014	rc = wait_for_compound_request(server, num_rqst, flags,
1015				       &instance);
1016	if (rc)
1017		return rc;
1018
1019	for (i = 0; i < num_rqst; i++) {
1020		credits[i].value = 1;
1021		credits[i].instance = instance;
1022	}
1023
1024	/*
1025	 * Make sure that we sign in the same order that we send on this socket
1026	 * and avoid races inside tcp sendmsg code that could cause corruption
1027	 * of smb data.
1028	 */
1029
1030	mutex_lock(&server->srv_mutex);
1031
1032	/*
1033	 * All the parts of the compound chain must use credits obtained from
1034	 * the same session. We cannot use credits obtained from a previous
1035	 * session to send this request. Check if there were reconnects after
1036	 * we obtained credits and return -EAGAIN in such cases to let callers
1037	 * handle it.
1038	 */
1039	if (instance != server->reconnect_instance) {
1040		mutex_unlock(&server->srv_mutex);
1041		for (j = 0; j < num_rqst; j++)
1042			add_credits(server, &credits[j], optype);
1043		return -EAGAIN;
1044	}
1045
1046	for (i = 0; i < num_rqst; i++) {
1047		midQ[i] = server->ops->setup_request(ses, &rqst[i]);
1048		if (IS_ERR(midQ[i])) {
1049			revert_current_mid(server, i);
1050			for (j = 0; j < i; j++)
1051				cifs_delete_mid(midQ[j]);
1052			mutex_unlock(&server->srv_mutex);
1053
1054			/* Update # of requests on wire to server */
1055			for (j = 0; j < num_rqst; j++)
1056				add_credits(server, &credits[j], optype);
1057			return PTR_ERR(midQ[i]);
1058		}
1059
1060		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
1061		midQ[i]->optype = optype;
1062		/*
1063		 * Invoke callback for every part of the compound chain
1064		 * to calculate credits properly. Wake up this thread only when
1065		 * the last element is received.
1066		 */
1067		if (i < num_rqst - 1)
1068			midQ[i]->callback = cifs_compound_callback;
1069		else
1070			midQ[i]->callback = cifs_compound_last_callback;
1071	}
1072	cifs_in_send_inc(server);
1073	rc = smb_send_rqst(server, num_rqst, rqst, flags);
1074	cifs_in_send_dec(server);
1075
1076	for (i = 0; i < num_rqst; i++)
1077		cifs_save_when_sent(midQ[i]);
1078
1079	if (rc < 0) {
1080		revert_current_mid(server, num_rqst);
1081		server->sequence_number -= 2;
1082	}
1083
1084	mutex_unlock(&server->srv_mutex);
1085
1086	/*
1087	 * If sending failed for some reason, or this is an oplock break that
1088	 * we will not receive a response to, return the credits.
1089	 */
1090	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
1091		for (i = 0; i < num_rqst; i++)
1092			add_credits(server, &credits[i], optype);
1093		goto out;
1094	}
1095
1096	/*
1097	 * At this point the request is passed to the network stack - we assume
1098	 * that any credits taken from the server structure on the client have
1099	 * been spent and we can't return them back. Once we receive responses
1100	 * we will collect credits granted by the server in the mid callbacks
1101	 * and add those credits to the server structure.
1102	 */
1103
1104	/*
1105	 * Compounding is never used during session establishment.
1106	 */
1107	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
1108		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
1109					   rqst[0].rq_nvec);
1110
1111	for (i = 0; i < num_rqst; i++) {
1112		rc = wait_for_response(server, midQ[i]);
1113		if (rc != 0)
1114			break;
1115	}
1116	if (rc != 0) {
1117		for (; i < num_rqst; i++) {
1118			cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
1119				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
1120			send_cancel(server, &rqst[i], midQ[i]);
1121			spin_lock(&GlobalMid_Lock);
1122			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
1123				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
1124				midQ[i]->callback = cifs_cancelled_callback;
1125				cancelled_mid[i] = true;
1126				credits[i].value = 0;
1127			}
1128			spin_unlock(&GlobalMid_Lock);
1129		}
1130	}
1131
1132	for (i = 0; i < num_rqst; i++) {
1133		if (rc < 0)
1134			goto out;
1135
1136		rc = cifs_sync_mid_result(midQ[i], server);
1137		if (rc != 0) {
1138			/* mark this mid as cancelled to not free it below */
1139			cancelled_mid[i] = true;
1140			goto out;
1141		}
1142
1143		if (!midQ[i]->resp_buf ||
1144		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
1145			rc = -EIO;
1146			cifs_dbg(FYI, "Bad MID state?\n");
1147			goto out;
1148		}
1149
1150		buf = (char *)midQ[i]->resp_buf;
1151		resp_iov[i].iov_base = buf;
1152		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1153			server->vals->header_preamble_size;
1154
1155		if (midQ[i]->large_buf)
1156			resp_buf_type[i] = CIFS_LARGE_BUFFER;
1157		else
1158			resp_buf_type[i] = CIFS_SMALL_BUFFER;
1159
1160		rc = server->ops->check_receive(midQ[i], server,
1161						     flags & CIFS_LOG_ERROR);
1162
1163		/* mark it so buf will not be freed by cifs_delete_mid */
1164		if ((flags & CIFS_NO_RSP_BUF) == 0)
1165			midQ[i]->resp_buf = NULL;
1166
1167	}
1168
1169	/*
1170	 * Compounding is never used during session establishment.
1171	 */
1172	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
1173		struct kvec iov = {
1174			.iov_base = resp_iov[0].iov_base,
1175			.iov_len = resp_iov[0].iov_len
1176		};
1177		smb311_update_preauth_hash(ses, &iov, 1);
1178	}
1179
1180out:
1181	/*
1182	 * This will dequeue all mids. After this it is important that the
1183	 * demultiplex_thread will not process any of these mids any further.
1184	 * This is prevented above by using a noop callback that will not
1185	 * wake this thread except for the very last PDU.
1186	 */
1187	for (i = 0; i < num_rqst; i++) {
1188		if (!cancelled_mid[i])
1189			cifs_delete_mid(midQ[i]);
1190	}
1191
1192	return rc;
1193}
1194
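/* Convenience wrapper for sending a single (non-compounded) request. */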
1195int
1196cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
1197	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
1198	       struct kvec *resp_iov)
1199{
1200	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
1201				  resp_iov);
1202}
1203
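/*
 * Split off the 4-byte RFC1002 length (which callers place at the start of
 * iov[0]) into its own iov so the request matches the layout that
 * cifs_setup_request() expects, then send it.
 */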
1204int
1205SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1206	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1207	     const int flags, struct kvec *resp_iov)
1208{
1209	struct smb_rqst rqst;
1210	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1211	int rc;
1212
1213	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1214		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1215					GFP_KERNEL);
1216		if (!new_iov) {
1217			/* otherwise cifs_send_recv below sets resp_buf_type */
1218			*resp_buf_type = CIFS_NO_BUFFER;
1219			return -ENOMEM;
1220		}
1221	} else
1222		new_iov = s_iov;
1223
1224	/* 1st iov is a RFC1001 length followed by the rest of the packet */
1225	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1226
1227	new_iov[0].iov_base = new_iov[1].iov_base;
1228	new_iov[0].iov_len = 4;
1229	new_iov[1].iov_base += 4;
1230	new_iov[1].iov_len -= 4;
1231
1232	memset(&rqst, 0, sizeof(struct smb_rqst));
1233	rqst.rq_iov = new_iov;
1234	rqst.rq_nvec = n_vec + 1;
1235
1236	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
1237	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1238		kfree(new_iov);
1239	return rc;
1240}
1241
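/*
 * Legacy synchronous send/receive for a pre-marshalled SMB1 frame: the
 * caller supplies the complete request in in_buf and gets the raw response
 * copied into out_buf.
 */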
1242int
1243SendReceive(const unsigned int xid, struct cifs_ses *ses,
1244	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1245	    int *pbytes_returned, const int flags)
1246{
1247	int rc = 0;
1248	struct mid_q_entry *midQ;
1249	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1250	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1251	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1252	struct cifs_credits credits = { .value = 1, .instance = 0 };
1253	struct TCP_Server_Info *server;
1254
1255	if (ses == NULL) {
1256		cifs_dbg(VFS, "Null smb session\n");
1257		return -EIO;
1258	}
1259	server = ses->server;
1260	if (server == NULL) {
1261		cifs_dbg(VFS, "Null tcp session\n");
1262		return -EIO;
1263	}
1264
1265	if (server->tcpStatus == CifsExiting)
1266		return -ENOENT;
1267
1268	/* Ensure that we do not send more than 50 overlapping requests
1269	   to the same server. We may make this configurable later or
1270	   use ses->maxReq */
1271
1272	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1273		cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
1274			 len);
1275		return -EIO;
1276	}
1277
1278	rc = wait_for_free_request(server, flags, &credits.instance);
1279	if (rc)
1280		return rc;
1281
1282	/* make sure that we sign in the same order that we send on this socket
1283	   and avoid races inside tcp sendmsg code that could cause corruption
1284	   of smb data */
1285
1286	mutex_lock(&server->srv_mutex);
1287
1288	rc = allocate_mid(ses, in_buf, &midQ);
1289	if (rc) {
1290		mutex_unlock(&server->srv_mutex);
1291		/* Update # of requests on wire to server */
1292		add_credits(server, &credits, 0);
1293		return rc;
1294	}
1295
1296	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1297	if (rc) {
1298		mutex_unlock(&server->srv_mutex);
1299		goto out;
1300	}
1301
1302	midQ->mid_state = MID_REQUEST_SUBMITTED;
1303
1304	cifs_in_send_inc(server);
1305	rc = smb_send(server, in_buf, len);
1306	cifs_in_send_dec(server);
1307	cifs_save_when_sent(midQ);
1308
1309	if (rc < 0)
1310		server->sequence_number -= 2;
1311
1312	mutex_unlock(&server->srv_mutex);
1313
1314	if (rc < 0)
1315		goto out;
1316
1317	rc = wait_for_response(server, midQ);
1318	if (rc != 0) {
1319		send_cancel(server, &rqst, midQ);
1320		spin_lock(&GlobalMid_Lock);
1321		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1322			/* no longer considered to be "in-flight" */
1323			midQ->callback = DeleteMidQEntry;
1324			spin_unlock(&GlobalMid_Lock);
1325			add_credits(server, &credits, 0);
1326			return rc;
1327		}
1328		spin_unlock(&GlobalMid_Lock);
1329	}
1330
1331	rc = cifs_sync_mid_result(midQ, server);
1332	if (rc != 0) {
1333		add_credits(server, &credits, 0);
1334		return rc;
1335	}
1336
1337	if (!midQ->resp_buf || !out_buf ||
1338	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
1339		rc = -EIO;
1340		cifs_server_dbg(VFS, "Bad MID state?\n");
1341		goto out;
1342	}
1343
1344	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1345	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1346	rc = cifs_check_receive(midQ, server, 0);
1347out:
1348	cifs_delete_mid(midQ);
1349	add_credits(server, &credits, 0);
1350
1351	return rc;
1352}
1353
1354/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1355   blocking lock to return. */
1356
1357static int
1358send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1359			struct smb_hdr *in_buf,
1360			struct smb_hdr *out_buf)
1361{
1362	int bytes_returned;
1363	struct cifs_ses *ses = tcon->ses;
1364	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1365
1366	/* We just modify the current in_buf to change
1367	   the type of lock from LOCKING_ANDX_SHARED_LOCK
1368	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
1369	   LOCKING_ANDX_CANCEL_LOCK. */
1370
1371	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1372	pSMB->Timeout = 0;
1373	pSMB->hdr.Mid = get_next_mid(ses->server);
1374
1375	return SendReceive(xid, ses, in_buf, out_buf,
1376			&bytes_returned, 0);
1377}
1378
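/*
 * Like SendReceive(), but for byte-range lock requests that may block on
 * the server indefinitely: wait interruptibly and, on a signal, send the
 * appropriate cancel (an NT_CANCEL for POSIX locks, a LOCKINGX_CANCEL_LOCK
 * for Windows locks) before waiting again and restarting the system call.
 */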
1379int
1380SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1381	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1382	    int *pbytes_returned)
1383{
1384	int rc = 0;
1385	int rstart = 0;
1386	struct mid_q_entry *midQ;
1387	struct cifs_ses *ses;
1388	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1389	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1390	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1391	unsigned int instance;
1392	struct TCP_Server_Info *server;
1393
1394	if (tcon == NULL || tcon->ses == NULL) {
1395		cifs_dbg(VFS, "Null smb session\n");
1396		return -EIO;
1397	}
1398	ses = tcon->ses;
1399	server = ses->server;
1400
1401	if (server == NULL) {
1402		cifs_dbg(VFS, "Null tcp session\n");
1403		return -EIO;
1404	}
1405
1406	if (server->tcpStatus == CifsExiting)
1407		return -ENOENT;
1408
1409	/* Ensure that we do not send more than 50 overlapping requests
1410	   to the same server. We may make this configurable later or
1411	   use ses->maxReq */
1412
1413	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1414		cifs_tcon_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
1415			 len);
1416		return -EIO;
1417	}
1418
1419	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1420	if (rc)
1421		return rc;
1422
1423	/* make sure that we sign in the same order that we send on this socket
1424	   and avoid races inside tcp sendmsg code that could cause corruption
1425	   of smb data */
1426
1427	mutex_lock(&server->srv_mutex);
1428
1429	rc = allocate_mid(ses, in_buf, &midQ);
1430	if (rc) {
1431		mutex_unlock(&server->srv_mutex);
1432		return rc;
1433	}
1434
1435	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1436	if (rc) {
1437		cifs_delete_mid(midQ);
1438		mutex_unlock(&server->srv_mutex);
1439		return rc;
1440	}
1441
1442	midQ->mid_state = MID_REQUEST_SUBMITTED;
1443	cifs_in_send_inc(server);
1444	rc = smb_send(server, in_buf, len);
1445	cifs_in_send_dec(server);
1446	cifs_save_when_sent(midQ);
1447
1448	if (rc < 0)
1449		server->sequence_number -= 2;
1450
1451	mutex_unlock(&server->srv_mutex);
1452
1453	if (rc < 0) {
1454		cifs_delete_mid(midQ);
1455		return rc;
1456	}
1457
1458	/* Wait for a reply - allow signals to interrupt. */
1459	rc = wait_event_interruptible(server->response_q,
1460		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
1461		((server->tcpStatus != CifsGood) &&
1462		 (server->tcpStatus != CifsNew)));
1463
1464	/* Were we interrupted by a signal? */
1465	if ((rc == -ERESTARTSYS) &&
1466		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1467		((server->tcpStatus == CifsGood) ||
1468		 (server->tcpStatus == CifsNew))) {
1469
1470		if (in_buf->Command == SMB_COM_TRANSACTION2) {
1471			/* POSIX lock. We send a NT_CANCEL SMB to cause the
1472			   blocking lock to return. */
1473			rc = send_cancel(server, &rqst, midQ);
1474			if (rc) {
1475				cifs_delete_mid(midQ);
1476				return rc;
1477			}
1478		} else {
1479			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1480			   to cause the blocking lock to return. */
1481
1482			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1483
1484			/* If we get -ENOLCK back the lock may have
1485			   already been removed. Don't exit in this case. */
1486			if (rc && rc != -ENOLCK) {
1487				cifs_delete_mid(midQ);
1488				return rc;
1489			}
1490		}
1491
1492		rc = wait_for_response(server, midQ);
1493		if (rc) {
1494			send_cancel(server, &rqst, midQ);
1495			spin_lock(&GlobalMid_Lock);
1496			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1497				/* no longer considered to be "in-flight" */
1498				midQ->callback = DeleteMidQEntry;
1499				spin_unlock(&GlobalMid_Lock);
1500				return rc;
1501			}
1502			spin_unlock(&GlobalMid_Lock);
1503		}
1504
1505		/* We got the response - restart system call. */
1506		rstart = 1;
1507	}
1508
1509	rc = cifs_sync_mid_result(midQ, server);
1510	if (rc != 0)
1511		return rc;
1512
1513	/* rcvd frame is ok */
1514	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1515		rc = -EIO;
1516		cifs_tcon_dbg(VFS, "Bad MID state?\n");
1517		goto out;
1518	}
1519
1520	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1521	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1522	rc = cifs_check_receive(midQ, server, 0);
1523out:
1524	cifs_delete_mid(midQ);
1525	if (rstart && rc == -EACCES)
1526		return -ERESTARTSYS;
1527	return rc;
1528}