fs/cifs/transport.c as of v4.6
 
   1/*
   2 *   fs/cifs/transport.c
   3 *
   4 *   Copyright (C) International Business Machines  Corp., 2002,2008
   5 *   Author(s): Steve French (sfrench@us.ibm.com)
   6 *   Jeremy Allison (jra@samba.org) 2006.
   7 *
   8 *   This library is free software; you can redistribute it and/or modify
   9 *   it under the terms of the GNU Lesser General Public License as published
  10 *   by the Free Software Foundation; either version 2.1 of the License, or
  11 *   (at your option) any later version.
  12 *
  13 *   This library is distributed in the hope that it will be useful,
  14 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
  16 *   the GNU Lesser General Public License for more details.
  17 *
  18 *   You should have received a copy of the GNU Lesser General Public License
  19 *   along with this library; if not, write to the Free Software
  20 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  21 */
  22
  23#include <linux/fs.h>
  24#include <linux/list.h>
  25#include <linux/gfp.h>
  26#include <linux/wait.h>
  27#include <linux/net.h>
  28#include <linux/delay.h>
  29#include <linux/freezer.h>
  30#include <linux/tcp.h>
  31#include <linux/highmem.h>
  32#include <asm/uaccess.h>
  33#include <asm/processor.h>
  34#include <linux/mempool.h>
  35#include "cifspdu.h"
  36#include "cifsglob.h"
  37#include "cifsproto.h"
  38#include "cifs_debug.h"
  39
  40void
  41cifs_wake_up_task(struct mid_q_entry *mid)
  42{
  43	wake_up_process(mid->callback_data);
  44}
  45
  46struct mid_q_entry *
  47AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
  48{
  49	struct mid_q_entry *temp;
  50
  51	if (server == NULL) {
  52		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
  53		return NULL;
  54	}
  55
  56	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
  57	if (temp == NULL)
  58		return temp;
  59	else {
  60		memset(temp, 0, sizeof(struct mid_q_entry));
  61		temp->mid = get_mid(smb_buffer);
  62		temp->pid = current->pid;
  63		temp->command = cpu_to_le16(smb_buffer->Command);
  64		cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
  65	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
  66		/* when mid allocated can be before when sent */
  67		temp->when_alloc = jiffies;
  68		temp->server = server;
  69
  70		/*
  71		 * The default is for the mid to be synchronous, so the
  72		 * default callback just wakes up the current task.
  73		 */
  74		temp->callback = cifs_wake_up_task;
  75		temp->callback_data = current;
  76	}
  77
  78	atomic_inc(&midCount);
  79	temp->mid_state = MID_REQUEST_ALLOCATED;
  80	return temp;
  81}
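/*
 * [annotation, not part of the original file] mid entries come from the
 * cifs_mid_poolp mempool, so a minimum reserve of entries survives memory
 * pressure. With a sleeping gfp mask such as GFP_NOFS, mempool_alloc()
 * waits for a pooled element rather than failing, which is why the NULL
 * check above could later be dropped (compare the v5.14.15 copy below).
 * A minimal sketch of the same pattern, with a hypothetical slab cache:
 *
 *	mempool_t *pool = mempool_create_slab_pool(16, foo_cachep);
 *	struct foo *obj = mempool_alloc(pool, GFP_NOFS);
 *	...
 *	mempool_free(obj, pool);
 *	mempool_destroy(pool);
 */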
  82
  83void
  84DeleteMidQEntry(struct mid_q_entry *midEntry)
  85{
  86#ifdef CONFIG_CIFS_STATS2
  87	__le16 command = midEntry->server->vals->lock_cmd;
  88	unsigned long now;
  89#endif
  90	midEntry->mid_state = MID_FREE;
  91	atomic_dec(&midCount);
  92	if (midEntry->large_buf)
  93		cifs_buf_release(midEntry->resp_buf);
  94	else
  95		cifs_small_buf_release(midEntry->resp_buf);
  96#ifdef CONFIG_CIFS_STATS2
  97	now = jiffies;
  98	/* commands taking longer than one second are indications that
  99	   something is wrong, unless it is quite a slow link or server */
 100	if ((now - midEntry->when_alloc) > HZ) {
 101		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
 102			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
 103			       midEntry->command, midEntry->mid);
 104			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
 105			       now - midEntry->when_alloc,
 106			       now - midEntry->when_sent,
 107			       now - midEntry->when_received);
 108		}
 109	}
 110#endif
 111	mempool_free(midEntry, cifs_mid_poolp);
 112}
 113
 114void
 115cifs_delete_mid(struct mid_q_entry *mid)
 116{
 117	spin_lock(&GlobalMid_Lock);
 118	list_del(&mid->qhead);
 119	spin_unlock(&GlobalMid_Lock);
 120
 121	DeleteMidQEntry(mid);
 122}
 123
 124/*
 125 * smb_send_kvec - send an array of kvecs to the server
 126 * @server:	Server to send the data to
 127 * @iov:	Pointer to array of kvecs
 128 * @n_vec:	length of kvec array
 129 * @sent:	amount of data sent on socket is stored here
 130 *
 131 * Our basic "send data to server" function. Should be called with srv_mutex
 132 * held. The caller is responsible for handling the results.
 133 */
 134static int
 135smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
 136		size_t *sent)
 137{
 138	int rc = 0;
 139	int i = 0;
 140	struct msghdr smb_msg;
 141	unsigned int remaining;
 142	size_t first_vec = 0;
 143	struct socket *ssocket = server->ssocket;
 144
 145	*sent = 0;
 146
 147	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
 148	smb_msg.msg_namelen = sizeof(struct sockaddr);
 149	smb_msg.msg_control = NULL;
 150	smb_msg.msg_controllen = 0;
 151	if (server->noblocksnd)
 152		smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
 153	else
 154		smb_msg.msg_flags = MSG_NOSIGNAL;
 155
 156	remaining = 0;
 157	for (i = 0; i < n_vec; i++)
 158		remaining += iov[i].iov_len;
 159
 160	i = 0;
 161	while (remaining) {
 162		/*
 163		 * If blocking send, we try 3 times, since each can block
 164		 * for 5 seconds. For nonblocking  we have to try more
 165		 * but wait increasing amounts of time allowing time for
 166		 * socket to clear.  The overall time we wait in either
 167		 * case to send on the socket is about 15 seconds.
 168		 * Similarly we wait for 15 seconds for a response from
 169		 * the server in SendReceive[2] for the server to send
 170		 * a response back for most types of requests (except
 171		 * SMB Write past end of file which can be slow, and
 172		 * blocking lock operations). NFS waits slightly longer
 173		 * than CIFS, but this can make it take longer for
 174		 * nonresponsive servers to be detected and 15 seconds
 175		 * is more than enough time for modern networks to
 176		 * send a packet.  In most cases if we fail to send
 177		 * after the retries we will kill the socket and
 178		 * reconnect which may clear the network problem.
 179		 */
 180		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
 181				    n_vec - first_vec, remaining);
 182		if (rc == -EAGAIN) {
 183			i++;
 184			if (i >= 14 || (!server->noblocksnd && (i > 2))) {
 185				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
 186					 ssocket);
 187				rc = -EAGAIN;
 188				break;
 189			}
 190			msleep(1 << i);
 191			continue;
 192		}
 193
 194		if (rc < 0)
 195			break;
 196
 197		/* send was at least partially successful */
 198		*sent += rc;
 199
 200		if (rc == remaining) {
 201			remaining = 0;
 202			break;
 203		}
 204
 205		if (rc > remaining) {
 206			cifs_dbg(VFS, "sent %d requested %d\n", rc, remaining);
 207			break;
 208		}
 209
 210		if (rc == 0) {
 211			/* should never happen, letting socket clear before
 212			   retrying is our only obvious option here */
 213			cifs_dbg(VFS, "tcp sent no data\n");
 214			msleep(500);
 215			continue;
 216		}
 217
 218		remaining -= rc;
 219
 220		/* the line below resets i */
 221		for (i = first_vec; i < n_vec; i++) {
 222			if (iov[i].iov_len) {
 223				if (rc > iov[i].iov_len) {
 224					rc -= iov[i].iov_len;
 225					iov[i].iov_len = 0;
 226				} else {
 227					iov[i].iov_base += rc;
 228					iov[i].iov_len -= rc;
 229					first_vec = i;
 230					break;
 231				}
 232			}
 233		}
 234
 235		i = 0; /* in case we get ENOSPC on the next send */
 236		rc = 0;
 237	}
 238	return rc;
 239}
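/*
 * [annotation, not part of the original file] The -EAGAIN path above is a
 * bounded exponential backoff: sleep 2, 4, 8, ... milliseconds between
 * attempts and give up after 14 tries, roughly the 15 seconds the comment
 * in the loop promises. Stripped to its bones, with a hypothetical
 * try_send():
 *
 *	int attempt = 0;
 *	while ((rc = try_send()) == -EAGAIN) {
 *		if (++attempt >= 14)
 *			break;
 *		msleep(1 << attempt);
 *	}
 */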
 240
 241/**
 242 * rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
 243 * @rqst: pointer to smb_rqst
 244 * @idx: index into the array of the page
 245 * @iov: pointer to struct kvec that will hold the result
 246 *
 247 * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
 248 * The page will be kmapped and the address placed into iov_base. The length
 249 * will then be adjusted according to the ptailoff.
 250 */
 251void
 252cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
 253			struct kvec *iov)
 254{
 255	/*
 256	 * FIXME: We could avoid this kmap altogether if we used
 257	 * kernel_sendpage instead of kernel_sendmsg. That will only
 258	 * work if signing is disabled though as sendpage inlines the
 259	 * page directly into the fraglist. If userspace modifies the
 260	 * page after we calculate the signature, then the server will
 261	 * reject it and may break the connection. kernel_sendmsg does
 262	 * an extra copy of the data and avoids that issue.
 263	 */
 264	iov->iov_base = kmap(rqst->rq_pages[idx]);
 265
 266	/* if last page, don't send beyond this offset into page */
 267	if (idx == (rqst->rq_npages - 1))
 268		iov->iov_len = rqst->rq_tailsz;
 269	else
 270		iov->iov_len = rqst->rq_pagesz;
 271}
 272
 273static unsigned long
 274rqst_len(struct smb_rqst *rqst)
 275{
 276	unsigned int i;
 277	struct kvec *iov = rqst->rq_iov;
 278	unsigned long buflen = 0;
 279
 280	/* total up iov array first */
 281	for (i = 0; i < rqst->rq_nvec; i++)
 282		buflen += iov[i].iov_len;
 283
 284	/* add in the page array if there is one */
 285	if (rqst->rq_npages) {
 286		buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
 287		buflen += rqst->rq_tailsz;
 288	}
 289
 290	return buflen;
 291}
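/*
 * [annotation, not part of the original file] Worked example of the length
 * calculation above: two kvecs of 24 and 100 bytes plus three pages with
 * rq_pagesz = 4096 and rq_tailsz = 512 give
 *
 *	buflen = (24 + 100) + 4096 * (3 - 1) + 512 = 8828 bytes
 *
 * i.e. every page except the last counts in full and the tail page
 * contributes only the bytes actually used.
 */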
 292
 293static int
 294smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 295{
 296	int rc;
 297	struct kvec *iov = rqst->rq_iov;
 298	int n_vec = rqst->rq_nvec;
 299	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
 300	unsigned long send_length;
 301	unsigned int i;
 302	size_t total_len = 0, sent;
 303	struct socket *ssocket = server->ssocket;
 304	int val = 1;
 305
 306	if (ssocket == NULL)
 307		return -ENOTSOCK;
 308
 309	/* sanity check send length */
 310	send_length = rqst_len(rqst);
 311	if (send_length != smb_buf_length + 4) {
 312		WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
 313			send_length, smb_buf_length);
 314		return -EIO;
 315	}
 316
 317	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
 318	dump_smb(iov[0].iov_base, iov[0].iov_len);
 319
 320	/* cork the socket */
 321	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
 322				(char *)&val, sizeof(val));
 323
 324	rc = smb_send_kvec(server, iov, n_vec, &sent);
 325	if (rc < 0)
 326		goto uncork;
 327
 328	total_len += sent;
 329
 330	/* now walk the page array and send each page in it */
 331	for (i = 0; i < rqst->rq_npages; i++) {
 332		struct kvec p_iov;
 333
 334		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
 335		rc = smb_send_kvec(server, &p_iov, 1, &sent);
 336		kunmap(rqst->rq_pages[i]);
 337		if (rc < 0)
 338			break;
 339
 340		total_len += sent;
 341	}
 342
 343uncork:
 344	/* uncork it */
 345	val = 0;
 346	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
 347				(char *)&val, sizeof(val));
 348
 349	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
 350		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
 351			 smb_buf_length + 4, total_len);
 352		/*
 353		 * If we have only sent part of an SMB then the next SMB could
 354		 * be taken as the remainder of this one. We need to kill the
 355		 * socket so the server throws away the partial SMB
 356		 */
 357		server->tcpStatus = CifsNeedReconnect;
 358	}
 359
 360	if (rc < 0 && rc != -EINTR)
 361		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
 362			 rc);
 363	else
 364		rc = 0;
 365
 366	return rc;
 367}
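/*
 * [annotation, not part of the original file] TCP_CORK above stops the
 * stack from emitting small segments while the header kvecs and data pages
 * are queued piecemeal; clearing it flushes the assembled SMB. The generic
 * shape of the pattern this function uses:
 *
 *	int val = 1;
 *	kernel_setsockopt(sock, SOL_TCP, TCP_CORK, (char *)&val, sizeof(val));
 *	... one or more kernel_sendmsg() calls ...
 *	val = 0;
 *	kernel_setsockopt(sock, SOL_TCP, TCP_CORK, (char *)&val, sizeof(val));
 */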
 368
 369static int
 370smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 371{
 372	struct smb_rqst rqst = { .rq_iov = iov,
 373				 .rq_nvec = n_vec };
 374
 375	return smb_send_rqst(server, &rqst);
 376}
 377
 378int
 379smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
 380	 unsigned int smb_buf_length)
 381{
 382	struct kvec iov;
 383
 384	iov.iov_base = smb_buffer;
 385	iov.iov_len = smb_buf_length + 4;
 386
 387	return smb_sendv(server, &iov, 1);
 388}
 389
 390static int
 391wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
 392		      int *credits)
 393{
 394	int rc;
 395
 396	spin_lock(&server->req_lock);
 397	if (timeout == CIFS_ASYNC_OP) {
 398		/* oplock breaks must not be held up */
 399		server->in_flight++;
 400		*credits -= 1;
 401		spin_unlock(&server->req_lock);
 402		return 0;
 403	}
 404
 405	while (1) {
 406		if (*credits <= 0) {
 407			spin_unlock(&server->req_lock);
 408			cifs_num_waiters_inc(server);
 409			rc = wait_event_killable(server->request_q,
 410						 has_credits(server, credits));
 411			cifs_num_waiters_dec(server);
 412			if (rc)
 413				return rc;
 414			spin_lock(&server->req_lock);
 415		} else {
 416			if (server->tcpStatus == CifsExiting) {
 417				spin_unlock(&server->req_lock);
 418				return -ENOENT;
 419			}
 420
 421			/*
 422			 * Can not count locking commands against total
 423			 * as they are allowed to block on server.
 424			 */
 425
 426			/* update # of requests on the wire to server */
 427			if (timeout != CIFS_BLOCKING_OP) {
 428				*credits -= 1;
 429				server->in_flight++;
 430			}
 431			spin_unlock(&server->req_lock);
 432			break;
 433		}
 434	}
 435	return 0;
 436}
 437
 438static int
 439wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
 440		      const int optype)
 441{
 442	int *val;
 443
 444	val = server->ops->get_credits_field(server, optype);
 445	/* Since an echo is already inflight, no need to wait to send another */
 446	if (*val <= 0 && optype == CIFS_ECHO_OP)
 447		return -EAGAIN;
 448	return wait_for_free_credits(server, timeout, val);
 449}
 450
 451int
 452cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
 453		      unsigned int *num, unsigned int *credits)
 454{
 455	*num = size;
 456	*credits = 0;
 457	return 0;
 458}
 459
 460static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
 461			struct mid_q_entry **ppmidQ)
 462{
 463	if (ses->server->tcpStatus == CifsExiting) {
 464		return -ENOENT;
 465	}
 466
 467	if (ses->server->tcpStatus == CifsNeedReconnect) {
 468		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
 469		return -EAGAIN;
 470	}
 471
 472	if (ses->status == CifsNew) {
 473		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
 474			(in_buf->Command != SMB_COM_NEGOTIATE))
 475			return -EAGAIN;
 476		/* else ok - we are setting up session */
 477	}
 478
 479	if (ses->status == CifsExiting) {
 480		/* check if SMB session is bad because we are setting it up */
 481		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
 482			return -EAGAIN;
 483		/* else ok - we are shutting down session */
 484	}
 485
 486	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
 487	if (*ppmidQ == NULL)
 488		return -ENOMEM;
 489	spin_lock(&GlobalMid_Lock);
 490	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
 491	spin_unlock(&GlobalMid_Lock);
 492	return 0;
 493}
 494
 495static int
 496wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 497{
 498	int error;
 499
 500	error = wait_event_freezekillable_unsafe(server->response_q,
 501				    midQ->mid_state != MID_REQUEST_SUBMITTED);
 502	if (error < 0)
 503		return -ERESTARTSYS;
 504
 505	return 0;
 506}
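/*
 * [annotation, not part of the original file] This wait pairs with the
 * demultiplex (cifsd) thread: when the matching response arrives, that
 * thread updates the mid and invokes its callback, which for synchronous
 * requests is cifs_wake_up_task() near the top of this file; in essence:
 *
 *	mid->mid_state = MID_RESPONSE_RECEIVED;
 *	mid->callback(mid);
 *
 * Only fatal signals interrupt the freezable wait, in which case the
 * caller gets -ERESTARTSYS and must cancel the mid itself.
 */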
 507
 508struct mid_q_entry *
 509cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 510{
 511	int rc;
 512	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
 513	struct mid_q_entry *mid;
 514
 515	/* enable signing if server requires it */
 516	if (server->sign)
 517		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 518
 519	mid = AllocMidQEntry(hdr, server);
 520	if (mid == NULL)
 521		return ERR_PTR(-ENOMEM);
 522
 523	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
 524	if (rc) {
 525		DeleteMidQEntry(mid);
 526		return ERR_PTR(rc);
 527	}
 528
 529	return mid;
 530}
 531
 532/*
 533 * Send a SMB request and set the callback function in the mid to handle
 534 * the result. Caller is responsible for dealing with timeouts.
 535 */
 536int
 537cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
 538		mid_receive_t *receive, mid_callback_t *callback,
 539		void *cbdata, const int flags)
 540{
 541	int rc, timeout, optype;
 542	struct mid_q_entry *mid;
 543	unsigned int credits = 0;
 544
 545	timeout = flags & CIFS_TIMEOUT_MASK;
 546	optype = flags & CIFS_OP_MASK;
 547
 548	if ((flags & CIFS_HAS_CREDITS) == 0) {
 549		rc = wait_for_free_request(server, timeout, optype);
 550		if (rc)
 551			return rc;
 552		credits = 1;
 553	}
 554
 555	mutex_lock(&server->srv_mutex);
 556	mid = server->ops->setup_async_request(server, rqst);
 557	if (IS_ERR(mid)) {
 558		mutex_unlock(&server->srv_mutex);
 559		add_credits_and_wake_if(server, credits, optype);
 560		return PTR_ERR(mid);
 561	}
 562
 563	mid->receive = receive;
 564	mid->callback = callback;
 565	mid->callback_data = cbdata;
 566	mid->mid_state = MID_REQUEST_SUBMITTED;
 567
 568	/* put it on the pending_mid_q */
 569	spin_lock(&GlobalMid_Lock);
 570	list_add_tail(&mid->qhead, &server->pending_mid_q);
 571	spin_unlock(&GlobalMid_Lock);
 572
 573
 574	cifs_in_send_inc(server);
 575	rc = smb_send_rqst(server, rqst);
 576	cifs_in_send_dec(server);
 577	cifs_save_when_sent(mid);
 578
 579	if (rc < 0) {
 580		server->sequence_number -= 2;
 581		cifs_delete_mid(mid);
 582	}
 583
 584	mutex_unlock(&server->srv_mutex);
 585
 586	if (rc == 0)
 587		return 0;
 588
 589	add_credits_and_wake_if(server, credits, optype);
 590	return rc;
 591}
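/*
 * [annotation, not part of the original file] Sketch of an asynchronous
 * caller (names hypothetical): hand in a completion callback plus private
 * data, and cifs_call_async() returns without waiting; the callback later
 * runs from the demultiplex thread, inspects mid_state and releases the
 * mid.
 *
 *	static void my_done(struct mid_q_entry *mid)
 *	{
 *		struct my_ctx *ctx = mid->callback_data;
 *		... check mid->mid_state, consume ctx ...
 *		DeleteMidQEntry(mid);
 *	}
 *
 *	rc = cifs_call_async(server, &rqst, NULL, my_done, ctx, 0);
 */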
 592
 593/*
 594 *
 595 * Send an SMB Request.  No response info (other than return code)
 596 * needs to be parsed.
 597 *
 598 * flags indicate the type of request buffer and how long to wait
 599 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 600 *
 601 */
 602int
 603SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
 604		 char *in_buf, int flags)
 605{
 606	int rc;
 607	struct kvec iov[1];
 608	int resp_buf_type;
 609
 610	iov[0].iov_base = in_buf;
 611	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
 612	flags |= CIFS_NO_RESP;
 613	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
 614	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
 615
 616	return rc;
 617}
 618
 619static int
 620cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
 621{
 622	int rc = 0;
 623
 624	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
 625		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
 626
 627	spin_lock(&GlobalMid_Lock);
 628	switch (mid->mid_state) {
 629	case MID_RESPONSE_RECEIVED:
 630		spin_unlock(&GlobalMid_Lock);
 631		return rc;
 632	case MID_RETRY_NEEDED:
 633		rc = -EAGAIN;
 634		break;
 635	case MID_RESPONSE_MALFORMED:
 636		rc = -EIO;
 637		break;
 638	case MID_SHUTDOWN:
 639		rc = -EHOSTDOWN;
 640		break;
 641	default:
 642		list_del_init(&mid->qhead);
 643		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
 644			 __func__, mid->mid, mid->mid_state);
 645		rc = -EIO;
 646	}
 647	spin_unlock(&GlobalMid_Lock);
 648
 649	mutex_lock(&server->srv_mutex);
 650	DeleteMidQEntry(mid);
 651	mutex_unlock(&server->srv_mutex);
 652	return rc;
 653}
 654
 655static inline int
 656send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
 657{
 658	return server->ops->send_cancel ?
 659				server->ops->send_cancel(server, buf, mid) : 0;
 660}
 661
 662int
 663cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
 664		   bool log_error)
 665{
 666	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
 667
 668	dump_smb(mid->resp_buf, min_t(u32, 92, len));
 669
 670	/* convert the length into a more usable form */
 671	if (server->sign) {
 672		struct kvec iov;
 673		int rc = 0;
 674		struct smb_rqst rqst = { .rq_iov = &iov,
 675					 .rq_nvec = 1 };
 676
 677		iov.iov_base = mid->resp_buf;
 678		iov.iov_len = len;
 679		/* FIXME: add code to kill session */
 680		rc = cifs_verify_signature(&rqst, server,
 681					   mid->sequence_number);
 682		if (rc)
 683			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
 684				 rc);
 685	}
 686
 687	/* BB special case reconnect tid and uid here? */
 688	return map_smb_to_linux_error(mid->resp_buf, log_error);
 689}
 690
 691struct mid_q_entry *
 692cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
 693{
 694	int rc;
 695	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
 696	struct mid_q_entry *mid;
 697
 698	rc = allocate_mid(ses, hdr, &mid);
 699	if (rc)
 700		return ERR_PTR(rc);
 701	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
 702	if (rc) {
 703		cifs_delete_mid(mid);
 704		return ERR_PTR(rc);
 705	}
 706	return mid;
 707}
 708
 709int
 710SendReceive2(const unsigned int xid, struct cifs_ses *ses,
 711	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
 712	     const int flags)
 713{
 714	int rc = 0;
 715	int timeout, optype;
 716	struct mid_q_entry *midQ;
 717	char *buf = iov[0].iov_base;
 718	unsigned int credits = 1;
 719	struct smb_rqst rqst = { .rq_iov = iov,
 720				 .rq_nvec = n_vec };
 721
 722	timeout = flags & CIFS_TIMEOUT_MASK;
 723	optype = flags & CIFS_OP_MASK;
 724
 725	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */
 726
 727	if ((ses == NULL) || (ses->server == NULL)) {
 728		cifs_small_buf_release(buf);
 729		cifs_dbg(VFS, "Null session\n");
 730		return -EIO;
 731	}
 732
 733	if (ses->server->tcpStatus == CifsExiting) {
 734		cifs_small_buf_release(buf);
 735		return -ENOENT;
 736	}
 737
 738	/*
 739	 * Ensure that we do not send more than 50 overlapping requests
 740	 * to the same server. We may make this configurable later or
 741	 * use ses->maxReq.
 742	 */
 743
 744	rc = wait_for_free_request(ses->server, timeout, optype);
 745	if (rc) {
 746		cifs_small_buf_release(buf);
 747		return rc;
 748	}
 749
 750	/*
 751	 * Make sure that we sign in the same order that we send on this socket
 752	 * and avoid races inside tcp sendmsg code that could cause corruption
 753	 * of smb data.
 754	 */
 755
 756	mutex_lock(&ses->server->srv_mutex);
 757
 758	midQ = ses->server->ops->setup_request(ses, &rqst);
 759	if (IS_ERR(midQ)) {
 760		mutex_unlock(&ses->server->srv_mutex);
 761		cifs_small_buf_release(buf);
 762		/* Update # of requests on wire to server */
 763		add_credits(ses->server, 1, optype);
 764		return PTR_ERR(midQ);
 765	}
 766
 767	midQ->mid_state = MID_REQUEST_SUBMITTED;
 768	cifs_in_send_inc(ses->server);
 769	rc = smb_sendv(ses->server, iov, n_vec);
 770	cifs_in_send_dec(ses->server);
 771	cifs_save_when_sent(midQ);
 772
 773	if (rc < 0)
 774		ses->server->sequence_number -= 2;
 775	mutex_unlock(&ses->server->srv_mutex);
 776
 777	if (rc < 0) {
 778		cifs_small_buf_release(buf);
 779		goto out;
 780	}
 781
 782	if (timeout == CIFS_ASYNC_OP) {
 783		cifs_small_buf_release(buf);
 784		goto out;
 785	}
 786
 787	rc = wait_for_response(ses->server, midQ);
 788	if (rc != 0) {
 789		send_cancel(ses->server, buf, midQ);
 790		spin_lock(&GlobalMid_Lock);
 791		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
 792			midQ->callback = DeleteMidQEntry;
 793			spin_unlock(&GlobalMid_Lock);
 794			cifs_small_buf_release(buf);
 795			add_credits(ses->server, 1, optype);
 796			return rc;
 797		}
 798		spin_unlock(&GlobalMid_Lock);
 799	}
 800
 801	cifs_small_buf_release(buf);
 802
 803	rc = cifs_sync_mid_result(midQ, ses->server);
 804	if (rc != 0) {
 805		add_credits(ses->server, 1, optype);
 806		return rc;
 807	}
 808
 809	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
 810		rc = -EIO;
 811		cifs_dbg(FYI, "Bad MID state?\n");
 812		goto out;
 813	}
 814
 815	buf = (char *)midQ->resp_buf;
 816	iov[0].iov_base = buf;
 817	iov[0].iov_len = get_rfc1002_length(buf) + 4;
 818	if (midQ->large_buf)
 819		*resp_buf_type = CIFS_LARGE_BUFFER;
 820	else
 821		*resp_buf_type = CIFS_SMALL_BUFFER;
 822
 823	credits = ses->server->ops->get_credits(midQ);
 824
 825	rc = ses->server->ops->check_receive(midQ, ses->server,
 826					     flags & CIFS_LOG_ERROR);
 827
 828	/* mark it so buf will not be freed by cifs_delete_mid */
 829	if ((flags & CIFS_NO_RESP) == 0)
 830		midQ->resp_buf = NULL;
 831out:
 832	cifs_delete_mid(midQ);
 833	add_credits(ses->server, credits, optype);
 834
 835	return rc;
 836}
 837
 838int
 839SendReceive(const unsigned int xid, struct cifs_ses *ses,
 840	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
 841	    int *pbytes_returned, const int timeout)
 842{
 843	int rc = 0;
 844	struct mid_q_entry *midQ;
 845
 846	if (ses == NULL) {
 847		cifs_dbg(VFS, "Null smb session\n");
 848		return -EIO;
 849	}
 850	if (ses->server == NULL) {
 851		cifs_dbg(VFS, "Null tcp session\n");
 852		return -EIO;
 853	}
 854
 855	if (ses->server->tcpStatus == CifsExiting)
 856		return -ENOENT;
 857
 858	/* Ensure that we do not send more than 50 overlapping requests
 859	   to the same server. We may make this configurable later or
 860	   use ses->maxReq */
 861
 862	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
 863			MAX_CIFS_HDR_SIZE - 4) {
 864		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
 865			 be32_to_cpu(in_buf->smb_buf_length));
 866		return -EIO;
 867	}
 868
 869	rc = wait_for_free_request(ses->server, timeout, 0);
 870	if (rc)
 871		return rc;
 872
 873	/* make sure that we sign in the same order that we send on this socket
 874	   and avoid races inside tcp sendmsg code that could cause corruption
 875	   of smb data */
 876
 877	mutex_lock(&ses->server->srv_mutex);
 878
 879	rc = allocate_mid(ses, in_buf, &midQ);
 880	if (rc) {
 881		mutex_unlock(&ses->server->srv_mutex);
 882		/* Update # of requests on wire to server */
 883		add_credits(ses->server, 1, 0);
 884		return rc;
 885	}
 886
 887	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
 888	if (rc) {
 889		mutex_unlock(&ses->server->srv_mutex);
 890		goto out;
 891	}
 892
 893	midQ->mid_state = MID_REQUEST_SUBMITTED;
 894
 895	cifs_in_send_inc(ses->server);
 896	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
 897	cifs_in_send_dec(ses->server);
 898	cifs_save_when_sent(midQ);
 899
 900	if (rc < 0)
 901		ses->server->sequence_number -= 2;
 902
 903	mutex_unlock(&ses->server->srv_mutex);
 904
 905	if (rc < 0)
 906		goto out;
 907
 908	if (timeout == CIFS_ASYNC_OP)
 909		goto out;
 910
 911	rc = wait_for_response(ses->server, midQ);
 912	if (rc != 0) {
 913		send_cancel(ses->server, in_buf, midQ);
 914		spin_lock(&GlobalMid_Lock);
 915		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
 916			/* no longer considered to be "in-flight" */
 917			midQ->callback = DeleteMidQEntry;
 918			spin_unlock(&GlobalMid_Lock);
 919			add_credits(ses->server, 1, 0);
 920			return rc;
 921		}
 922		spin_unlock(&GlobalMid_Lock);
 923	}
 924
 925	rc = cifs_sync_mid_result(midQ, ses->server);
 926	if (rc != 0) {
 927		add_credits(ses->server, 1, 0);
 928		return rc;
 929	}
 930
 931	if (!midQ->resp_buf || !out_buf ||
 932	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
 933		rc = -EIO;
 934		cifs_dbg(VFS, "Bad MID state?\n");
 935		goto out;
 936	}
 937
 938	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
 939	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
 940	rc = cifs_check_receive(midQ, ses->server, 0);
 941out:
 942	cifs_delete_mid(midQ);
 943	add_credits(ses->server, 1, 0);
 944
 945	return rc;
 946}
 947
 948/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 949   blocking lock to return. */
 950
 951static int
 952send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
 953			struct smb_hdr *in_buf,
 954			struct smb_hdr *out_buf)
 955{
 956	int bytes_returned;
 957	struct cifs_ses *ses = tcon->ses;
 958	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
 959
 960	/* We just modify the current in_buf to change
 961	   the type of lock from LOCKING_ANDX_SHARED_LOCK
 962	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
 963	   LOCKING_ANDX_CANCEL_LOCK. */
 964
 965	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
 966	pSMB->Timeout = 0;
 967	pSMB->hdr.Mid = get_next_mid(ses->server);
 968
 969	return SendReceive(xid, ses, in_buf, out_buf,
 970			&bytes_returned, 0);
 971}
 972
 973int
 974SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
 975	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
 976	    int *pbytes_returned)
 977{
 978	int rc = 0;
 979	int rstart = 0;
 980	struct mid_q_entry *midQ;
 981	struct cifs_ses *ses;
 982
 983	if (tcon == NULL || tcon->ses == NULL) {
 984		cifs_dbg(VFS, "Null smb session\n");
 985		return -EIO;
 986	}
 987	ses = tcon->ses;
 988
 989	if (ses->server == NULL) {
 990		cifs_dbg(VFS, "Null tcp session\n");
 991		return -EIO;
 992	}
 993
 994	if (ses->server->tcpStatus == CifsExiting)
 995		return -ENOENT;
 996
 997	/* Ensure that we do not send more than 50 overlapping requests
 998	   to the same server. We may make this configurable later or
 999	   use ses->maxReq */
1000
1001	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
1002			MAX_CIFS_HDR_SIZE - 4) {
1003		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
1004			 be32_to_cpu(in_buf->smb_buf_length));
1005		return -EIO;
1006	}
1007
1008	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
1009	if (rc)
1010		return rc;
1011
1012	/* make sure that we sign in the same order that we send on this socket
1013	   and avoid races inside tcp sendmsg code that could cause corruption
1014	   of smb data */
1015
1016	mutex_lock(&ses->server->srv_mutex);
1017
1018	rc = allocate_mid(ses, in_buf, &midQ);
1019	if (rc) {
1020		mutex_unlock(&ses->server->srv_mutex);
1021		return rc;
1022	}
1023
1024	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
1025	if (rc) {
1026		cifs_delete_mid(midQ);
1027		mutex_unlock(&ses->server->srv_mutex);
1028		return rc;
1029	}
1030
1031	midQ->mid_state = MID_REQUEST_SUBMITTED;
1032	cifs_in_send_inc(ses->server);
1033	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
1034	cifs_in_send_dec(ses->server);
1035	cifs_save_when_sent(midQ);
1036
1037	if (rc < 0)
1038		ses->server->sequence_number -= 2;
1039
1040	mutex_unlock(&ses->server->srv_mutex);
1041
1042	if (rc < 0) {
1043		cifs_delete_mid(midQ);
1044		return rc;
1045	}
1046
1047	/* Wait for a reply - allow signals to interrupt. */
1048	rc = wait_event_interruptible(ses->server->response_q,
1049		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
1050		((ses->server->tcpStatus != CifsGood) &&
1051		 (ses->server->tcpStatus != CifsNew)));
1052
1053	/* Were we interrupted by a signal ? */
1054	if ((rc == -ERESTARTSYS) &&
1055		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1056		((ses->server->tcpStatus == CifsGood) ||
1057		 (ses->server->tcpStatus == CifsNew))) {
1058
1059		if (in_buf->Command == SMB_COM_TRANSACTION2) {
1060			/* POSIX lock. We send a NT_CANCEL SMB to cause the
1061			   blocking lock to return. */
1062			rc = send_cancel(ses->server, in_buf, midQ);
1063			if (rc) {
1064				cifs_delete_mid(midQ);
1065				return rc;
1066			}
1067		} else {
1068			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1069			   to cause the blocking lock to return. */
1070
1071			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1072
1073			/* If we get -ENOLCK back the lock may have
1074			   already been removed. Don't exit in this case. */
1075			if (rc && rc != -ENOLCK) {
1076				cifs_delete_mid(midQ);
1077				return rc;
1078			}
1079		}
1080
1081		rc = wait_for_response(ses->server, midQ);
1082		if (rc) {
1083			send_cancel(ses->server, in_buf, midQ);
1084			spin_lock(&GlobalMid_Lock);
1085			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1086				/* no longer considered to be "in-flight" */
1087				midQ->callback = DeleteMidQEntry;
1088				spin_unlock(&GlobalMid_Lock);
1089				return rc;
1090			}
1091			spin_unlock(&GlobalMid_Lock);
1092		}
1093
1094		/* We got the response - restart system call. */
1095		rstart = 1;
1096	}
1097
1098	rc = cifs_sync_mid_result(midQ, ses->server);
1099	if (rc != 0)
1100		return rc;
1101
1102	/* rcvd frame is ok */
1103	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1104		rc = -EIO;
1105		cifs_dbg(VFS, "Bad MID state?\n");
1106		goto out;
1107	}
1108
1109	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1110	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1111	rc = cifs_check_receive(midQ, ses->server, 0);
1112out:
1113	cifs_delete_mid(midQ);
1114	if (rstart && rc == -EACCES)
1115		return -ERESTARTSYS;
1116	return rc;
1117}
fs/cifs/transport.c as of v5.14.15
   1// SPDX-License-Identifier: LGPL-2.1
   2/*
   3 *   fs/cifs/transport.c
   4 *
   5 *   Copyright (C) International Business Machines  Corp., 2002,2008
   6 *   Author(s): Steve French (sfrench@us.ibm.com)
   7 *   Jeremy Allison (jra@samba.org) 2006.
   8 *
   9 */
  10
  11#include <linux/fs.h>
  12#include <linux/list.h>
  13#include <linux/gfp.h>
  14#include <linux/wait.h>
  15#include <linux/net.h>
  16#include <linux/delay.h>
  17#include <linux/freezer.h>
  18#include <linux/tcp.h>
  19#include <linux/bvec.h>
  20#include <linux/highmem.h>
  21#include <linux/uaccess.h>
  22#include <asm/processor.h>
  23#include <linux/mempool.h>
  24#include <linux/sched/signal.h>
  25#include "cifspdu.h"
  26#include "cifsglob.h"
  27#include "cifsproto.h"
  28#include "cifs_debug.h"
  29#include "smb2proto.h"
  30#include "smbdirect.h"
  31
  32/* Max number of iovectors we can use off the stack when sending requests. */
  33#define CIFS_MAX_IOV_SIZE 8
  34
  35void
  36cifs_wake_up_task(struct mid_q_entry *mid)
  37{
  38	wake_up_process(mid->callback_data);
  39}
  40
  41struct mid_q_entry *
  42AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
  43{
  44	struct mid_q_entry *temp;
  45
  46	if (server == NULL) {
  47		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
  48		return NULL;
  49	}
  50
  51	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
  52	memset(temp, 0, sizeof(struct mid_q_entry));
  53	kref_init(&temp->refcount);
  54	temp->mid = get_mid(smb_buffer);
  55	temp->pid = current->pid;
  56	temp->command = cpu_to_le16(smb_buffer->Command);
  57	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
  58	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
  59	/* when mid allocated can be before when sent */
  60	temp->when_alloc = jiffies;
  61	temp->server = server;
  62
  63	/*
  64	 * The default is for the mid to be synchronous, so the
  65	 * default callback just wakes up the current task.
  66	 */
  67	get_task_struct(current);
  68	temp->creator = current;
  69	temp->callback = cifs_wake_up_task;
  70	temp->callback_data = current;
  71
  72	atomic_inc(&midCount);
  73	temp->mid_state = MID_REQUEST_ALLOCATED;
  74	return temp;
  75}
  76
  77static void _cifs_mid_q_entry_release(struct kref *refcount)
  78{
  79	struct mid_q_entry *midEntry =
  80			container_of(refcount, struct mid_q_entry, refcount);
  81#ifdef CONFIG_CIFS_STATS2
  82	__le16 command = midEntry->server->vals->lock_cmd;
  83	__u16 smb_cmd = le16_to_cpu(midEntry->command);
  84	unsigned long now;
  85	unsigned long roundtrip_time;
  86#endif
  87	struct TCP_Server_Info *server = midEntry->server;
  88
  89	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
  90	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
  91	    server->ops->handle_cancelled_mid)
  92		server->ops->handle_cancelled_mid(midEntry, server);
  93
  94	midEntry->mid_state = MID_FREE;
  95	atomic_dec(&midCount);
  96	if (midEntry->large_buf)
  97		cifs_buf_release(midEntry->resp_buf);
  98	else
  99		cifs_small_buf_release(midEntry->resp_buf);
 100#ifdef CONFIG_CIFS_STATS2
 101	now = jiffies;
 102	if (now < midEntry->when_alloc)
 103		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
 104	roundtrip_time = now - midEntry->when_alloc;
 105
 106	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
 107		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
 108			server->slowest_cmd[smb_cmd] = roundtrip_time;
 109			server->fastest_cmd[smb_cmd] = roundtrip_time;
 110		} else {
 111			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
 112				server->slowest_cmd[smb_cmd] = roundtrip_time;
 113			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
 114				server->fastest_cmd[smb_cmd] = roundtrip_time;
 115		}
 116		cifs_stats_inc(&server->num_cmds[smb_cmd]);
 117		server->time_per_cmd[smb_cmd] += roundtrip_time;
 118	}
 119	/*
 120	 * commands taking longer than one second (default) can be indications
 121	 * that something is wrong, unless it is quite a slow link or a very
 122	 * busy server. Note that this calc is unlikely or impossible to wrap
 123	 * as long as slow_rsp_threshold is not set way above recommended max
 124	 * value (32767 ie 9 hours) and is generally harmless even if wrong
 125	 * since only affects debug counters - so leaving the calc as simple
 126	 * comparison rather than doing multiple conversions and overflow
 127	 * checks
 128	 */
 129	if ((slow_rsp_threshold != 0) &&
 130	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
 131	    (midEntry->command != command)) {
 132		/*
 133		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
 134		 * NB: le16_to_cpu returns unsigned so can not be negative below
 135		 */
 136		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
 137			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
 138
 139		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
 140			       midEntry->when_sent, midEntry->when_received);
 141		if (cifsFYI & CIFS_TIMER) {
 142			pr_debug("slow rsp: cmd %d mid %llu",
 143				 midEntry->command, midEntry->mid);
 144			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
 145				  now - midEntry->when_alloc,
 146				  now - midEntry->when_sent,
 147				  now - midEntry->when_received);
 148		}
 149	}
 150#endif
 151	put_task_struct(midEntry->creator);
 152
 153	mempool_free(midEntry, cifs_mid_poolp);
 154}
 155
 156void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
 157{
 158	spin_lock(&GlobalMid_Lock);
 159	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
 160	spin_unlock(&GlobalMid_Lock);
 161}
 162
 163void DeleteMidQEntry(struct mid_q_entry *midEntry)
 164{
 165	cifs_mid_q_entry_release(midEntry);
 166}
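/*
 * [annotation, not part of the original file] Mid lifetime is reference
 * counted in this version: kref_init() in AllocMidQEntry() starts the
 * count at 1, and _cifs_mid_q_entry_release() frees the entry back to the
 * mempool only when the last reference is dropped. The bare kref pattern:
 *
 *	kref_init(&obj->refcount);                count starts at 1
 *	kref_get(&obj->refcount);                 extra holder
 *	kref_put(&obj->refcount, release_fn);     release_fn runs at zero
 */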
 167
 168void
 169cifs_delete_mid(struct mid_q_entry *mid)
 170{
 171	spin_lock(&GlobalMid_Lock);
 172	if (!(mid->mid_flags & MID_DELETED)) {
 173		list_del_init(&mid->qhead);
 174		mid->mid_flags |= MID_DELETED;
 175	}
 176	spin_unlock(&GlobalMid_Lock);
 177
 178	DeleteMidQEntry(mid);
 179}
 180
 181/*
 182 * smb_send_kvec - send an array of kvecs to the server
 183 * @server:	Server to send the data to
 184 * @smb_msg:	Message to send
 185 * @sent:	amount of data sent on socket is stored here
 186 *
 187 * Our basic "send data to server" function. Should be called with srv_mutex
 188 * held. The caller is responsible for handling the results.
 189 */
 190static int
 191smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
 192	      size_t *sent)
 193{
 194	int rc = 0;
 195	int retries = 0;
 196	struct socket *ssocket = server->ssocket;
 197
 198	*sent = 0;
 199
 200	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
 201	smb_msg->msg_namelen = sizeof(struct sockaddr);
 202	smb_msg->msg_control = NULL;
 203	smb_msg->msg_controllen = 0;
 204	if (server->noblocksnd)
 205		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
 206	else
 207		smb_msg->msg_flags = MSG_NOSIGNAL;
 208
 209	while (msg_data_left(smb_msg)) {
 210		/*
 211		 * If blocking send, we try 3 times, since each can block
 212		 * for 5 seconds. For nonblocking  we have to try more
 213		 * but wait increasing amounts of time allowing time for
 214		 * socket to clear.  The overall time we wait in either
 215		 * case to send on the socket is about 15 seconds.
 216		 * Similarly we wait for 15 seconds for a response from
 217		 * the server in SendReceive[2] for the server to send
 218		 * a response back for most types of requests (except
 219		 * SMB Write past end of file which can be slow, and
 220		 * blocking lock operations). NFS waits slightly longer
 221		 * than CIFS, but this can make it take longer for
 222		 * nonresponsive servers to be detected and 15 seconds
 223		 * is more than enough time for modern networks to
 224		 * send a packet.  In most cases if we fail to send
 225		 * after the retries we will kill the socket and
 226		 * reconnect which may clear the network problem.
 227		 */
 228		rc = sock_sendmsg(ssocket, smb_msg);
 229		if (rc == -EAGAIN) {
 230			retries++;
 231			if (retries >= 14 ||
 232			    (!server->noblocksnd && (retries > 2))) {
 233				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
 234					 ssocket);
 235				return -EAGAIN;
 236			}
 237			msleep(1 << retries);
 238			continue;
 239		}
 240
 241		if (rc < 0)
 242			return rc;
 243
 244		if (rc == 0) {
 245			/* should never happen, letting socket clear before
 246			   retrying is our only obvious option here */
 247			cifs_server_dbg(VFS, "tcp sent no data\n");
 248			msleep(500);
 249			continue;
 250		}
 251
 252		/* send was at least partially successful */
 253		*sent += rc;
 254		retries = 0; /* in case we get ENOSPC on the next send */
 255	}
 256	return 0;
 257}
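/*
 * [annotation, not part of the original file] The rewrite above drives the
 * socket through an iov_iter-backed msghdr: sock_sendmsg() advances the
 * iterator by however much it sent, so the manual first_vec/iov_len
 * bookkeeping of the v4.6 version disappears. Caller-side setup, in
 * essence:
 *
 *	struct msghdr msg = {};
 *	iov_iter_kvec(&msg.msg_iter, WRITE, iov, n_vec, size);
 *	while (msg_data_left(&msg))
 *		rc = sock_sendmsg(sock, &msg);
 */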
 258
 259unsigned long
 260smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 261{
 262	unsigned int i;
 263	struct kvec *iov;
 264	int nvec;
 265	unsigned long buflen = 0;
 266
 267	if (server->vals->header_preamble_size == 0 &&
 268	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
 269		iov = &rqst->rq_iov[1];
 270		nvec = rqst->rq_nvec - 1;
 271	} else {
 272		iov = rqst->rq_iov;
 273		nvec = rqst->rq_nvec;
 274	}
 275
 276	/* total up iov array first */
 277	for (i = 0; i < nvec; i++)
 278		buflen += iov[i].iov_len;
 279
 280	/*
 281	 * Add in the page array if there is one. The caller needs to make
 282	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
 283	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
 284	 * PAGE_SIZE.
 285	 */
 286	if (rqst->rq_npages) {
 287		if (rqst->rq_npages == 1)
 288			buflen += rqst->rq_tailsz;
 289		else {
 290			/*
 291			 * If there is more than one page, calculate the
 292			 * buffer length based on rq_offset and rq_tailsz
 293			 */
 294			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
 295					rqst->rq_offset;
 296			buflen += rqst->rq_tailsz;
 297		}
 298	}
 299
 300	return buflen;
 301}
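/*
 * [annotation, not part of the original file] Worked example of the page
 * arithmetic above: three pages with rq_pagesz = 4096, data starting at
 * rq_offset = 100 within the first page, and rq_tailsz = 512 bytes used in
 * the last page give
 *
 *	buflen += 4096 * (3 - 1) - 100;    first two pages, minus the offset
 *	buflen += 512;                     partial tail page
 *
 * for 8604 bytes of page data in total.
 */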
 302
 303static int
 304__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
 305		struct smb_rqst *rqst)
 306{
 307	int rc = 0;
 308	struct kvec *iov;
 309	int n_vec;
 310	unsigned int send_length = 0;
 311	unsigned int i, j;
 312	sigset_t mask, oldmask;
 313	size_t total_len = 0, sent, size;
 314	struct socket *ssocket = server->ssocket;
 315	struct msghdr smb_msg;
 316	__be32 rfc1002_marker;
 317
 318	if (cifs_rdma_enabled(server)) {
 319		/* return -EAGAIN when connecting or reconnecting */
 320		rc = -EAGAIN;
 321		if (server->smbd_conn)
 322			rc = smbd_send(server, num_rqst, rqst);
 323		goto smbd_done;
 324	}
 325
 326	if (ssocket == NULL)
 327		return -EAGAIN;
 328
 329	if (fatal_signal_pending(current)) {
 330		cifs_dbg(FYI, "signal pending before send request\n");
 331		return -ERESTARTSYS;
 332	}
 333
 334	/* cork the socket */
 335	tcp_sock_set_cork(ssocket->sk, true);
 336
 337	for (j = 0; j < num_rqst; j++)
 338		send_length += smb_rqst_len(server, &rqst[j]);
 339	rfc1002_marker = cpu_to_be32(send_length);
 340
 341	/*
 342	 * We should not allow signals to interrupt the network send because
 343	 * any partial send will cause session reconnects thus increasing
 344	 * latency of system calls and overload a server with unnecessary
 345	 * requests.
 346	 */
 347
 348	sigfillset(&mask);
 349	sigprocmask(SIG_BLOCK, &mask, &oldmask);
 350
 351	/* Generate a rfc1002 marker for SMB2+ */
 352	if (server->vals->header_preamble_size == 0) {
 353		struct kvec hiov = {
 354			.iov_base = &rfc1002_marker,
 355			.iov_len  = 4
 356		};
 357		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
 358		rc = smb_send_kvec(server, &smb_msg, &sent);
 359		if (rc < 0)
 360			goto unmask;
 361
 362		total_len += sent;
 363		send_length += 4;
 364	}
 365
 366	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
 367
 368	for (j = 0; j < num_rqst; j++) {
 369		iov = rqst[j].rq_iov;
 370		n_vec = rqst[j].rq_nvec;
 371
 372		size = 0;
 373		for (i = 0; i < n_vec; i++) {
 374			dump_smb(iov[i].iov_base, iov[i].iov_len);
 375			size += iov[i].iov_len;
 376		}
 377
 378		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
 379
 380		rc = smb_send_kvec(server, &smb_msg, &sent);
 381		if (rc < 0)
 382			goto unmask;
 383
 384		total_len += sent;
 385
 386		/* now walk the page array and send each page in it */
 387		for (i = 0; i < rqst[j].rq_npages; i++) {
 388			struct bio_vec bvec;
 389
 390			bvec.bv_page = rqst[j].rq_pages[i];
 391			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
 392					     &bvec.bv_offset);
 393
 394			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
 395				      &bvec, 1, bvec.bv_len);
 396			rc = smb_send_kvec(server, &smb_msg, &sent);
 397			if (rc < 0)
 398				break;
 399
 400			total_len += sent;
 401		}
 402	}
 403
 404unmask:
 405	sigprocmask(SIG_SETMASK, &oldmask, NULL);
 406
 407	/*
 408	 * If signal is pending but we have already sent the whole packet to
 409	 * the server we need to return success status to allow a corresponding
 410	 * mid entry to be kept in the pending requests queue thus allowing
 411	 * to handle responses from the server by the client.
 412	 *
 413	 * If only part of the packet has been sent there is no need to hide
 414	 * interrupt because the session will be reconnected anyway, so there
 415	 * won't be any response from the server to handle.
 416	 */
 417
 418	if (signal_pending(current) && (total_len != send_length)) {
 419		cifs_dbg(FYI, "signal is pending after attempt to send\n");
 420		rc = -ERESTARTSYS;
 421	}
 422
 423	/* uncork it */
 424	tcp_sock_set_cork(ssocket->sk, false);
 425
 426	if ((total_len > 0) && (total_len != send_length)) {
 427		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
 428			 send_length, total_len);
 429		/*
 430		 * If we have only sent part of an SMB then the next SMB could
 431		 * be taken as the remainder of this one. We need to kill the
 432		 * socket so the server throws away the partial SMB
 433		 */
 434		spin_lock(&GlobalMid_Lock);
 435		server->tcpStatus = CifsNeedReconnect;
 436		spin_unlock(&GlobalMid_Lock);
 437		trace_smb3_partial_send_reconnect(server->CurrentMid,
 438						  server->conn_id, server->hostname);
 439	}
 440smbd_done:
 441	if (rc < 0 && rc != -EINTR)
 442		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
 443			 rc);
 444	else if (rc > 0)
 445		rc = 0;
 446
 447	return rc;
 448}
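/*
 * [annotation, not part of the original file] Blocking every signal for the
 * duration of the send (per the comment above) ensures one SMB is never
 * split by a signal-interrupted write, which would force a reconnect. The
 * sigprocmask pattern used here:
 *
 *	sigset_t mask, oldmask;
 *	sigfillset(&mask);
 *	sigprocmask(SIG_BLOCK, &mask, &oldmask);
 *	... socket writes that must not be interrupted ...
 *	sigprocmask(SIG_SETMASK, &oldmask, NULL);
 */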
 449
 450static int
 451smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
 452	      struct smb_rqst *rqst, int flags)
 453{
 454	struct kvec iov;
 455	struct smb2_transform_hdr *tr_hdr;
 456	struct smb_rqst cur_rqst[MAX_COMPOUND];
 457	int rc;
 458
 459	if (!(flags & CIFS_TRANSFORM_REQ))
 460		return __smb_send_rqst(server, num_rqst, rqst);
 461
 462	if (num_rqst > MAX_COMPOUND - 1)
 463		return -ENOMEM;
 464
 465	if (!server->ops->init_transform_rq) {
 466		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
 467		return -EIO;
 468	}
 469
 470	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
 471	if (!tr_hdr)
 472		return -ENOMEM;
 473
 474	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
 475	memset(&iov, 0, sizeof(iov));
 476	memset(tr_hdr, 0, sizeof(*tr_hdr));
 477
 478	iov.iov_base = tr_hdr;
 479	iov.iov_len = sizeof(*tr_hdr);
 480	cur_rqst[0].rq_iov = &iov;
 481	cur_rqst[0].rq_nvec = 1;
 482
 483	rc = server->ops->init_transform_rq(server, num_rqst + 1,
 484					    &cur_rqst[0], rqst);
 485	if (rc)
 486		goto out;
 487
 488	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
 489	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
 490out:
 491	kfree(tr_hdr);
 492	return rc;
 493}
 494
 495int
 496smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
 497	 unsigned int smb_buf_length)
 498{
 499	struct kvec iov[2];
 500	struct smb_rqst rqst = { .rq_iov = iov,
 501				 .rq_nvec = 2 };
 502
 503	iov[0].iov_base = smb_buffer;
 504	iov[0].iov_len = 4;
 505	iov[1].iov_base = (char *)smb_buffer + 4;
 506	iov[1].iov_len = smb_buf_length;
 507
 508	return __smb_send_rqst(server, 1, &rqst);
 509}
 510
 511static int
 512wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
 513		      const int timeout, const int flags,
 514		      unsigned int *instance)
 515{
 516	long rc;
 517	int *credits;
 518	int optype;
 519	long int t;
 520	int scredits, in_flight;
 521
 522	if (timeout < 0)
 523		t = MAX_JIFFY_OFFSET;
 524	else
 525		t = msecs_to_jiffies(timeout);
 526
 527	optype = flags & CIFS_OP_MASK;
 528
 529	*instance = 0;
 530
 531	credits = server->ops->get_credits_field(server, optype);
 532	/* Since an echo is already inflight, no need to wait to send another */
 533	if (*credits <= 0 && optype == CIFS_ECHO_OP)
 534		return -EAGAIN;
 535
 536	spin_lock(&server->req_lock);
 537	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
 538		/* oplock breaks must not be held up */
 539		server->in_flight++;
 540		if (server->in_flight > server->max_in_flight)
 541			server->max_in_flight = server->in_flight;
 542		*credits -= 1;
 543		*instance = server->reconnect_instance;
 544		scredits = *credits;
 545		in_flight = server->in_flight;
 546		spin_unlock(&server->req_lock);
 547
 548		trace_smb3_add_credits(server->CurrentMid,
 549				server->conn_id, server->hostname, scredits, -1, in_flight);
 550		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
 551				__func__, 1, scredits);
 552
 553		return 0;
 554	}
 555
 556	while (1) {
 557		if (*credits < num_credits) {
 558			scredits = *credits;
 559			spin_unlock(&server->req_lock);
 560
 561			cifs_num_waiters_inc(server);
 562			rc = wait_event_killable_timeout(server->request_q,
 563				has_credits(server, credits, num_credits), t);
 564			cifs_num_waiters_dec(server);
 565			if (!rc) {
 566				spin_lock(&server->req_lock);
 567				scredits = *credits;
 568				in_flight = server->in_flight;
 569				spin_unlock(&server->req_lock);
 570
 571				trace_smb3_credit_timeout(server->CurrentMid,
 572						server->conn_id, server->hostname, scredits,
 573						num_credits, in_flight);
 574				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
 575						timeout);
 576				return -EBUSY;
 577			}
 578			if (rc == -ERESTARTSYS)
 579				return -ERESTARTSYS;
 580			spin_lock(&server->req_lock);
 581		} else {
 582			if (server->tcpStatus == CifsExiting) {
 583				spin_unlock(&server->req_lock);
 584				return -ENOENT;
 585			}
 586
 587			/*
 588			 * For normal commands, reserve the last MAX_COMPOUND
 589			 * credits to compound requests.
 590			 * Otherwise these compounds could be permanently
 591			 * starved for credits by single-credit requests.
 592			 *
 593			 * To prevent spinning CPU, block this thread until
 594			 * there are >MAX_COMPOUND credits available.
 595			 * But only do this is we already have a lot of
 596			 * credits in flight to avoid triggering this check
 597			 * for servers that are slow to hand out credits on
 598			 * new sessions.
 599			 */
 600			if (!optype && num_credits == 1 &&
 601			    server->in_flight > 2 * MAX_COMPOUND &&
 602			    *credits <= MAX_COMPOUND) {
 603				spin_unlock(&server->req_lock);
 604
 605				cifs_num_waiters_inc(server);
 606				rc = wait_event_killable_timeout(
 607					server->request_q,
 608					has_credits(server, credits,
 609						    MAX_COMPOUND + 1),
 610					t);
 611				cifs_num_waiters_dec(server);
 612				if (!rc) {
 613					spin_lock(&server->req_lock);
 614					scredits = *credits;
 615					in_flight = server->in_flight;
 616					spin_unlock(&server->req_lock);
 617
 618					trace_smb3_credit_timeout(
 619							server->CurrentMid,
 620							server->conn_id, server->hostname,
 621							scredits, num_credits, in_flight);
 622					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
 623							timeout);
 624					return -EBUSY;
 625				}
 626				if (rc == -ERESTARTSYS)
 627					return -ERESTARTSYS;
 628				spin_lock(&server->req_lock);
 629				continue;
 630			}
 631
 632			/*
 633			 * Can not count locking commands against total
 634			 * as they are allowed to block on server.
 635			 */
 636
 637			/* update # of requests on the wire to server */
 638			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
 639				*credits -= num_credits;
 640				server->in_flight += num_credits;
 641				if (server->in_flight > server->max_in_flight)
 642					server->max_in_flight = server->in_flight;
 643				*instance = server->reconnect_instance;
 644			}
 645			scredits = *credits;
 646			in_flight = server->in_flight;
 647			spin_unlock(&server->req_lock);
 648
 649			trace_smb3_add_credits(server->CurrentMid,
 650					server->conn_id, server->hostname, scredits,
 651					-(num_credits), in_flight);
 652			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
 653					__func__, num_credits, scredits);
 654			break;
 655		}
 656	}
 657	return 0;
 658}
 659
 660static int
 661wait_for_free_request(struct TCP_Server_Info *server, const int flags,
 662		      unsigned int *instance)
 663{
 664	return wait_for_free_credits(server, 1, -1, flags,
 665				     instance);
 666}
 667
 668static int
 669wait_for_compound_request(struct TCP_Server_Info *server, int num,
 670			  const int flags, unsigned int *instance)
 671{
 672	int *credits;
 673	int scredits, in_flight;
 674
 675	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
 676
 677	spin_lock(&server->req_lock);
 678	scredits = *credits;
 679	in_flight = server->in_flight;
 680
 681	if (*credits < num) {
 682		/*
 683		 * If the server is tight on resources or just gives us less
 684		 * credits for other reasons (e.g. requests are coming out of
 685		 * order and the server delays granting more credits until it
 686		 * processes a missing mid) and we exhausted most available
 687		 * credits there may be situations when we try to send
 688		 * a compound request but we don't have enough credits. At this
 689		 * point the client needs to decide if it should wait for
 690		 * additional credits or fail the request. If at least one
 691		 * request is in flight there is a high probability that the
 692		 * server will return enough credits to satisfy this compound
 693		 * request.
 694		 *
 695		 * Return immediately if no requests in flight since we will be
 696		 * stuck on waiting for credits.
 697		 */
 698		if (server->in_flight == 0) {
 699			spin_unlock(&server->req_lock);
 700			trace_smb3_insufficient_credits(server->CurrentMid,
 701					server->conn_id, server->hostname, scredits,
 702					num, in_flight);
 703			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
 704					__func__, in_flight, num, scredits);
 705			return -EDEADLK;
 706		}
 707	}
 708	spin_unlock(&server->req_lock);
 709
 710	return wait_for_free_credits(server, num, 60000, flags,
 711				     instance);
 712}
 713
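/*
 * Default (SMB1) implementation of the MTU credits hook: there is no
 * credit-based flow control, so grant the full @size and report a
 * credit value of zero.
 */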
 714int
 715cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
 716		      unsigned int *num, struct cifs_credits *credits)
 717{
 718	*num = size;
 719	credits->value = 0;
 720	credits->instance = server->reconnect_instance;
 721	return 0;
 722}
 723
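/*
 * Allocate a mid for @in_buf and queue it on the server's pending mid
 * list, after checking that the TCP and SMB session states still allow
 * sending this command.
 */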
 724static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
 725			struct mid_q_entry **ppmidQ)
 726{
 727	if (ses->server->tcpStatus == CifsExiting) {
 728		return -ENOENT;
 729	}
 730
 731	if (ses->server->tcpStatus == CifsNeedReconnect) {
 732		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
 733		return -EAGAIN;
 734	}
 735
 736	if (ses->status == CifsNew) {
 737		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
 738			(in_buf->Command != SMB_COM_NEGOTIATE))
 739			return -EAGAIN;
 740		/* else ok - we are setting up session */
 741	}
 742
 743	if (ses->status == CifsExiting) {
 744		/* check if SMB session is bad because we are setting it up */
 745		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
 746			return -EAGAIN;
 747		/* else ok - we are shutting down session */
 748	}
 749
 750	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
 751	if (*ppmidQ == NULL)
 752		return -ENOMEM;
 753	spin_lock(&GlobalMid_Lock);
 754	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
 755	spin_unlock(&GlobalMid_Lock);
 756	return 0;
 757}
 758
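/*
 * Block (freezable and killable) until the mid leaves the
 * MID_REQUEST_SUBMITTED state, i.e. a response arrived or the
 * connection state changed.
 */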
 759static int
 760wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 761{
 762	int error;
 763
 764	error = wait_event_freezekillable_unsafe(server->response_q,
 765				    midQ->mid_state != MID_REQUEST_SUBMITTED);
 766	if (error < 0)
 767		return -ERESTARTSYS;
 768
 769	return 0;
 770}
 771
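/*
 * Validate that the first iovec holds the 4-byte RFC1002 length and is
 * contiguous with the SMB header, then allocate and sign a mid for an
 * asynchronous request.
 */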
 772struct mid_q_entry *
 773cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 774{
 775	int rc;
 776	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
 777	struct mid_q_entry *mid;
 778
 779	if (rqst->rq_iov[0].iov_len != 4 ||
 780	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
 781		return ERR_PTR(-EIO);
 782
 783	/* enable signing if server requires it */
 784	if (server->sign)
 785		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 786
 787	mid = AllocMidQEntry(hdr, server);
 788	if (mid == NULL)
 789		return ERR_PTR(-ENOMEM);
 790
 791	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
 792	if (rc) {
 793		DeleteMidQEntry(mid);
 794		return ERR_PTR(rc);
 795	}
 796
 797	return mid;
 798}
 799
 800/*
 801 * Send an SMB request and set the callback function in the mid to handle
 802 * the result. The caller is responsible for dealing with timeouts.
 803 */
 804int
 805cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
 806		mid_receive_t *receive, mid_callback_t *callback,
 807		mid_handle_t *handle, void *cbdata, const int flags,
 808		const struct cifs_credits *exist_credits)
 809{
 810	int rc;
 811	struct mid_q_entry *mid;
 812	struct cifs_credits credits = { .value = 0, .instance = 0 };
 813	unsigned int instance;
 814	int optype;
 815
 816	optype = flags & CIFS_OP_MASK;
 817
 818	if ((flags & CIFS_HAS_CREDITS) == 0) {
 819		rc = wait_for_free_request(server, flags, &instance);
 820		if (rc)
 821			return rc;
 822		credits.value = 1;
 823		credits.instance = instance;
 824	} else
 825		instance = exist_credits->instance;
 826
 827	mutex_lock(&server->srv_mutex);
 828
 829	/*
 830	 * We can't use credits obtained from the previous session to send this
 831	 * request. Check if there were reconnects after we obtained credits and
 832	 * return -EAGAIN in such cases to let callers handle it.
 833	 */
 834	if (instance != server->reconnect_instance) {
 835		mutex_unlock(&server->srv_mutex);
 836		add_credits_and_wake_if(server, &credits, optype);
 837		return -EAGAIN;
 838	}
 839
 840	mid = server->ops->setup_async_request(server, rqst);
 841	if (IS_ERR(mid)) {
 842		mutex_unlock(&server->srv_mutex);
 843		add_credits_and_wake_if(server, &credits, optype);
 844		return PTR_ERR(mid);
 845	}
 846
 847	mid->receive = receive;
 848	mid->callback = callback;
 849	mid->callback_data = cbdata;
 850	mid->handle = handle;
 851	mid->mid_state = MID_REQUEST_SUBMITTED;
 852
 853	/* put it on the pending_mid_q */
 854	spin_lock(&GlobalMid_Lock);
 855	list_add_tail(&mid->qhead, &server->pending_mid_q);
 856	spin_unlock(&GlobalMid_Lock);
 857
 858	/*
 859	 * Need to store the send time in the mid before issuing I/O: for
 860	 * call_async, the response may arrive and free the mid entry on
 861	 * another thread.
 862	 */
 862	cifs_save_when_sent(mid);
 863	cifs_in_send_inc(server);
 864	rc = smb_send_rqst(server, 1, rqst, flags);
 865	cifs_in_send_dec(server);
 866
 867	if (rc < 0) {
 868		revert_current_mid(server, mid->credits);
 869		server->sequence_number -= 2;
 870		cifs_delete_mid(mid);
 871	}
 872
 873	mutex_unlock(&server->srv_mutex);
 874
 875	if (rc == 0)
 876		return 0;
 877
 878	add_credits_and_wake_if(server, &credits, optype);
 879	return rc;
 880}
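/*
 * Illustrative only: an asynchronous request might be issued through
 * this interface roughly as follows (my_receive, my_callback and rdata
 * are hypothetical names for the caller's handlers and context):
 *
 *	rc = cifs_call_async(server, &rqst, my_receive, my_callback,
 *			     NULL, rdata, 0, NULL);
 */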
 881
 882/*
 883 * Send an SMB request.  No response info (other than the return code)
 884 * needs to be parsed.
 885 *
 886 * flags indicate the type of request buffer, how long to wait, and
 887 * whether to log the NT STATUS code (error) before mapping it to a
 888 * POSIX error.
 889 */
 891int
 892SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
 893		 char *in_buf, int flags)
 894{
 895	int rc;
 896	struct kvec iov[1];
 897	struct kvec rsp_iov;
 898	int resp_buf_type;
 899
 900	iov[0].iov_base = in_buf;
 901	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
 902	flags |= CIFS_NO_RSP_BUF;
 903	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
 904	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
 905
 906	return rc;
 907}
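/*
 * Illustrative only: a caller that has already marshalled an SMB1
 * frame (for example an echo or logoff request) and needs no response
 * data might use this roughly as follows; pSMB below is a hypothetical
 * name for the caller's request buffer:
 *
 *	rc = SendReceiveNoRsp(xid, ses, (char *)pSMB, 0);
 *	if (rc)
 *		cifs_dbg(FYI, "request failed, rc=%d\n", rc);
 */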
 908
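/*
 * Translate the final state of a mid into an errno.  A mid whose
 * response was received is left for the caller to consume; any other
 * state is mapped to an error and the mid is freed.
 */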
 909static int
 910cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
 911{
 912	int rc = 0;
 913
 914	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
 915		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
 916
 917	spin_lock(&GlobalMid_Lock);
 918	switch (mid->mid_state) {
 919	case MID_RESPONSE_RECEIVED:
 920		spin_unlock(&GlobalMid_Lock);
 921		return rc;
 922	case MID_RETRY_NEEDED:
 923		rc = -EAGAIN;
 924		break;
 925	case MID_RESPONSE_MALFORMED:
 926		rc = -EIO;
 927		break;
 928	case MID_SHUTDOWN:
 929		rc = -EHOSTDOWN;
 930		break;
 931	default:
 932		if (!(mid->mid_flags & MID_DELETED)) {
 933			list_del_init(&mid->qhead);
 934			mid->mid_flags |= MID_DELETED;
 935		}
 936		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
 937			 __func__, mid->mid, mid->mid_state);
 938		rc = -EIO;
 939	}
 940	spin_unlock(&GlobalMid_Lock);
 941
 942	DeleteMidQEntry(mid);
 943	return rc;
 944}
 945
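/*
 * Ask the transport to cancel an outstanding request, if the dialect
 * provides a cancel operation (a no-op otherwise).
 */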
 946static inline int
 947send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
 948	    struct mid_q_entry *mid)
 949{
 950	return server->ops->send_cancel ?
 951				server->ops->send_cancel(server, rqst, mid) : 0;
 952}
 953
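/*
 * Sanity-check a received SMB1 response: verify its signature when
 * signing is in use, then map any SMB error it carries to a POSIX
 * error.
 */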
 954int
 955cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
 956		   bool log_error)
 957{
 958	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
 959
 960	dump_smb(mid->resp_buf, min_t(u32, 92, len));
 961
 962	/* verify the signature over the packet if the server signs */
 963	if (server->sign) {
 964		struct kvec iov[2];
 965		int rc = 0;
 966		struct smb_rqst rqst = { .rq_iov = iov,
 967					 .rq_nvec = 2 };
 968
 969		iov[0].iov_base = mid->resp_buf;
 970		iov[0].iov_len = 4;
 971		iov[1].iov_base = (char *)mid->resp_buf + 4;
 972		iov[1].iov_len = len - 4;
 973		/* FIXME: add code to kill session */
 974		rc = cifs_verify_signature(&rqst, server,
 975					   mid->sequence_number);
 976		if (rc)
 977			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
 978				 rc);
 979	}
 980
 981	/* BB special case reconnect tid and uid here? */
 982	return map_and_check_smb_error(mid, log_error);
 983}
 984
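/*
 * Synchronous counterpart of cifs_setup_async_request(): validate the
 * RFC1002 length iovec, allocate a mid on the pending queue, and sign
 * the request.
 */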
 985struct mid_q_entry *
 986cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
 987		   struct smb_rqst *rqst)
 988{
 989	int rc;
 990	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
 991	struct mid_q_entry *mid;
 992
 993	if (rqst->rq_iov[0].iov_len != 4 ||
 994	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
 995		return ERR_PTR(-EIO);
 996
 997	rc = allocate_mid(ses, hdr, &mid);
 998	if (rc)
 999		return ERR_PTR(rc);
1000	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
1001	if (rc) {
1002		cifs_delete_mid(mid);
1003		return ERR_PTR(rc);
1004	}
1005	return mid;
1006}
1007
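/*
 * Callback invoked for every part of a compound chain when its
 * response arrives: collect the credits granted by the server and
 * return them to the server structure.  The "last" variant also wakes
 * the thread waiting in compound_send_recv(); the "cancelled" variant
 * additionally frees the mid, since no one is left waiting for it.
 */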
1008static void
1009cifs_compound_callback(struct mid_q_entry *mid)
1010{
1011	struct TCP_Server_Info *server = mid->server;
1012	struct cifs_credits credits;
1013
1014	credits.value = server->ops->get_credits(mid);
1015	credits.instance = server->reconnect_instance;
1016
1017	add_credits(server, &credits, mid->optype);
1018}
1019
1020static void
1021cifs_compound_last_callback(struct mid_q_entry *mid)
1022{
1023	cifs_compound_callback(mid);
1024	cifs_wake_up_task(mid);
1025}
1026
1027static void
1028cifs_cancelled_callback(struct mid_q_entry *mid)
1029{
1030	cifs_compound_callback(mid);
1031	DeleteMidQEntry(mid);
1032}
1033
1034/*
1035 * Return a channel of @ses that can be used to send regular requests
1036 * (the master channel if no extra channels are set up).
1037 *
1038 * If we are currently binding a new channel (negprot/sess.setup),
1039 * return the new incomplete channel.
1040 */
1041struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1042{
1043	uint index = 0;
1044
1045	if (!ses)
1046		return NULL;
1047
1048	if (!ses->binding) {
1049		/* round robin */
1050		if (ses->chan_count > 1) {
1051			index = (uint)atomic_inc_return(&ses->chan_seq);
1052			index %= ses->chan_count;
1053		}
1054		return ses->chans[index].server;
1055	} else {
1056		return cifs_ses_server(ses);
1057	}
1058}
1059
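/*
 * Send @num_rqst requests, typically as a single compound chain, and
 * wait for all of the responses.  On success each resp_iov[i] points
 * into a response buffer whose type is recorded in resp_buf_type[i];
 * unless CIFS_NO_RSP_BUF was passed, the caller is responsible for
 * releasing those buffers.
 */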
1060int
1061compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
1062		   struct TCP_Server_Info *server,
1063		   const int flags, const int num_rqst, struct smb_rqst *rqst,
1064		   int *resp_buf_type, struct kvec *resp_iov)
1065{
1066	int i, j, optype, rc = 0;
1067	struct mid_q_entry *midQ[MAX_COMPOUND];
1068	bool cancelled_mid[MAX_COMPOUND] = {false};
1069	struct cifs_credits credits[MAX_COMPOUND] = {
1070		{ .value = 0, .instance = 0 }
1071	};
1072	unsigned int instance;
1073	char *buf;
1074
1075	optype = flags & CIFS_OP_MASK;
1076
1077	for (i = 0; i < num_rqst; i++)
1078		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */
1079
1080	if (!ses || !ses->server || !server) {
1081		cifs_dbg(VFS, "Null session\n");
1082		return -EIO;
1083	}
1084
1085	if (server->tcpStatus == CifsExiting)
1086		return -ENOENT;
1087
1088	/*
1089	 * Wait for credits for all the requests to become available.
1090	 * This approach still leaves the possibility of being stuck waiting
1091	 * for credits if the server doesn't grant credits to the outstanding
1092	 * requests and the client is completely idle, not generating any
1093	 * other requests.
1094	 * This can be handled by the eventual session reconnect.
1095	 */
1096	rc = wait_for_compound_request(server, num_rqst, flags,
1097				       &instance);
1098	if (rc)
1099		return rc;
1100
1101	for (i = 0; i < num_rqst; i++) {
1102		credits[i].value = 1;
1103		credits[i].instance = instance;
1104	}
1105
1106	/*
1107	 * Make sure that we sign in the same order that we send on this socket
1108	 * and avoid races inside tcp sendmsg code that could cause corruption
1109	 * of smb data.
1110	 */
1111
1112	mutex_lock(&server->srv_mutex);
1113
1114	/*
1115	 * All the parts of the compound chain must use credits obtained from
1116	 * the same session. We cannot use credits obtained from a previous
1117	 * session to send this request. Check if there were reconnects after
1118	 * we obtained credits and return -EAGAIN in such cases to let callers
1119	 * handle it.
1120	 */
1121	if (instance != server->reconnect_instance) {
1122		mutex_unlock(&server->srv_mutex);
1123		for (j = 0; j < num_rqst; j++)
1124			add_credits(server, &credits[j], optype);
1125		return -EAGAIN;
1126	}
1127
1128	for (i = 0; i < num_rqst; i++) {
1129		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
1130		if (IS_ERR(midQ[i])) {
1131			revert_current_mid(server, i);
1132			for (j = 0; j < i; j++)
1133				cifs_delete_mid(midQ[j]);
1134			mutex_unlock(&server->srv_mutex);
1135
1136			/* Update # of requests on wire to server */
1137			for (j = 0; j < num_rqst; j++)
1138				add_credits(server, &credits[j], optype);
1139			return PTR_ERR(midQ[i]);
1140		}
1141
1142		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
1143		midQ[i]->optype = optype;
1144		/*
1145		 * Invoke callback for every part of the compound chain
1146		 * to calculate credits properly. Wake up this thread only when
1147		 * the last element is received.
1148		 */
1149		if (i < num_rqst - 1)
1150			midQ[i]->callback = cifs_compound_callback;
1151		else
1152			midQ[i]->callback = cifs_compound_last_callback;
1153	}
1154	cifs_in_send_inc(server);
1155	rc = smb_send_rqst(server, num_rqst, rqst, flags);
1156	cifs_in_send_dec(server);
1157
1158	for (i = 0; i < num_rqst; i++)
1159		cifs_save_when_sent(midQ[i]);
1160
1161	if (rc < 0) {
1162		revert_current_mid(server, num_rqst);
1163		server->sequence_number -= 2;
1164	}
1165
1166	mutex_unlock(&server->srv_mutex);
1167
1168	/*
1169	 * If sending failed for some reason, or this is an oplock break that
1170	 * we will not receive a response to, return the credits.
1171	 */
1172	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
1173		for (i = 0; i < num_rqst; i++)
1174			add_credits(server, &credits[i], optype);
1175		goto out;
1176	}
1177
1178	/*
1179	 * At this point the request is passed to the network stack - we assume
1180	 * that any credits taken from the server structure on the client have
1181	 * been spent and we can't return them. Once we receive responses we
1182	 * will collect credits granted by the server in the mid callbacks and
1183	 * add those credits to the server structure.
1184	 */
1185
1186	/*
1187	 * Compounding is never used during session establishment.
1188	 */
1189	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
1190		mutex_lock(&server->srv_mutex);
1191		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
1192					   rqst[0].rq_nvec);
1193		mutex_unlock(&server->srv_mutex);
1194	}
1195
1196	for (i = 0; i < num_rqst; i++) {
1197		rc = wait_for_response(server, midQ[i]);
1198		if (rc != 0)
1199			break;
1200	}
1201	if (rc != 0) {
1202		for (; i < num_rqst; i++) {
1203			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
1204				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
1205			send_cancel(server, &rqst[i], midQ[i]);
1206			spin_lock(&GlobalMid_Lock);
1207			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
1208			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
1209				midQ[i]->callback = cifs_cancelled_callback;
1210				cancelled_mid[i] = true;
1211				credits[i].value = 0;
1212			}
1213			spin_unlock(&GlobalMid_Lock);
1214		}
1215	}
1216
1217	for (i = 0; i < num_rqst; i++) {
1218		if (rc < 0)
1219			goto out;
1220
1221		rc = cifs_sync_mid_result(midQ[i], server);
1222		if (rc != 0) {
1223			/* mark this mid as cancelled so it is not freed below */
1224			cancelled_mid[i] = true;
1225			goto out;
1226		}
1227
1228		if (!midQ[i]->resp_buf ||
1229		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
1230			rc = -EIO;
1231			cifs_dbg(FYI, "Bad MID state?\n");
1232			goto out;
1233		}
1234
1235		buf = (char *)midQ[i]->resp_buf;
1236		resp_iov[i].iov_base = buf;
1237		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1238			server->vals->header_preamble_size;
1239
1240		if (midQ[i]->large_buf)
1241			resp_buf_type[i] = CIFS_LARGE_BUFFER;
1242		else
1243			resp_buf_type[i] = CIFS_SMALL_BUFFER;
1244
1245		rc = server->ops->check_receive(midQ[i], server,
1246						     flags & CIFS_LOG_ERROR);
1247
1248		/* mark it so the response buf will not be freed by cifs_delete_mid */
1249		if ((flags & CIFS_NO_RSP_BUF) == 0)
1250			midQ[i]->resp_buf = NULL;
1251
1252	}
1253
1254	/*
1255	 * Compounding is never used during session establishment.
1256	 */
1257	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
1258		struct kvec iov = {
1259			.iov_base = resp_iov[0].iov_base,
1260			.iov_len = resp_iov[0].iov_len
1261		};
1262		mutex_lock(&server->srv_mutex);
1263		smb311_update_preauth_hash(ses, &iov, 1);
1264		mutex_unlock(&server->srv_mutex);
1265	}
1266
1267out:
1268	/*
1269	 * This will dequeue all mids. After this it is important that the
1270	 * demultiplex_thread will not process any of these mids any further.
1271	 * This is prevented above by using a noop callback that will not
1272	 * wake this thread except for the very last PDU.
1273	 */
1274	for (i = 0; i < num_rqst; i++) {
1275		if (!cancelled_mid[i])
1276			cifs_delete_mid(midQ[i]);
1277	}
1278
1279	return rc;
1280}
1281
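/*
 * Convenience wrapper around compound_send_recv() for the common case
 * of a single (non-compounded) request.
 */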
1282int
1283cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
1284	       struct TCP_Server_Info *server,
1285	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
1286	       struct kvec *resp_iov)
1287{
1288	return compound_send_recv(xid, ses, server, flags, 1,
1289				  rqst, resp_buf_type, resp_iov);
1290}
1291
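/*
 * Legacy entry point: prepend an iovec for the 4-byte RFC1001 length
 * (split off from the caller's first vector) and hand the result to
 * cifs_send_recv().
 */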
1292int
1293SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1294	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1295	     const int flags, struct kvec *resp_iov)
1296{
1297	struct smb_rqst rqst;
1298	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1299	int rc;
1300
1301	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1302		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1303					GFP_KERNEL);
1304		if (!new_iov) {
1305			/* otherwise cifs_send_recv below sets resp_buf_type */
1306			*resp_buf_type = CIFS_NO_BUFFER;
1307			return -ENOMEM;
1308		}
1309	} else
1310		new_iov = s_iov;
1311
1312	/* 1st iov is an RFC1001 length followed by the rest of the packet */
1313	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1314
1315	new_iov[0].iov_base = new_iov[1].iov_base;
1316	new_iov[0].iov_len = 4;
1317	new_iov[1].iov_base += 4;
1318	new_iov[1].iov_len -= 4;
1319
1320	memset(&rqst, 0, sizeof(struct smb_rqst));
1321	rqst.rq_iov = new_iov;
1322	rqst.rq_nvec = n_vec + 1;
1323
1324	rc = cifs_send_recv(xid, ses, ses->server,
1325			    &rqst, resp_buf_type, flags, resp_iov);
1326	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1327		kfree(new_iov);
1328	return rc;
1329}
1330
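/*
 * Fully synchronous SMB1 send/receive on a single pre-built buffer:
 * the response is copied into @out_buf and its length is returned via
 * @pbytes_returned.
 */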
1331int
1332SendReceive(const unsigned int xid, struct cifs_ses *ses,
1333	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1334	    int *pbytes_returned, const int flags)
1335{
1336	int rc = 0;
1337	struct mid_q_entry *midQ;
1338	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1339	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1340	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1341	struct cifs_credits credits = { .value = 1, .instance = 0 };
1342	struct TCP_Server_Info *server;
1343
1344	if (ses == NULL) {
1345		cifs_dbg(VFS, "Null smb session\n");
1346		return -EIO;
1347	}
1348	server = ses->server;
1349	if (server == NULL) {
1350		cifs_dbg(VFS, "Null tcp session\n");
1351		return -EIO;
1352	}
1353
1354	if (server->tcpStatus == CifsExiting)
1355		return -ENOENT;
1356
1357	/* Ensure that we do not send more than 50 overlapping requests
1358	   to the same server. We may make this configurable later or
1359	   use ses->maxReq */
1360
1361	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1362		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1363				len);
1364		return -EIO;
1365	}
1366
1367	rc = wait_for_free_request(server, flags, &credits.instance);
1368	if (rc)
1369		return rc;
1370
1371	/* make sure that we sign in the same order that we send on this socket
1372	   and avoid races inside tcp sendmsg code that could cause corruption
1373	   of smb data */
1374
1375	mutex_lock(&server->srv_mutex);
1376
1377	rc = allocate_mid(ses, in_buf, &midQ);
1378	if (rc) {
1379		mutex_unlock(&server->srv_mutex);
1380		/* Update # of requests on wire to server */
1381		add_credits(server, &credits, 0);
1382		return rc;
1383	}
1384
1385	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1386	if (rc) {
1387		mutex_unlock(&server->srv_mutex);
1388		goto out;
1389	}
1390
1391	midQ->mid_state = MID_REQUEST_SUBMITTED;
1392
1393	cifs_in_send_inc(server);
1394	rc = smb_send(server, in_buf, len);
1395	cifs_in_send_dec(server);
1396	cifs_save_when_sent(midQ);
1397
1398	if (rc < 0)
1399		server->sequence_number -= 2;
1400
1401	mutex_unlock(&server->srv_mutex);
1402
1403	if (rc < 0)
1404		goto out;
1405
1406	rc = wait_for_response(server, midQ);
1407	if (rc != 0) {
1408		send_cancel(server, &rqst, midQ);
1409		spin_lock(&GlobalMid_Lock);
1410		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1411			/* no longer considered to be "in-flight" */
1412			midQ->callback = DeleteMidQEntry;
1413			spin_unlock(&GlobalMid_Lock);
1414			add_credits(server, &credits, 0);
1415			return rc;
1416		}
1417		spin_unlock(&GlobalMid_Lock);
1418	}
1419
1420	rc = cifs_sync_mid_result(midQ, server);
1421	if (rc != 0) {
1422		add_credits(server, &credits, 0);
1423		return rc;
1424	}
1425
1426	if (!midQ->resp_buf || !out_buf ||
1427	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
1428		rc = -EIO;
1429		cifs_server_dbg(VFS, "Bad MID state?\n");
1430		goto out;
1431	}
1432
1433	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1434	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1435	rc = cifs_check_receive(midQ, server, 0);
1436out:
1437	cifs_delete_mid(midQ);
1438	add_credits(server, &credits, 0);
1439
1440	return rc;
1441}
1442
1443/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1444   blocking lock to return. */
1445
1446static int
1447send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1448			struct smb_hdr *in_buf,
1449			struct smb_hdr *out_buf)
1450{
1451	int bytes_returned;
1452	struct cifs_ses *ses = tcon->ses;
1453	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1454
1455	/* We just modify the current in_buf to change
1456	   the type of lock from LOCKING_ANDX_SHARED_LOCK
1457	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
1458	   LOCKING_ANDX_CANCEL_LOCK. */
1459
1460	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1461	pSMB->Timeout = 0;
1462	pSMB->hdr.Mid = get_next_mid(ses->server);
1463
1464	return SendReceive(xid, ses, in_buf, out_buf,
1465			&bytes_returned, 0);
1466}
1467
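/*
 * Variant of SendReceive() for blocking lock requests: the wait for
 * the response is interruptible, and if a signal arrives the pending
 * lock is cancelled on the server and the system call is restarted
 * once the response comes in.
 */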
1468int
1469SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1470	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1471	    int *pbytes_returned)
1472{
1473	int rc = 0;
1474	int rstart = 0;
1475	struct mid_q_entry *midQ;
1476	struct cifs_ses *ses;
1477	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1478	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1479	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1480	unsigned int instance;
1481	struct TCP_Server_Info *server;
1482
1483	if (tcon == NULL || tcon->ses == NULL) {
1484		cifs_dbg(VFS, "Null smb session\n");
1485		return -EIO;
1486	}
1487	ses = tcon->ses;
1488	server = ses->server;
1489
1490	if (server == NULL) {
1491		cifs_dbg(VFS, "Null tcp session\n");
1492		return -EIO;
1493	}
1494
1495	if (server->tcpStatus == CifsExiting)
1496		return -ENOENT;
1497
1498	/* Ensure that we do not send more than 50 overlapping requests
1499	   to the same server. We may make this configurable later or
1500	   use ses->maxReq */
1501
1502	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1503		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1504			      len);
1505		return -EIO;
1506	}
1507
1508	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1509	if (rc)
1510		return rc;
1511
1512	/* make sure that we sign in the same order that we send on this socket
1513	   and avoid races inside tcp sendmsg code that could cause corruption
1514	   of smb data */
1515
1516	mutex_lock(&server->srv_mutex);
1517
1518	rc = allocate_mid(ses, in_buf, &midQ);
1519	if (rc) {
1520		mutex_unlock(&server->srv_mutex);
1521		return rc;
1522	}
1523
1524	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1525	if (rc) {
1526		cifs_delete_mid(midQ);
1527		mutex_unlock(&server->srv_mutex);
1528		return rc;
1529	}
1530
1531	midQ->mid_state = MID_REQUEST_SUBMITTED;
1532	cifs_in_send_inc(server);
1533	rc = smb_send(server, in_buf, len);
1534	cifs_in_send_dec(server);
1535	cifs_save_when_sent(midQ);
1536
1537	if (rc < 0)
1538		server->sequence_number -= 2;
1539
1540	mutex_unlock(&server->srv_mutex);
1541
1542	if (rc < 0) {
1543		cifs_delete_mid(midQ);
1544		return rc;
1545	}
1546
1547	/* Wait for a reply - allow signals to interrupt. */
1548	rc = wait_event_interruptible(server->response_q,
1549		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
1550		((server->tcpStatus != CifsGood) &&
1551		 (server->tcpStatus != CifsNew)));
1552
1553	/* Were we interrupted by a signal? */
1554	if ((rc == -ERESTARTSYS) &&
1555		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1556		((server->tcpStatus == CifsGood) ||
1557		 (server->tcpStatus == CifsNew))) {
1558
1559		if (in_buf->Command == SMB_COM_TRANSACTION2) {
1560			/* POSIX lock. We send an NT_CANCEL SMB to cause the
1561			   blocking lock to return. */
1562			rc = send_cancel(server, &rqst, midQ);
1563			if (rc) {
1564				cifs_delete_mid(midQ);
1565				return rc;
1566			}
1567		} else {
1568			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1569			   to cause the blocking lock to return. */
1570
1571			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1572
1573			/* If we get -ENOLCK back, the lock may already
1574			   have been removed. Don't exit in this case. */
1575			if (rc && rc != -ENOLCK) {
1576				cifs_delete_mid(midQ);
1577				return rc;
1578			}
1579		}
1580
1581		rc = wait_for_response(server, midQ);
1582		if (rc) {
1583			send_cancel(server, &rqst, midQ);
1584			spin_lock(&GlobalMid_Lock);
1585			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1586				/* no longer considered to be "in-flight" */
1587				midQ->callback = DeleteMidQEntry;
1588				spin_unlock(&GlobalMid_Lock);
1589				return rc;
1590			}
1591			spin_unlock(&GlobalMid_Lock);
1592		}
1593
1594		/* We got the response - restart system call. */
1595		rstart = 1;
1596	}
1597
1598	rc = cifs_sync_mid_result(midQ, server);
1599	if (rc != 0)
1600		return rc;
1601
1602	/* rcvd frame is ok */
1603	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1604		rc = -EIO;
1605		cifs_tcon_dbg(VFS, "Bad MID state?\n");
1606		goto out;
1607	}
1608
1609	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1610	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1611	rc = cifs_check_receive(midQ, server, 0);
1612out:
1613	cifs_delete_mid(midQ);
1614	if (rstart && rc == -EACCES)
1615		return -ERESTARTSYS;
1616	return rc;
1617}