fs/cifs/transport.c (Linux v6.2)
   1// SPDX-License-Identifier: LGPL-2.1
   2/*
   3 *
   4 *   Copyright (C) International Business Machines  Corp., 2002,2008
   5 *   Author(s): Steve French (sfrench@us.ibm.com)
   6 *   Jeremy Allison (jra@samba.org) 2006.
   7 *
   8 */
   9
  10#include <linux/fs.h>
  11#include <linux/list.h>
  12#include <linux/gfp.h>
  13#include <linux/wait.h>
  14#include <linux/net.h>
  15#include <linux/delay.h>
  16#include <linux/freezer.h>
  17#include <linux/tcp.h>
  18#include <linux/bvec.h>
  19#include <linux/highmem.h>
  20#include <linux/uaccess.h>
  21#include <asm/processor.h>
  22#include <linux/mempool.h>
  23#include <linux/sched/signal.h>
  24#include <linux/task_io_accounting_ops.h>
  25#include "cifspdu.h"
  26#include "cifsglob.h"
  27#include "cifsproto.h"
  28#include "cifs_debug.h"
  29#include "smb2proto.h"
  30#include "smbdirect.h"
  31
  32/* Max number of iovectors we can use off the stack when sending requests. */
  33#define CIFS_MAX_IOV_SIZE 8
  34
  35void
  36cifs_wake_up_task(struct mid_q_entry *mid)
  37{
  38	wake_up_process(mid->callback_data);
  39}
  40
  41static struct mid_q_entry *
  42alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
  43{
  44	struct mid_q_entry *temp;
  45
  46	if (server == NULL) {
  47		cifs_dbg(VFS, "%s: null TCP session\n", __func__);
  48		return NULL;
  49	}
  50
  51	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
  52	memset(temp, 0, sizeof(struct mid_q_entry));
  53	kref_init(&temp->refcount);
  54	temp->mid = get_mid(smb_buffer);
  55	temp->pid = current->pid;
  56	temp->command = cpu_to_le16(smb_buffer->Command);
  57	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
  58	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
  59	/* when mid allocated can be before when sent */
  60	temp->when_alloc = jiffies;
  61	temp->server = server;
  62
  63	/*
  64	 * The default is for the mid to be synchronous, so the
  65	 * default callback just wakes up the current task.
  66	 */
  67	get_task_struct(current);
  68	temp->creator = current;
  69	temp->callback = cifs_wake_up_task;
  70	temp->callback_data = current;
  71
  72	atomic_inc(&mid_count);
  73	temp->mid_state = MID_REQUEST_ALLOCATED;
  74	return temp;
  75}
  76
  77static void __release_mid(struct kref *refcount)
  78{
  79	struct mid_q_entry *midEntry =
  80			container_of(refcount, struct mid_q_entry, refcount);
  81#ifdef CONFIG_CIFS_STATS2
  82	__le16 command = midEntry->server->vals->lock_cmd;
  83	__u16 smb_cmd = le16_to_cpu(midEntry->command);
  84	unsigned long now;
  85	unsigned long roundtrip_time;
  86#endif
  87	struct TCP_Server_Info *server = midEntry->server;
  88
  89	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
  90	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
  91	    server->ops->handle_cancelled_mid)
  92		server->ops->handle_cancelled_mid(midEntry, server);
  93
  94	midEntry->mid_state = MID_FREE;
  95	atomic_dec(&mid_count);
  96	if (midEntry->large_buf)
  97		cifs_buf_release(midEntry->resp_buf);
  98	else
  99		cifs_small_buf_release(midEntry->resp_buf);
 100#ifdef CONFIG_CIFS_STATS2
 101	now = jiffies;
 102	if (now < midEntry->when_alloc)
 103		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
 104	roundtrip_time = now - midEntry->when_alloc;
 105
 106	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
 107		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
 108			server->slowest_cmd[smb_cmd] = roundtrip_time;
 109			server->fastest_cmd[smb_cmd] = roundtrip_time;
 110		} else {
 111			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
 112				server->slowest_cmd[smb_cmd] = roundtrip_time;
 113			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
 114				server->fastest_cmd[smb_cmd] = roundtrip_time;
 115		}
 116		cifs_stats_inc(&server->num_cmds[smb_cmd]);
 117		server->time_per_cmd[smb_cmd] += roundtrip_time;
 118	}
 119	/*
 120	 * commands taking longer than one second (default) can be indications
 121	 * that something is wrong, unless it is quite a slow link or a very
 122	 * busy server. Note that this calc is unlikely or impossible to wrap
 123	 * as long as slow_rsp_threshold is not set way above recommended max
 124	 * value (32767 ie 9 hours) and is generally harmless even if wrong
 125	 * since only affects debug counters - so leaving the calc as simple
 126	 * comparison rather than doing multiple conversions and overflow
 127	 * checks
 128	 */
 129	if ((slow_rsp_threshold != 0) &&
 130	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
 131	    (midEntry->command != command)) {
 132		/*
 133		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
 134		 * NB: le16_to_cpu returns unsigned so can not be negative below
 135		 */
 136		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
 137			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
 138
 139		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
 140			       midEntry->when_sent, midEntry->when_received);
 141		if (cifsFYI & CIFS_TIMER) {
 142			pr_debug("slow rsp: cmd %d mid %llu",
 143				 midEntry->command, midEntry->mid);
 144			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
 145				  now - midEntry->when_alloc,
 146				  now - midEntry->when_sent,
 147				  now - midEntry->when_received);
 148		}
 149	}
 150#endif
 151	put_task_struct(midEntry->creator);
 152
 153	mempool_free(midEntry, cifs_mid_poolp);
 154}
 155
 156void release_mid(struct mid_q_entry *mid)
 157{
 158	struct TCP_Server_Info *server = mid->server;
 159
 160	spin_lock(&server->mid_lock);
 161	kref_put(&mid->refcount, __release_mid);
 162	spin_unlock(&server->mid_lock);
 163}
 164
 165void
 166delete_mid(struct mid_q_entry *mid)
 167{
 168	spin_lock(&mid->server->mid_lock);
 169	if (!(mid->mid_flags & MID_DELETED)) {
 170		list_del_init(&mid->qhead);
 171		mid->mid_flags |= MID_DELETED;
 172	}
 173	spin_unlock(&mid->server->mid_lock);
 174
 175	release_mid(mid);
 176}
 177
 178/*
 179 * smb_send_kvec - send an array of kvecs to the server
 180 * @server:	Server to send the data to
 181 * @smb_msg:	Message to send
 182 * @sent:	amount of data sent on socket is stored here
 183 *
 184 * Our basic "send data to server" function. Should be called with srv_mutex
 185 * held. The caller is responsible for handling the results.
 186 */
 187static int
 188smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
 189	      size_t *sent)
 190{
 191	int rc = 0;
 192	int retries = 0;
 193	struct socket *ssocket = server->ssocket;
 194
 195	*sent = 0;
 196
 197	if (server->noblocksnd)
 198		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
 199	else
 200		smb_msg->msg_flags = MSG_NOSIGNAL;
 201
 202	while (msg_data_left(smb_msg)) {
 203		/*
 204		 * If blocking send, we try 3 times, since each can block
 205		 * for 5 seconds. For nonblocking  we have to try more
 206		 * but wait increasing amounts of time allowing time for
 207		 * socket to clear.  The overall time we wait in either
 208		 * case to send on the socket is about 15 seconds.
 209		 * Similarly we wait for 15 seconds for a response from
 210		 * the server in SendReceive[2] for the server to send
 211		 * a response back for most types of requests (except
 212		 * SMB Write past end of file which can be slow, and
 213		 * blocking lock operations). NFS waits slightly longer
 214		 * than CIFS, but this can make it take longer for
 215		 * nonresponsive servers to be detected and 15 seconds
 216		 * is more than enough time for modern networks to
 217		 * send a packet.  In most cases if we fail to send
 218		 * after the retries we will kill the socket and
 219		 * reconnect which may clear the network problem.
 220		 */
 221		rc = sock_sendmsg(ssocket, smb_msg);
 222		if (rc == -EAGAIN) {
 223			retries++;
 224			if (retries >= 14 ||
 225			    (!server->noblocksnd && (retries > 2))) {
 226				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
 227					 ssocket);
 228				return -EAGAIN;
 229			}
 230			msleep(1 << retries);
 231			continue;
 232		}
 233
 234		if (rc < 0)
 235			return rc;
 236
 237		if (rc == 0) {
 238			/* should never happen, letting socket clear before
 239			   retrying is our only obvious option here */
 240			cifs_server_dbg(VFS, "tcp sent no data\n");
 241			msleep(500);
 242			continue;
 243		}
 244
 245		/* send was at least partially successful */
 246		*sent += rc;
 247		retries = 0; /* in case we get ENOSPC on the next send */
 248	}
 249	return 0;
 250}
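
/*
 * Editorial note (not part of the original source): a worked sketch of the
 * retry budget implemented above. With noblocksnd set, -EAGAIN is retried
 * with msleep(1 << retries) for retries 1..13 before giving up, i.e. about
 * 2 + 4 + ... + 8192 ms, roughly 16 seconds in total. With blocking sends
 * only three attempts are made, but each sock_sendmsg() may itself block
 * for about 5 seconds, so both paths give up after roughly 15 seconds.
 */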
 251
 252unsigned long
 253smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 254{
 255	unsigned int i;
 256	struct kvec *iov;
 257	int nvec;
 258	unsigned long buflen = 0;
 259
 260	if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
 261	    rqst->rq_iov[0].iov_len == 4) {
 262		iov = &rqst->rq_iov[1];
 263		nvec = rqst->rq_nvec - 1;
 264	} else {
 265		iov = rqst->rq_iov;
 266		nvec = rqst->rq_nvec;
 267	}
 268
 269	/* total up iov array first */
 270	for (i = 0; i < nvec; i++)
 271		buflen += iov[i].iov_len;
 272
 273	/*
 274	 * Add in the page array if there is one. The caller needs to make
 275	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
 276	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
 277	 * PAGE_SIZE.
 278	 */
 279	if (rqst->rq_npages) {
 280		if (rqst->rq_npages == 1)
 281			buflen += rqst->rq_tailsz;
 282		else {
 283			/*
 284			 * If there is more than one page, calculate the
 285			 * buffer length based on rq_offset and rq_tailsz
 286			 */
 287			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
 288					rqst->rq_offset;
 289			buflen += rqst->rq_tailsz;
 290		}
 291	}
 292
 293	return buflen;
 294}
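
/*
 * Editorial note (illustration, not in the original source): for a request
 * with a single 64-byte kvec, rq_npages = 3, rq_pagesz = 4096,
 * rq_offset = 512 and rq_tailsz = 1024, the function returns
 * 64 + (4096 * 2 - 512) + 1024 = 8768 bytes: a partial first page, one full
 * middle page and a partial tail page, plus the iov array.
 */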
 295
 296static int
 297__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
 298		struct smb_rqst *rqst)
 299{
 300	int rc = 0;
 301	struct kvec *iov;
 302	int n_vec;
 303	unsigned int send_length = 0;
 304	unsigned int i, j;
 305	sigset_t mask, oldmask;
 306	size_t total_len = 0, sent, size;
 307	struct socket *ssocket = server->ssocket;
 308	struct msghdr smb_msg = {};
 309	__be32 rfc1002_marker;
 310
 311	if (cifs_rdma_enabled(server)) {
 312		/* return -EAGAIN when connecting or reconnecting */
 313		rc = -EAGAIN;
 314		if (server->smbd_conn)
 315			rc = smbd_send(server, num_rqst, rqst);
 316		goto smbd_done;
 317	}
 318
 319	if (ssocket == NULL)
 320		return -EAGAIN;
 321
 322	if (fatal_signal_pending(current)) {
 323		cifs_dbg(FYI, "signal pending before send request\n");
 324		return -ERESTARTSYS;
 325	}
 326
 327	/* cork the socket */
 328	tcp_sock_set_cork(ssocket->sk, true);
 329
 330	for (j = 0; j < num_rqst; j++)
 331		send_length += smb_rqst_len(server, &rqst[j]);
 332	rfc1002_marker = cpu_to_be32(send_length);
 333
 334	/*
 335	 * We should not allow signals to interrupt the network send because
 336	 * any partial send will cause session reconnects thus increasing
 337	 * latency of system calls and overload a server with unnecessary
 338	 * requests.
 339	 */
 340
 341	sigfillset(&mask);
 342	sigprocmask(SIG_BLOCK, &mask, &oldmask);
 343
 344	/* Generate a rfc1002 marker for SMB2+ */
 345	if (!is_smb1(server)) {
 346		struct kvec hiov = {
 347			.iov_base = &rfc1002_marker,
 348			.iov_len  = 4
 349		};
 350		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
 351		rc = smb_send_kvec(server, &smb_msg, &sent);
 352		if (rc < 0)
 353			goto unmask;
 354
 355		total_len += sent;
 356		send_length += 4;
 357	}
 358
 359	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
 360
 361	for (j = 0; j < num_rqst; j++) {
 362		iov = rqst[j].rq_iov;
 363		n_vec = rqst[j].rq_nvec;
 364
 365		size = 0;
 366		for (i = 0; i < n_vec; i++) {
 367			dump_smb(iov[i].iov_base, iov[i].iov_len);
 368			size += iov[i].iov_len;
 369		}
 370
 371		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);
 372
 373		rc = smb_send_kvec(server, &smb_msg, &sent);
 374		if (rc < 0)
 375			goto unmask;
 376
 377		total_len += sent;
 378
 379		/* now walk the page array and send each page in it */
 380		for (i = 0; i < rqst[j].rq_npages; i++) {
 381			struct bio_vec bvec;
 382
 383			bvec.bv_page = rqst[j].rq_pages[i];
 384			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
 385					     &bvec.bv_offset);
 386
 387			iov_iter_bvec(&smb_msg.msg_iter, ITER_SOURCE,
 388				      &bvec, 1, bvec.bv_len);
 389			rc = smb_send_kvec(server, &smb_msg, &sent);
 390			if (rc < 0)
 391				break;
 392
 393			total_len += sent;
 394		}
 395	}
 396
 397unmask:
 398	sigprocmask(SIG_SETMASK, &oldmask, NULL);
 399
 400	/*
 401	 * If signal is pending but we have already sent the whole packet to
 402	 * the server we need to return success status to allow a corresponding
 403	 * mid entry to be kept in the pending requests queue thus allowing
 404	 * to handle responses from the server by the client.
 405	 *
 406	 * If only part of the packet has been sent there is no need to hide
 407	 * interrupt because the session will be reconnected anyway, so there
 408	 * won't be any response from the server to handle.
 409	 */
 410
 411	if (signal_pending(current) && (total_len != send_length)) {
 412		cifs_dbg(FYI, "signal is pending after attempt to send\n");
 413		rc = -ERESTARTSYS;
 414	}
 415
 416	/* uncork it */
 417	tcp_sock_set_cork(ssocket->sk, false);
 418
 419	if ((total_len > 0) && (total_len != send_length)) {
 420		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
 421			 send_length, total_len);
 422		/*
 423		 * If we have only sent part of an SMB then the next SMB could
 424		 * be taken as the remainder of this one. We need to kill the
 425		 * socket so the server throws away the partial SMB
 426		 */
 427		cifs_signal_cifsd_for_reconnect(server, false);
 428		trace_smb3_partial_send_reconnect(server->CurrentMid,
 429						  server->conn_id, server->hostname);
 430	}
 431smbd_done:
 432	if (rc < 0 && rc != -EINTR)
 433		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
 434			 rc);
 435	else if (rc > 0)
 436		rc = 0;
 437
 438	return rc;
 439}
 440
 441static int
 442smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
 443	      struct smb_rqst *rqst, int flags)
 444{
 445	struct kvec iov;
 446	struct smb2_transform_hdr *tr_hdr;
 447	struct smb_rqst cur_rqst[MAX_COMPOUND];
 448	int rc;
 449
 450	if (!(flags & CIFS_TRANSFORM_REQ))
 451		return __smb_send_rqst(server, num_rqst, rqst);
 452
 453	if (num_rqst > MAX_COMPOUND - 1)
 454		return -ENOMEM;
 455
 456	if (!server->ops->init_transform_rq) {
 457		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
 458		return -EIO;
 459	}
 460
 461	tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
 462	if (!tr_hdr)
 463		return -ENOMEM;
 464
 465	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
 466	memset(&iov, 0, sizeof(iov));
 467
 468	iov.iov_base = tr_hdr;
 469	iov.iov_len = sizeof(*tr_hdr);
 470	cur_rqst[0].rq_iov = &iov;
 471	cur_rqst[0].rq_nvec = 1;
 472
 473	rc = server->ops->init_transform_rq(server, num_rqst + 1,
 474					    &cur_rqst[0], rqst);
 475	if (rc)
 476		goto out;
 477
 478	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
 479	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
 480out:
 481	kfree(tr_hdr);
 482	return rc;
 483}
 484
 485int
 486smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
 487	 unsigned int smb_buf_length)
 488{
 489	struct kvec iov[2];
 490	struct smb_rqst rqst = { .rq_iov = iov,
 491				 .rq_nvec = 2 };
 492
 493	iov[0].iov_base = smb_buffer;
 494	iov[0].iov_len = 4;
 495	iov[1].iov_base = (char *)smb_buffer + 4;
 496	iov[1].iov_len = smb_buf_length;
 497
 498	return __smb_send_rqst(server, 1, &rqst);
 499}
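
/*
 * Editorial note (not part of the original source): smb_send() is the legacy
 * path used by the SMB1 SendReceive routines below. The caller's smb_buffer
 * already begins with the 4-byte RFC1002 length field, which is passed
 * through here as iov[0]; __smb_send_rqst() only prepends a separate length
 * marker for SMB2+ (see the is_smb1() check there).
 */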
 500
 501static int
 502wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
 503		      const int timeout, const int flags,
 504		      unsigned int *instance)
 505{
 506	long rc;
 507	int *credits;
 508	int optype;
 509	long int t;
 510	int scredits, in_flight;
 511
 512	if (timeout < 0)
 513		t = MAX_JIFFY_OFFSET;
 514	else
 515		t = msecs_to_jiffies(timeout);
 516
 517	optype = flags & CIFS_OP_MASK;
 518
 519	*instance = 0;
 520
 521	credits = server->ops->get_credits_field(server, optype);
 522	/* Since an echo is already inflight, no need to wait to send another */
 523	if (*credits <= 0 && optype == CIFS_ECHO_OP)
 524		return -EAGAIN;
 525
 526	spin_lock(&server->req_lock);
 527	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
 528		/* oplock breaks must not be held up */
 529		server->in_flight++;
 530		if (server->in_flight > server->max_in_flight)
 531			server->max_in_flight = server->in_flight;
 532		*credits -= 1;
 533		*instance = server->reconnect_instance;
 534		scredits = *credits;
 535		in_flight = server->in_flight;
 536		spin_unlock(&server->req_lock);
 537
 538		trace_smb3_nblk_credits(server->CurrentMid,
 539				server->conn_id, server->hostname, scredits, -1, in_flight);
 540		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
 541				__func__, 1, scredits);
 542
 543		return 0;
 544	}
 545
 546	while (1) {
 547		if (*credits < num_credits) {
 548			scredits = *credits;
 549			spin_unlock(&server->req_lock);
 550
 551			cifs_num_waiters_inc(server);
 552			rc = wait_event_killable_timeout(server->request_q,
 553				has_credits(server, credits, num_credits), t);
 554			cifs_num_waiters_dec(server);
 555			if (!rc) {
 556				spin_lock(&server->req_lock);
 557				scredits = *credits;
 558				in_flight = server->in_flight;
 559				spin_unlock(&server->req_lock);
 560
 561				trace_smb3_credit_timeout(server->CurrentMid,
 562						server->conn_id, server->hostname, scredits,
 563						num_credits, in_flight);
 564				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
 565						timeout);
 566				return -EBUSY;
 567			}
 568			if (rc == -ERESTARTSYS)
 569				return -ERESTARTSYS;
 570			spin_lock(&server->req_lock);
 571		} else {
 572			spin_unlock(&server->req_lock);
 573
 574			spin_lock(&server->srv_lock);
 575			if (server->tcpStatus == CifsExiting) {
 576				spin_unlock(&server->srv_lock);
 577				return -ENOENT;
 578			}
 579			spin_unlock(&server->srv_lock);
 580
 581			/*
 582			 * For normal commands, reserve the last MAX_COMPOUND
 583			 * credits to compound requests.
 584			 * Otherwise these compounds could be permanently
 585			 * starved for credits by single-credit requests.
 586			 *
 587			 * To prevent spinning CPU, block this thread until
 588			 * there are >MAX_COMPOUND credits available.
 589			 * But only do this if we already have a lot of
 590			 * credits in flight to avoid triggering this check
 591			 * for servers that are slow to hand out credits on
 592			 * new sessions.
 593			 */
 594			spin_lock(&server->req_lock);
 595			if (!optype && num_credits == 1 &&
 596			    server->in_flight > 2 * MAX_COMPOUND &&
 597			    *credits <= MAX_COMPOUND) {
 598				spin_unlock(&server->req_lock);
 599
 600				cifs_num_waiters_inc(server);
 601				rc = wait_event_killable_timeout(
 602					server->request_q,
 603					has_credits(server, credits,
 604						    MAX_COMPOUND + 1),
 605					t);
 606				cifs_num_waiters_dec(server);
 607				if (!rc) {
 608					spin_lock(&server->req_lock);
 609					scredits = *credits;
 610					in_flight = server->in_flight;
 611					spin_unlock(&server->req_lock);
 612
 613					trace_smb3_credit_timeout(
 614							server->CurrentMid,
 615							server->conn_id, server->hostname,
 616							scredits, num_credits, in_flight);
 617					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
 618							timeout);
 619					return -EBUSY;
 620				}
 621				if (rc == -ERESTARTSYS)
 622					return -ERESTARTSYS;
 623				spin_lock(&server->req_lock);
 624				continue;
 625			}
 626
 627			/*
 628			 * Can not count locking commands against total
 629			 * as they are allowed to block on server.
 630			 */
 631
 632			/* update # of requests on the wire to server */
 633			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
 634				*credits -= num_credits;
 635				server->in_flight += num_credits;
 636				if (server->in_flight > server->max_in_flight)
 637					server->max_in_flight = server->in_flight;
 638				*instance = server->reconnect_instance;
 639			}
 640			scredits = *credits;
 641			in_flight = server->in_flight;
 642			spin_unlock(&server->req_lock);
 643
 644			trace_smb3_waitff_credits(server->CurrentMid,
 645					server->conn_id, server->hostname, scredits,
 646					-(num_credits), in_flight);
 647			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
 648					__func__, num_credits, scredits);
 649			break;
 650		}
 651	}
 652	return 0;
 653}
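
/*
 * Editorial note (not part of the original source): MAX_COMPOUND is 5 in the
 * current tree (an assumption worth re-checking), so the reservation above
 * means that on a busy connection (more than 10 requests in flight) a plain
 * single-credit request that finds 5 or fewer credits left sleeps until at
 * least 6 are available again, keeping a full compound chain from being
 * starved by a stream of single-credit requests.
 */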
 654
 655static int
 656wait_for_free_request(struct TCP_Server_Info *server, const int flags,
 657		      unsigned int *instance)
 658{
 659	return wait_for_free_credits(server, 1, -1, flags,
 660				     instance);
 661}
 662
 663static int
 664wait_for_compound_request(struct TCP_Server_Info *server, int num,
 665			  const int flags, unsigned int *instance)
 666{
 667	int *credits;
 668	int scredits, in_flight;
 669
 670	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
 671
 672	spin_lock(&server->req_lock);
 673	scredits = *credits;
 674	in_flight = server->in_flight;
 675
 676	if (*credits < num) {
 677		/*
 678		 * If the server is tight on resources or just gives us less
 679		 * credits for other reasons (e.g. requests are coming out of
 680		 * order and the server delays granting more credits until it
 681		 * processes a missing mid) and we exhausted most available
 682		 * credits there may be situations when we try to send
 683		 * a compound request but we don't have enough credits. At this
 684		 * point the client needs to decide if it should wait for
 685		 * additional credits or fail the request. If at least one
 686		 * request is in flight there is a high probability that the
 687		 * server will return enough credits to satisfy this compound
 688		 * request.
 689		 *
 690		 * Return immediately if no requests in flight since we will be
 691		 * stuck on waiting for credits.
 692		 */
 693		if (server->in_flight == 0) {
 694			spin_unlock(&server->req_lock);
 695			trace_smb3_insufficient_credits(server->CurrentMid,
 696					server->conn_id, server->hostname, scredits,
 697					num, in_flight);
 698			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
 699					__func__, in_flight, num, scredits);
 700			return -EDEADLK;
 701		}
 702	}
 703	spin_unlock(&server->req_lock);
 704
 705	return wait_for_free_credits(server, num, 60000, flags,
 706				     instance);
 707}
 708
 709int
 710cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
 711		      unsigned int *num, struct cifs_credits *credits)
 712{
 713	*num = size;
 714	credits->value = 0;
 715	credits->instance = server->reconnect_instance;
 716	return 0;
 717}
 718
 719static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
 720			struct mid_q_entry **ppmidQ)
 721{
 722	spin_lock(&ses->ses_lock);
 723	if (ses->ses_status == SES_NEW) {
 724		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
 725			(in_buf->Command != SMB_COM_NEGOTIATE)) {
 726			spin_unlock(&ses->ses_lock);
 727			return -EAGAIN;
 728		}
 729		/* else ok - we are setting up session */
 730	}
 731
 732	if (ses->ses_status == SES_EXITING) {
 733		/* check if SMB session is bad because we are setting it up */
 734		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
 735			spin_unlock(&ses->ses_lock);
 736			return -EAGAIN;
 737		}
 738		/* else ok - we are shutting down session */
 739	}
 740	spin_unlock(&ses->ses_lock);
 741
 742	*ppmidQ = alloc_mid(in_buf, ses->server);
 743	if (*ppmidQ == NULL)
 744		return -ENOMEM;
 745	spin_lock(&ses->server->mid_lock);
 746	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
 747	spin_unlock(&ses->server->mid_lock);
 748	return 0;
 749}
 750
 751static int
 752wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 753{
 754	int error;
 755
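	/*
	 * Editorial note (not in the original source): TASK_KILLABLE means only
	 * a fatal signal can interrupt this wait (ordinary signals are ignored),
	 * and TASK_FREEZABLE_UNSAFE marks the sleep as freezable so it does not
	 * block system suspend while waiting for the response.
	 */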
 756	error = wait_event_state(server->response_q,
 757				 midQ->mid_state != MID_REQUEST_SUBMITTED,
 758				 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
 759	if (error < 0)
 760		return -ERESTARTSYS;
 761
 762	return 0;
 763}
 764
 765struct mid_q_entry *
 766cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 767{
 768	int rc;
 769	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
 770	struct mid_q_entry *mid;
 771
 772	if (rqst->rq_iov[0].iov_len != 4 ||
 773	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
 774		return ERR_PTR(-EIO);
 775
 776	/* enable signing if server requires it */
 777	if (server->sign)
 778		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 779
 780	mid = alloc_mid(hdr, server);
 781	if (mid == NULL)
 782		return ERR_PTR(-ENOMEM);
 783
 784	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
 785	if (rc) {
 786		release_mid(mid);
 787		return ERR_PTR(rc);
 788	}
 789
 790	return mid;
 791}
 792
 793/*
 794 * Send a SMB request and set the callback function in the mid to handle
 795 * the result. Caller is responsible for dealing with timeouts.
 796 */
 797int
 798cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
 799		mid_receive_t *receive, mid_callback_t *callback,
 800		mid_handle_t *handle, void *cbdata, const int flags,
 801		const struct cifs_credits *exist_credits)
 802{
 803	int rc;
 804	struct mid_q_entry *mid;
 805	struct cifs_credits credits = { .value = 0, .instance = 0 };
 806	unsigned int instance;
 807	int optype;
 808
 809	optype = flags & CIFS_OP_MASK;
 810
 811	if ((flags & CIFS_HAS_CREDITS) == 0) {
 812		rc = wait_for_free_request(server, flags, &instance);
 813		if (rc)
 814			return rc;
 815		credits.value = 1;
 816		credits.instance = instance;
 817	} else
 818		instance = exist_credits->instance;
 819
 820	cifs_server_lock(server);
 821
 822	/*
 823	 * We can't use credits obtained from the previous session to send this
 824	 * request. Check if there were reconnects after we obtained credits and
 825	 * return -EAGAIN in such cases to let callers handle it.
 826	 */
 827	if (instance != server->reconnect_instance) {
 828		cifs_server_unlock(server);
 829		add_credits_and_wake_if(server, &credits, optype);
 830		return -EAGAIN;
 831	}
 832
 833	mid = server->ops->setup_async_request(server, rqst);
 834	if (IS_ERR(mid)) {
 835		cifs_server_unlock(server);
 836		add_credits_and_wake_if(server, &credits, optype);
 837		return PTR_ERR(mid);
 838	}
 839
 840	mid->receive = receive;
 841	mid->callback = callback;
 842	mid->callback_data = cbdata;
 843	mid->handle = handle;
 844	mid->mid_state = MID_REQUEST_SUBMITTED;
 845
 846	/* put it on the pending_mid_q */
 847	spin_lock(&server->mid_lock);
 848	list_add_tail(&mid->qhead, &server->pending_mid_q);
 849	spin_unlock(&server->mid_lock);
 850
 851	/*
 852	 * Need to store the time in mid before calling I/O. For call_async,
 853	 * I/O response may come back and free the mid entry on another thread.
 854	 */
 855	cifs_save_when_sent(mid);
 856	cifs_in_send_inc(server);
 857	rc = smb_send_rqst(server, 1, rqst, flags);
 858	cifs_in_send_dec(server);
 859
 860	if (rc < 0) {
 861		revert_current_mid(server, mid->credits);
 862		server->sequence_number -= 2;
 863		delete_mid(mid);
 864	}
 865
 866	cifs_server_unlock(server);
 867
 868	if (rc == 0)
 869		return 0;
 870
 871	add_credits_and_wake_if(server, &credits, optype);
 872	return rc;
 873}
 874
 875/*
 876 *
 877 * Send an SMB Request.  No response info (other than return code)
 878 * needs to be parsed.
 879 *
 880 * flags indicate the type of request buffer and how long to wait
 881 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 882 *
 883 */
 884int
 885SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
 886		 char *in_buf, int flags)
 887{
 888	int rc;
 889	struct kvec iov[1];
 890	struct kvec rsp_iov;
 891	int resp_buf_type;
 892
 893	iov[0].iov_base = in_buf;
 894	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
 895	flags |= CIFS_NO_RSP_BUF;
 896	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
 897	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
 898
 899	return rc;
 900}
 901
 902static int
 903cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
 904{
 905	int rc = 0;
 906
 907	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
 908		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
 909
 910	spin_lock(&server->mid_lock);
 911	switch (mid->mid_state) {
 912	case MID_RESPONSE_RECEIVED:
 913		spin_unlock(&server->mid_lock);
 914		return rc;
 915	case MID_RETRY_NEEDED:
 916		rc = -EAGAIN;
 917		break;
 918	case MID_RESPONSE_MALFORMED:
 919		rc = -EIO;
 920		break;
 921	case MID_SHUTDOWN:
 922		rc = -EHOSTDOWN;
 923		break;
 924	default:
 925		if (!(mid->mid_flags & MID_DELETED)) {
 926			list_del_init(&mid->qhead);
 927			mid->mid_flags |= MID_DELETED;
 928		}
 929		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
 930			 __func__, mid->mid, mid->mid_state);
 931		rc = -EIO;
 932	}
 933	spin_unlock(&server->mid_lock);
 934
 935	release_mid(mid);
 936	return rc;
 937}
 938
 939static inline int
 940send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
 941	    struct mid_q_entry *mid)
 942{
 943	return server->ops->send_cancel ?
 944				server->ops->send_cancel(server, rqst, mid) : 0;
 945}
 946
 947int
 948cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
 949		   bool log_error)
 950{
 951	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
 952
 953	dump_smb(mid->resp_buf, min_t(u32, 92, len));
 954
 955	/* convert the length into a more usable form */
 956	if (server->sign) {
 957		struct kvec iov[2];
 958		int rc = 0;
 959		struct smb_rqst rqst = { .rq_iov = iov,
 960					 .rq_nvec = 2 };
 961
 962		iov[0].iov_base = mid->resp_buf;
 963		iov[0].iov_len = 4;
 964		iov[1].iov_base = (char *)mid->resp_buf + 4;
 965		iov[1].iov_len = len - 4;
 966		/* FIXME: add code to kill session */
 967		rc = cifs_verify_signature(&rqst, server,
 968					   mid->sequence_number);
 969		if (rc)
 970			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
 971				 rc);
 972	}
 973
 974	/* BB special case reconnect tid and uid here? */
 975	return map_and_check_smb_error(mid, log_error);
 976}
 977
 978struct mid_q_entry *
 979cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
 980		   struct smb_rqst *rqst)
 981{
 982	int rc;
 983	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
 984	struct mid_q_entry *mid;
 985
 986	if (rqst->rq_iov[0].iov_len != 4 ||
 987	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
 988		return ERR_PTR(-EIO);
 989
 990	rc = allocate_mid(ses, hdr, &mid);
 991	if (rc)
 992		return ERR_PTR(rc);
 993	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
 994	if (rc) {
 995		delete_mid(mid);
 996		return ERR_PTR(rc);
 997	}
 998	return mid;
 999}
1000
1001static void
1002cifs_compound_callback(struct mid_q_entry *mid)
1003{
1004	struct TCP_Server_Info *server = mid->server;
1005	struct cifs_credits credits;
1006
1007	credits.value = server->ops->get_credits(mid);
1008	credits.instance = server->reconnect_instance;
1009
1010	add_credits(server, &credits, mid->optype);
1011}
1012
1013static void
1014cifs_compound_last_callback(struct mid_q_entry *mid)
1015{
1016	cifs_compound_callback(mid);
1017	cifs_wake_up_task(mid);
1018}
1019
1020static void
1021cifs_cancelled_callback(struct mid_q_entry *mid)
1022{
1023	cifs_compound_callback(mid);
1024	release_mid(mid);
1025}
1026
1027/*
1028 * Return a channel (master if none) of @ses that can be used to send
1029 * regular requests.
1030 *
1031 * If we are currently binding a new channel (negprot/sess.setup),
1032 * return the new incomplete channel.
1033 */
1034struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1035{
1036	uint index = 0;
1037
1038	if (!ses)
1039		return NULL;
1040
1041	/* round robin */
1042	index = (uint)atomic_inc_return(&ses->chan_seq);
1043
1044	spin_lock(&ses->chan_lock);
1045	index %= ses->chan_count;
1046	spin_unlock(&ses->chan_lock);
1047
1048	return ses->chans[index].server;
1049}
1050
1051int
1052compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
1053		   struct TCP_Server_Info *server,
1054		   const int flags, const int num_rqst, struct smb_rqst *rqst,
1055		   int *resp_buf_type, struct kvec *resp_iov)
1056{
1057	int i, j, optype, rc = 0;
1058	struct mid_q_entry *midQ[MAX_COMPOUND];
1059	bool cancelled_mid[MAX_COMPOUND] = {false};
1060	struct cifs_credits credits[MAX_COMPOUND] = {
1061		{ .value = 0, .instance = 0 }
1062	};
1063	unsigned int instance;
1064	char *buf;
1065
1066	optype = flags & CIFS_OP_MASK;
1067
1068	for (i = 0; i < num_rqst; i++)
1069		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */
1070
1071	if (!ses || !ses->server || !server) {
1072		cifs_dbg(VFS, "Null session\n");
1073		return -EIO;
1074	}
1075
1076	spin_lock(&server->srv_lock);
1077	if (server->tcpStatus == CifsExiting) {
1078		spin_unlock(&server->srv_lock);
1079		return -ENOENT;
1080	}
1081	spin_unlock(&server->srv_lock);
1082
1083	/*
1084	 * Wait for all the requests to become available.
1085	 * This approach still leaves the possibility to be stuck waiting for
1086	 * credits if the server doesn't grant credits to the outstanding
1087	 * requests and if the client is completely idle, not generating any
1088	 * other requests.
1089	 * This can be handled by the eventual session reconnect.
1090	 */
1091	rc = wait_for_compound_request(server, num_rqst, flags,
1092				       &instance);
1093	if (rc)
1094		return rc;
1095
1096	for (i = 0; i < num_rqst; i++) {
1097		credits[i].value = 1;
1098		credits[i].instance = instance;
1099	}
1100
1101	/*
1102	 * Make sure that we sign in the same order that we send on this socket
1103	 * and avoid races inside tcp sendmsg code that could cause corruption
1104	 * of smb data.
1105	 */
1106
1107	cifs_server_lock(server);
1108
1109	/*
1110	 * All the parts of the compound chain must use credits obtained from
1111	 * the same session. We can not use credits obtained from the previous
1112	 * session to send this request. Check if there were reconnects after
1113	 * we obtained credits and return -EAGAIN in such cases to let callers
1114	 * handle it.
1115	 */
1116	if (instance != server->reconnect_instance) {
1117		cifs_server_unlock(server);
1118		for (j = 0; j < num_rqst; j++)
1119			add_credits(server, &credits[j], optype);
1120		return -EAGAIN;
1121	}
1122
1123	for (i = 0; i < num_rqst; i++) {
1124		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
1125		if (IS_ERR(midQ[i])) {
1126			revert_current_mid(server, i);
1127			for (j = 0; j < i; j++)
1128				delete_mid(midQ[j]);
1129			cifs_server_unlock(server);
1130
1131			/* Update # of requests on wire to server */
1132			for (j = 0; j < num_rqst; j++)
1133				add_credits(server, &credits[j], optype);
1134			return PTR_ERR(midQ[i]);
1135		}
1136
1137		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
1138		midQ[i]->optype = optype;
1139		/*
1140		 * Invoke callback for every part of the compound chain
1141		 * to calculate credits properly. Wake up this thread only when
1142		 * the last element is received.
1143		 */
1144		if (i < num_rqst - 1)
1145			midQ[i]->callback = cifs_compound_callback;
1146		else
1147			midQ[i]->callback = cifs_compound_last_callback;
1148	}
1149	cifs_in_send_inc(server);
1150	rc = smb_send_rqst(server, num_rqst, rqst, flags);
1151	cifs_in_send_dec(server);
1152
1153	for (i = 0; i < num_rqst; i++)
1154		cifs_save_when_sent(midQ[i]);
1155
1156	if (rc < 0) {
1157		revert_current_mid(server, num_rqst);
1158		server->sequence_number -= 2;
1159	}
1160
1161	cifs_server_unlock(server);
1162
1163	/*
1164	 * If sending failed for some reason or it is an oplock break that we
1165	 * will not receive a response to - return credits back
1166	 */
1167	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
1168		for (i = 0; i < num_rqst; i++)
1169			add_credits(server, &credits[i], optype);
1170		goto out;
1171	}
1172
1173	/*
1174	 * At this point the request is passed to the network stack - we assume
1175	 * that any credits taken from the server structure on the client have
1176	 * been spent and we can't return them back. Once we receive responses
1177	 * we will collect credits granted by the server in the mid callbacks
1178	 * and add those credits to the server structure.
1179	 */
1180
1181	/*
1182	 * Compounding is never used during session establish.
1183	 */
1184	spin_lock(&ses->ses_lock);
1185	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
1186		spin_unlock(&ses->ses_lock);
1187
1188		cifs_server_lock(server);
1189		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
1190		cifs_server_unlock(server);
1191
1192		spin_lock(&ses->ses_lock);
1193	}
1194	spin_unlock(&ses->ses_lock);
1195
1196	for (i = 0; i < num_rqst; i++) {
1197		rc = wait_for_response(server, midQ[i]);
1198		if (rc != 0)
1199			break;
1200	}
1201	if (rc != 0) {
1202		for (; i < num_rqst; i++) {
1203			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
1204				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
1205			send_cancel(server, &rqst[i], midQ[i]);
1206			spin_lock(&server->mid_lock);
1207			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
1208			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
1209				midQ[i]->callback = cifs_cancelled_callback;
1210				cancelled_mid[i] = true;
1211				credits[i].value = 0;
1212			}
1213			spin_unlock(&server->mid_lock);
1214		}
1215	}
1216
1217	for (i = 0; i < num_rqst; i++) {
1218		if (rc < 0)
1219			goto out;
1220
1221		rc = cifs_sync_mid_result(midQ[i], server);
1222		if (rc != 0) {
1223			/* mark this mid as cancelled to not free it below */
1224			cancelled_mid[i] = true;
1225			goto out;
1226		}
1227
1228		if (!midQ[i]->resp_buf ||
1229		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
1230			rc = -EIO;
1231			cifs_dbg(FYI, "Bad MID state?\n");
1232			goto out;
1233		}
1234
1235		buf = (char *)midQ[i]->resp_buf;
1236		resp_iov[i].iov_base = buf;
1237		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1238			HEADER_PREAMBLE_SIZE(server);
1239
1240		if (midQ[i]->large_buf)
1241			resp_buf_type[i] = CIFS_LARGE_BUFFER;
1242		else
1243			resp_buf_type[i] = CIFS_SMALL_BUFFER;
1244
1245		rc = server->ops->check_receive(midQ[i], server,
1246						     flags & CIFS_LOG_ERROR);
1247
1248		/* mark it so buf will not be freed by delete_mid */
1249		if ((flags & CIFS_NO_RSP_BUF) == 0)
1250			midQ[i]->resp_buf = NULL;
1251
1252	}
1253
1254	/*
1255	 * Compounding is never used during session establish.
1256	 */
1257	spin_lock(&ses->ses_lock);
1258	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
1259		struct kvec iov = {
1260			.iov_base = resp_iov[0].iov_base,
1261			.iov_len = resp_iov[0].iov_len
1262		};
1263		spin_unlock(&ses->ses_lock);
1264		cifs_server_lock(server);
1265		smb311_update_preauth_hash(ses, server, &iov, 1);
1266		cifs_server_unlock(server);
1267		spin_lock(&ses->ses_lock);
1268	}
1269	spin_unlock(&ses->ses_lock);
1270
1271out:
1272	/*
1273	 * This will dequeue all mids. After this it is important that the
1274	 * demultiplex_thread will not process any of these mids any further.
1275	 * This is prevented above by using a noop callback that will not
1276	 * wake this thread except for the very last PDU.
1277	 */
1278	for (i = 0; i < num_rqst; i++) {
1279		if (!cancelled_mid[i])
1280			delete_mid(midQ[i]);
1281	}
1282
1283	return rc;
1284}
1285
1286int
1287cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
1288	       struct TCP_Server_Info *server,
1289	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
1290	       struct kvec *resp_iov)
1291{
1292	return compound_send_recv(xid, ses, server, flags, 1,
1293				  rqst, resp_buf_type, resp_iov);
1294}
1295
1296int
1297SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1298	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1299	     const int flags, struct kvec *resp_iov)
1300{
1301	struct smb_rqst rqst;
1302	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1303	int rc;
1304
1305	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1306		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1307					GFP_KERNEL);
1308		if (!new_iov) {
1309			/* otherwise cifs_send_recv below sets resp_buf_type */
1310			*resp_buf_type = CIFS_NO_BUFFER;
1311			return -ENOMEM;
1312		}
1313	} else
1314		new_iov = s_iov;
1315
1316	/* 1st iov is a RFC1001 length followed by the rest of the packet */
1317	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1318
1319	new_iov[0].iov_base = new_iov[1].iov_base;
1320	new_iov[0].iov_len = 4;
1321	new_iov[1].iov_base += 4;
1322	new_iov[1].iov_len -= 4;
1323
1324	memset(&rqst, 0, sizeof(struct smb_rqst));
1325	rqst.rq_iov = new_iov;
1326	rqst.rq_nvec = n_vec + 1;
1327
1328	rc = cifs_send_recv(xid, ses, ses->server,
1329			    &rqst, resp_buf_type, flags, resp_iov);
1330	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1331		kfree(new_iov);
1332	return rc;
1333}
1334
1335int
1336SendReceive(const unsigned int xid, struct cifs_ses *ses,
1337	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1338	    int *pbytes_returned, const int flags)
1339{
1340	int rc = 0;
1341	struct mid_q_entry *midQ;
1342	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1343	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1344	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1345	struct cifs_credits credits = { .value = 1, .instance = 0 };
1346	struct TCP_Server_Info *server;
1347
1348	if (ses == NULL) {
1349		cifs_dbg(VFS, "Null smb session\n");
1350		return -EIO;
1351	}
1352	server = ses->server;
1353	if (server == NULL) {
1354		cifs_dbg(VFS, "Null tcp session\n");
1355		return -EIO;
1356	}
1357
1358	spin_lock(&server->srv_lock);
1359	if (server->tcpStatus == CifsExiting) {
1360		spin_unlock(&server->srv_lock);
1361		return -ENOENT;
1362	}
1363	spin_unlock(&server->srv_lock);
1364
1365	/* Ensure that we do not send more than 50 overlapping requests
1366	   to the same server. We may make this configurable later or
1367	   use ses->maxReq */
1368
1369	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1370		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1371				len);
1372		return -EIO;
1373	}
1374
1375	rc = wait_for_free_request(server, flags, &credits.instance);
1376	if (rc)
1377		return rc;
1378
1379	/* make sure that we sign in the same order that we send on this socket
1380	   and avoid races inside tcp sendmsg code that could cause corruption
1381	   of smb data */
1382
1383	cifs_server_lock(server);
1384
1385	rc = allocate_mid(ses, in_buf, &midQ);
1386	if (rc) {
1387		cifs_server_unlock(server);
1388		/* Update # of requests on wire to server */
1389		add_credits(server, &credits, 0);
1390		return rc;
1391	}
1392
1393	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1394	if (rc) {
1395		cifs_server_unlock(server);
1396		goto out;
1397	}
1398
1399	midQ->mid_state = MID_REQUEST_SUBMITTED;
1400
1401	cifs_in_send_inc(server);
1402	rc = smb_send(server, in_buf, len);
1403	cifs_in_send_dec(server);
1404	cifs_save_when_sent(midQ);
1405
1406	if (rc < 0)
1407		server->sequence_number -= 2;
1408
1409	cifs_server_unlock(server);
1410
1411	if (rc < 0)
1412		goto out;
1413
1414	rc = wait_for_response(server, midQ);
1415	if (rc != 0) {
1416		send_cancel(server, &rqst, midQ);
1417		spin_lock(&server->mid_lock);
1418		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1419			/* no longer considered to be "in-flight" */
1420			midQ->callback = release_mid;
1421			spin_unlock(&server->mid_lock);
1422			add_credits(server, &credits, 0);
1423			return rc;
1424		}
1425		spin_unlock(&server->mid_lock);
1426	}
1427
1428	rc = cifs_sync_mid_result(midQ, server);
1429	if (rc != 0) {
1430		add_credits(server, &credits, 0);
1431		return rc;
1432	}
1433
1434	if (!midQ->resp_buf || !out_buf ||
1435	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
1436		rc = -EIO;
1437		cifs_server_dbg(VFS, "Bad MID state?\n");
1438		goto out;
1439	}
1440
1441	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1442	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1443	rc = cifs_check_receive(midQ, server, 0);
1444out:
1445	delete_mid(midQ);
1446	add_credits(server, &credits, 0);
1447
1448	return rc;
1449}
1450
1451/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1452   blocking lock to return. */
1453
1454static int
1455send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1456			struct smb_hdr *in_buf,
1457			struct smb_hdr *out_buf)
1458{
1459	int bytes_returned;
1460	struct cifs_ses *ses = tcon->ses;
1461	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1462
1463	/* We just modify the current in_buf to change
1464	   the type of lock from LOCKING_ANDX_SHARED_LOCK
1465	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
1466	   LOCKING_ANDX_CANCEL_LOCK. */
1467
1468	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1469	pSMB->Timeout = 0;
1470	pSMB->hdr.Mid = get_next_mid(ses->server);
1471
1472	return SendReceive(xid, ses, in_buf, out_buf,
1473			&bytes_returned, 0);
1474}
1475
1476int
1477SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1478	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1479	    int *pbytes_returned)
1480{
1481	int rc = 0;
1482	int rstart = 0;
1483	struct mid_q_entry *midQ;
1484	struct cifs_ses *ses;
1485	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1486	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1487	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1488	unsigned int instance;
1489	struct TCP_Server_Info *server;
1490
1491	if (tcon == NULL || tcon->ses == NULL) {
1492		cifs_dbg(VFS, "Null smb session\n");
1493		return -EIO;
1494	}
1495	ses = tcon->ses;
1496	server = ses->server;
1497
1498	if (server == NULL) {
1499		cifs_dbg(VFS, "Null tcp session\n");
1500		return -EIO;
1501	}
1502
1503	spin_lock(&server->srv_lock);
1504	if (server->tcpStatus == CifsExiting) {
1505		spin_unlock(&server->srv_lock);
1506		return -ENOENT;
1507	}
1508	spin_unlock(&server->srv_lock);
1509
1510	/* Ensure that we do not send more than 50 overlapping requests
1511	   to the same server. We may make this configurable later or
1512	   use ses->maxReq */
1513
1514	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1515		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1516			      len);
1517		return -EIO;
1518	}
1519
1520	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1521	if (rc)
1522		return rc;
1523
1524	/* make sure that we sign in the same order that we send on this socket
1525	   and avoid races inside tcp sendmsg code that could cause corruption
1526	   of smb data */
1527
1528	cifs_server_lock(server);
1529
1530	rc = allocate_mid(ses, in_buf, &midQ);
1531	if (rc) {
1532		cifs_server_unlock(server);
1533		return rc;
1534	}
1535
1536	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1537	if (rc) {
1538		delete_mid(midQ);
1539		cifs_server_unlock(server);
1540		return rc;
1541	}
1542
1543	midQ->mid_state = MID_REQUEST_SUBMITTED;
1544	cifs_in_send_inc(server);
1545	rc = smb_send(server, in_buf, len);
1546	cifs_in_send_dec(server);
1547	cifs_save_when_sent(midQ);
1548
1549	if (rc < 0)
1550		server->sequence_number -= 2;
1551
1552	cifs_server_unlock(server);
1553
1554	if (rc < 0) {
1555		delete_mid(midQ);
1556		return rc;
1557	}
1558
1559	/* Wait for a reply - allow signals to interrupt. */
1560	rc = wait_event_interruptible(server->response_q,
1561		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
1562		((server->tcpStatus != CifsGood) &&
1563		 (server->tcpStatus != CifsNew)));
1564
1565	/* Were we interrupted by a signal ? */
1566	spin_lock(&server->srv_lock);
1567	if ((rc == -ERESTARTSYS) &&
1568		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1569		((server->tcpStatus == CifsGood) ||
1570		 (server->tcpStatus == CifsNew))) {
1571		spin_unlock(&server->srv_lock);
1572
1573		if (in_buf->Command == SMB_COM_TRANSACTION2) {
1574			/* POSIX lock. We send a NT_CANCEL SMB to cause the
1575			   blocking lock to return. */
1576			rc = send_cancel(server, &rqst, midQ);
1577			if (rc) {
1578				delete_mid(midQ);
1579				return rc;
1580			}
1581		} else {
1582			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1583			   to cause the blocking lock to return. */
1584
1585			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1586
1587			/* If we get -ENOLCK back the lock may have
1588			   already been removed. Don't exit in this case. */
1589			if (rc && rc != -ENOLCK) {
1590				delete_mid(midQ);
1591				return rc;
1592			}
1593		}
1594
1595		rc = wait_for_response(server, midQ);
1596		if (rc) {
1597			send_cancel(server, &rqst, midQ);
1598			spin_lock(&server->mid_lock);
1599			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1600				/* no longer considered to be "in-flight" */
1601				midQ->callback = release_mid;
1602				spin_unlock(&server->mid_lock);
1603				return rc;
1604			}
1605			spin_unlock(&server->mid_lock);
1606		}
1607
1608		/* We got the response - restart system call. */
1609		rstart = 1;
1610		spin_lock(&server->srv_lock);
1611	}
1612	spin_unlock(&server->srv_lock);
1613
1614	rc = cifs_sync_mid_result(midQ, server);
1615	if (rc != 0)
1616		return rc;
1617
1618	/* rcvd frame is ok */
1619	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1620		rc = -EIO;
1621		cifs_tcon_dbg(VFS, "Bad MID state?\n");
1622		goto out;
1623	}
1624
1625	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1626	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1627	rc = cifs_check_receive(midQ, server, 0);
1628out:
1629	delete_mid(midQ);
1630	if (rstart && rc == -EACCES)
1631		return -ERESTARTSYS;
1632	return rc;
1633}
1634
1635/*
1636 * Discard any remaining data in the current SMB. To do this, we borrow the
1637 * current bigbuf.
1638 */
1639int
1640cifs_discard_remaining_data(struct TCP_Server_Info *server)
1641{
1642	unsigned int rfclen = server->pdu_size;
1643	int remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
1644		server->total_read;
1645
1646	while (remaining > 0) {
1647		int length;
1648
1649		length = cifs_discard_from_socket(server,
1650				min_t(size_t, remaining,
1651				      CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1652		if (length < 0)
1653			return length;
1654		server->total_read += length;
1655		remaining -= length;
1656	}
1657
1658	return 0;
1659}
1660
1661static int
1662__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
1663		     bool malformed)
1664{
1665	int length;
1666
1667	length = cifs_discard_remaining_data(server);
1668	dequeue_mid(mid, malformed);
1669	mid->resp_buf = server->smallbuf;
1670	server->smallbuf = NULL;
1671	return length;
1672}
1673
1674static int
1675cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1676{
1677	struct cifs_readdata *rdata = mid->callback_data;
1678
1679	return  __cifs_readv_discard(server, mid, rdata->result);
1680}
1681
1682int
1683cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1684{
1685	int length, len;
1686	unsigned int data_offset, data_len;
1687	struct cifs_readdata *rdata = mid->callback_data;
1688	char *buf = server->smallbuf;
1689	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
1690	bool use_rdma_mr = false;
1691
1692	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
1693		 __func__, mid->mid, rdata->offset, rdata->bytes);
1694
1695	/*
1696	 * read the rest of READ_RSP header (sans Data array), or whatever we
1697	 * can if there's not enough data. At this point, we've read down to
1698	 * the Mid.
1699	 */
1700	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
1701							HEADER_SIZE(server) + 1;
1702
1703	length = cifs_read_from_socket(server,
1704				       buf + HEADER_SIZE(server) - 1, len);
1705	if (length < 0)
1706		return length;
1707	server->total_read += length;
1708
1709	if (server->ops->is_session_expired &&
1710	    server->ops->is_session_expired(buf)) {
1711		cifs_reconnect(server, true);
1712		return -1;
1713	}
1714
1715	if (server->ops->is_status_pending &&
1716	    server->ops->is_status_pending(buf, server)) {
1717		cifs_discard_remaining_data(server);
1718		return -1;
1719	}
1720
1721	/* set up first two iov for signature check and to get credits */
1722	rdata->iov[0].iov_base = buf;
1723	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
1724	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
1725	rdata->iov[1].iov_len =
1726		server->total_read - HEADER_PREAMBLE_SIZE(server);
1727	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
1728		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
1729	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
1730		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
1731
1732	/* Was the SMB read successful? */
1733	rdata->result = server->ops->map_error(buf, false);
1734	if (rdata->result != 0) {
1735		cifs_dbg(FYI, "%s: server returned error %d\n",
1736			 __func__, rdata->result);
1737		/* normal error on read response */
1738		return __cifs_readv_discard(server, mid, false);
1739	}
1740
1741	/* Is there enough to get to the rest of the READ_RSP header? */
1742	if (server->total_read < server->vals->read_rsp_size) {
1743		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
1744			 __func__, server->total_read,
1745			 server->vals->read_rsp_size);
1746		rdata->result = -EIO;
1747		return cifs_readv_discard(server, mid);
1748	}
1749
1750	data_offset = server->ops->read_data_offset(buf) +
1751		HEADER_PREAMBLE_SIZE(server);
1752	if (data_offset < server->total_read) {
1753		/*
1754		 * win2k8 sometimes sends an offset of 0 when the read
1755		 * is beyond the EOF. Treat it as if the data starts just after
1756		 * the header.
1757		 */
1758		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
1759			 __func__, data_offset);
1760		data_offset = server->total_read;
1761	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
1762		/* data_offset is beyond the end of smallbuf */
1763		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
1764			 __func__, data_offset);
1765		rdata->result = -EIO;
1766		return cifs_readv_discard(server, mid);
1767	}
1768
1769	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
1770		 __func__, server->total_read, data_offset);
1771
1772	len = data_offset - server->total_read;
1773	if (len > 0) {
1774		/* read any junk before data into the rest of smallbuf */
1775		length = cifs_read_from_socket(server,
1776					       buf + server->total_read, len);
1777		if (length < 0)
1778			return length;
1779		server->total_read += length;
1780	}
1781
1782	/* how much data is in the response? */
1783#ifdef CONFIG_CIFS_SMB_DIRECT
1784	use_rdma_mr = rdata->mr;
1785#endif
1786	data_len = server->ops->read_data_length(buf, use_rdma_mr);
1787	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
1788		/* data_len is corrupt -- discard frame */
1789		rdata->result = -EIO;
1790		return cifs_readv_discard(server, mid);
1791	}
1792
1793	length = rdata->read_into_pages(server, rdata, data_len);
1794	if (length < 0)
1795		return length;
1796
1797	server->total_read += length;
1798
1799	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
1800		 server->total_read, buflen, data_len);
1801
1802	/* discard anything left over */
1803	if (server->total_read < buflen)
1804		return cifs_readv_discard(server, mid);
1805
1806	dequeue_mid(mid, false);
1807	mid->resp_buf = server->smallbuf;
1808	server->smallbuf = NULL;
1809	return length;
1810}
 126				server->fastest_cmd[smb_cmd] = roundtrip_time;
 127		}
 128		cifs_stats_inc(&server->num_cmds[smb_cmd]);
 129		server->time_per_cmd[smb_cmd] += roundtrip_time;
 130	}
 131	/*
  132	 * Commands taking longer than one second (the default threshold) can
  133	 * indicate that something is wrong, unless it is quite a slow link or
  134	 * a very busy server. Note that this calculation is unlikely to wrap
  135	 * as long as slow_rsp_threshold is not set far above the recommended
  136	 * maximum value (32767, i.e. 9 hours), and it is generally harmless
  137	 * even if wrong since it only affects debug counters - so leave the
  138	 * calculation as a simple comparison rather than doing multiple
  139	 * conversions and overflow checks.
 140	 */
 141	if ((slow_rsp_threshold != 0) &&
 142	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
 143	    (midEntry->command != command)) {
 144		/*
 145		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
 146		 * NB: le16_to_cpu returns unsigned so can not be negative below
 147		 */
 148		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
 149			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
 150
 151		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
 152			       midEntry->when_sent, midEntry->when_received);
 153		if (cifsFYI & CIFS_TIMER) {
 154			pr_debug("slow rsp: cmd %d mid %llu",
 155				 midEntry->command, midEntry->mid);
 156			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
 157				  now - midEntry->when_alloc,
 158				  now - midEntry->when_sent,
 159				  now - midEntry->when_received);
 160		}
 161	}
 162#endif
 163	put_task_struct(midEntry->creator);
 164
 165	mempool_free(midEntry, cifs_mid_poolp);
 166}
 167
 168void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
 169{
 170	spin_lock(&GlobalMid_Lock);
 171	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
 172	spin_unlock(&GlobalMid_Lock);
 173}
 174
 175void DeleteMidQEntry(struct mid_q_entry *midEntry)
 176{
 177	cifs_mid_q_entry_release(midEntry);
 178}
 179
 180void
 181cifs_delete_mid(struct mid_q_entry *mid)
 182{
 183	spin_lock(&GlobalMid_Lock);
 184	if (!(mid->mid_flags & MID_DELETED)) {
 185		list_del_init(&mid->qhead);
 186		mid->mid_flags |= MID_DELETED;
 187	}
 188	spin_unlock(&GlobalMid_Lock);
 189
 190	DeleteMidQEntry(mid);
 191}
 192
 193/*
 194 * smb_send_kvec - send an array of kvecs to the server
 195 * @server:	Server to send the data to
 196 * @smb_msg:	Message to send
 197 * @sent:	amount of data sent on socket is stored here
 198 *
 199 * Our basic "send data to server" function. Should be called with srv_mutex
 200 * held. The caller is responsible for handling the results.
 201 */
 202static int
 203smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
 204	      size_t *sent)
 205{
 206	int rc = 0;
 207	int retries = 0;
 208	struct socket *ssocket = server->ssocket;
 209
 210	*sent = 0;
 211
 212	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
 213	smb_msg->msg_namelen = sizeof(struct sockaddr);
 214	smb_msg->msg_control = NULL;
 215	smb_msg->msg_controllen = 0;
 216	if (server->noblocksnd)
 217		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
 218	else
 219		smb_msg->msg_flags = MSG_NOSIGNAL;
 220
 221	while (msg_data_left(smb_msg)) {
 222		/*
 223		 * If blocking send, we try 3 times, since each can block
  224		 * for 5 seconds. For nonblocking we have to try more
 225		 * but wait increasing amounts of time allowing time for
 226		 * socket to clear.  The overall time we wait in either
 227		 * case to send on the socket is about 15 seconds.
 228		 * Similarly we wait for 15 seconds for a response from
 229		 * the server in SendReceive[2] for the server to send
 230		 * a response back for most types of requests (except
 231		 * SMB Write past end of file which can be slow, and
 232		 * blocking lock operations). NFS waits slightly longer
 233		 * than CIFS, but this can make it take longer for
 234		 * nonresponsive servers to be detected and 15 seconds
 235		 * is more than enough time for modern networks to
 236		 * send a packet.  In most cases if we fail to send
 237		 * after the retries we will kill the socket and
 238		 * reconnect which may clear the network problem.
 239		 */
 240		rc = sock_sendmsg(ssocket, smb_msg);
 241		if (rc == -EAGAIN) {
 242			retries++;
 243			if (retries >= 14 ||
 244			    (!server->noblocksnd && (retries > 2))) {
 245				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
 246					 ssocket);
 247				return -EAGAIN;
 248			}
 249			msleep(1 << retries);
 250			continue;
 251		}
 252
 253		if (rc < 0)
 254			return rc;
 255
 256		if (rc == 0) {
 257			/* should never happen, letting socket clear before
 258			   retrying is our only obvious option here */
 259			cifs_server_dbg(VFS, "tcp sent no data\n");
 260			msleep(500);
 261			continue;
 262		}
 263
 264		/* send was at least partially successful */
 265		*sent += rc;
 266		retries = 0; /* in case we get ENOSPC on the next send */
 267	}
 268	return 0;
 269}
 270
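/*
 * smb_rqst_len - total number of bytes an smb_rqst will put on the wire
 * @server:	server the request is destined for
 * @rqst:	request to measure
 *
 * Sums the kvec array and the attached page array (if any). For servers
 * with no header preamble (SMB2+), a leading 4-byte RFC1002 length iovec
 * is skipped so that only the SMB payload is counted.
 */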
 271unsigned long
 272smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 273{
 274	unsigned int i;
 275	struct kvec *iov;
 276	int nvec;
 277	unsigned long buflen = 0;
 278
 279	if (server->vals->header_preamble_size == 0 &&
 280	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
 281		iov = &rqst->rq_iov[1];
 282		nvec = rqst->rq_nvec - 1;
 283	} else {
 284		iov = rqst->rq_iov;
 285		nvec = rqst->rq_nvec;
 286	}
 287
 288	/* total up iov array first */
 289	for (i = 0; i < nvec; i++)
 290		buflen += iov[i].iov_len;
 291
 292	/*
 293	 * Add in the page array if there is one. The caller needs to make
 294	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
 295	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
 296	 * PAGE_SIZE.
 297	 */
 298	if (rqst->rq_npages) {
 299		if (rqst->rq_npages == 1)
 300			buflen += rqst->rq_tailsz;
 301		else {
 302			/*
 303			 * If there is more than one page, calculate the
 304			 * buffer length based on rq_offset and rq_tailsz
 305			 */
 306			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
 307					rqst->rq_offset;
 308			buflen += rqst->rq_tailsz;
 309		}
 310	}
 311
 312	return buflen;
 313}
 314
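/*
 * __smb_send_rqst - transmit one or more requests on the server socket
 *
 * For smbdirect (RDMA) connections the requests are handed to smbd_send().
 * Otherwise the RFC1002 length marker is generated for SMB2+, each kvec
 * and page array is sent with signals blocked, and a partial send marks
 * the session for reconnect so the server discards the partial SMB.
 */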
 315static int
 316__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
 317		struct smb_rqst *rqst)
 318{
 319	int rc = 0;
 320	struct kvec *iov;
 321	int n_vec;
 322	unsigned int send_length = 0;
 323	unsigned int i, j;
 324	sigset_t mask, oldmask;
 325	size_t total_len = 0, sent, size;
 326	struct socket *ssocket = server->ssocket;
 327	struct msghdr smb_msg;
 328	__be32 rfc1002_marker;
 329
 330	if (cifs_rdma_enabled(server)) {
 331		/* return -EAGAIN when connecting or reconnecting */
 332		rc = -EAGAIN;
 333		if (server->smbd_conn)
 334			rc = smbd_send(server, num_rqst, rqst);
 335		goto smbd_done;
 336	}
 337
 338	if (ssocket == NULL)
 339		return -EAGAIN;
 340
 341	if (signal_pending(current)) {
 342		cifs_dbg(FYI, "signal is pending before sending any data\n");
 343		return -EINTR;
 344	}
 345
 346	/* cork the socket */
 347	tcp_sock_set_cork(ssocket->sk, true);
 348
 349	for (j = 0; j < num_rqst; j++)
 350		send_length += smb_rqst_len(server, &rqst[j]);
 351	rfc1002_marker = cpu_to_be32(send_length);
 352
 353	/*
 354	 * We should not allow signals to interrupt the network send because
  355	 * any partial send will cause session reconnects, increasing the
  356	 * latency of system calls and overloading the server with unnecessary
  357	 * requests.
 358	 */
 359
 360	sigfillset(&mask);
 361	sigprocmask(SIG_BLOCK, &mask, &oldmask);
 362
 363	/* Generate a rfc1002 marker for SMB2+ */
 364	if (server->vals->header_preamble_size == 0) {
 365		struct kvec hiov = {
 366			.iov_base = &rfc1002_marker,
 367			.iov_len  = 4
 368		};
 369		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
 370		rc = smb_send_kvec(server, &smb_msg, &sent);
 371		if (rc < 0)
 372			goto unmask;
 373
 374		total_len += sent;
 375		send_length += 4;
 376	}
 377
 378	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
 379
 380	for (j = 0; j < num_rqst; j++) {
 381		iov = rqst[j].rq_iov;
 382		n_vec = rqst[j].rq_nvec;
 383
 384		size = 0;
 385		for (i = 0; i < n_vec; i++) {
 386			dump_smb(iov[i].iov_base, iov[i].iov_len);
 387			size += iov[i].iov_len;
 388		}
 389
 390		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
 391
 392		rc = smb_send_kvec(server, &smb_msg, &sent);
 393		if (rc < 0)
 394			goto unmask;
 395
 396		total_len += sent;
 397
 398		/* now walk the page array and send each page in it */
 399		for (i = 0; i < rqst[j].rq_npages; i++) {
 400			struct bio_vec bvec;
 401
 402			bvec.bv_page = rqst[j].rq_pages[i];
 403			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
 404					     &bvec.bv_offset);
 405
 406			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
 407				      &bvec, 1, bvec.bv_len);
 408			rc = smb_send_kvec(server, &smb_msg, &sent);
 409			if (rc < 0)
 410				break;
 411
 412			total_len += sent;
 413		}
 414	}
 415
 416unmask:
 417	sigprocmask(SIG_SETMASK, &oldmask, NULL);
 418
 419	/*
 420	 * If signal is pending but we have already sent the whole packet to
 421	 * the server we need to return success status to allow a corresponding
  422	 * mid entry to be kept in the pending requests queue, thus allowing
  423	 * the client to handle responses from the server.
 424	 *
 425	 * If only part of the packet has been sent there is no need to hide
 426	 * interrupt because the session will be reconnected anyway, so there
 427	 * won't be any response from the server to handle.
 428	 */
 429
 430	if (signal_pending(current) && (total_len != send_length)) {
 431		cifs_dbg(FYI, "signal is pending after attempt to send\n");
 432		rc = -EINTR;
 433	}
 434
 435	/* uncork it */
 436	tcp_sock_set_cork(ssocket->sk, false);
 437
 438	if ((total_len > 0) && (total_len != send_length)) {
 439		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
 440			 send_length, total_len);
 441		/*
 442		 * If we have only sent part of an SMB then the next SMB could
 443		 * be taken as the remainder of this one. We need to kill the
 444		 * socket so the server throws away the partial SMB
 445		 */
 446		server->tcpStatus = CifsNeedReconnect;
 447		trace_smb3_partial_send_reconnect(server->CurrentMid,
 448						  server->hostname);
 449	}
 450smbd_done:
 451	if (rc < 0 && rc != -EINTR)
 452		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
 453			 rc);
 454	else if (rc > 0)
 455		rc = 0;
 456
 457	return rc;
 458}
 459
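/*
 * smb_send_rqst - send request(s), encrypting them first if required
 *
 * When CIFS_TRANSFORM_REQ is set, a transform header iovec is prepended
 * and the server's init_transform_rq op encrypts the compound before it
 * is passed to __smb_send_rqst().
 */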
 460static int
 461smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
 462	      struct smb_rqst *rqst, int flags)
 463{
 464	struct kvec iov;
 465	struct smb2_transform_hdr *tr_hdr;
 466	struct smb_rqst cur_rqst[MAX_COMPOUND];
 467	int rc;
 468
 469	if (!(flags & CIFS_TRANSFORM_REQ))
 470		return __smb_send_rqst(server, num_rqst, rqst);
 471
 472	if (num_rqst > MAX_COMPOUND - 1)
 473		return -ENOMEM;
 474
 475	if (!server->ops->init_transform_rq) {
 476		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
 477		return -EIO;
 478	}
 479
 480	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
 481	if (!tr_hdr)
 482		return -ENOMEM;
 483
 484	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
 485	memset(&iov, 0, sizeof(iov));
 486	memset(tr_hdr, 0, sizeof(*tr_hdr));
 487
 488	iov.iov_base = tr_hdr;
 489	iov.iov_len = sizeof(*tr_hdr);
 490	cur_rqst[0].rq_iov = &iov;
 491	cur_rqst[0].rq_nvec = 1;
 492
 493	rc = server->ops->init_transform_rq(server, num_rqst + 1,
 494					    &cur_rqst[0], rqst);
 495	if (rc)
 496		goto out;
 497
 498	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
 499	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
 500out:
 501	kfree(tr_hdr);
 502	return rc;
 503}
 504
 505int
 506smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
 507	 unsigned int smb_buf_length)
 508{
 509	struct kvec iov[2];
 510	struct smb_rqst rqst = { .rq_iov = iov,
 511				 .rq_nvec = 2 };
 512
 513	iov[0].iov_base = smb_buffer;
 514	iov[0].iov_len = 4;
 515	iov[1].iov_base = (char *)smb_buffer + 4;
 516	iov[1].iov_len = smb_buf_length;
 517
 518	return __smb_send_rqst(server, 1, &rqst);
 519}
 520
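/*
 * wait_for_free_credits - wait until @num_credits credits are available
 *
 * CIFS_NON_BLOCKING callers (e.g. oplock break handling) consume a credit
 * immediately without waiting. Everyone else sleeps (killable) until the
 * credits are granted or @timeout milliseconds pass (a negative timeout
 * means wait forever). On success the credits are charged (except for
 * blocking lock ops, which are not counted) and the current reconnect
 * instance is stored in *instance.
 */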
 521static int
 522wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
 523		      const int timeout, const int flags,
 524		      unsigned int *instance)
 525{
 526	long rc;
 527	int *credits;
 528	int optype;
 529	long int t;
 
 530
 531	if (timeout < 0)
 532		t = MAX_JIFFY_OFFSET;
 533	else
 534		t = msecs_to_jiffies(timeout);
 535
 536	optype = flags & CIFS_OP_MASK;
 537
 538	*instance = 0;
 539
 540	credits = server->ops->get_credits_field(server, optype);
 541	/* Since an echo is already inflight, no need to wait to send another */
 542	if (*credits <= 0 && optype == CIFS_ECHO_OP)
 543		return -EAGAIN;
 544
 545	spin_lock(&server->req_lock);
 546	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
 547		/* oplock breaks must not be held up */
 548		server->in_flight++;
 549		if (server->in_flight > server->max_in_flight)
 550			server->max_in_flight = server->in_flight;
 551		*credits -= 1;
 552		*instance = server->reconnect_instance;
 553		spin_unlock(&server->req_lock);
 554		return 0;
 555	}
 556
 557	while (1) {
 558		if (*credits < num_credits) {
 
 559			spin_unlock(&server->req_lock);
 
 560			cifs_num_waiters_inc(server);
 561			rc = wait_event_killable_timeout(server->request_q,
 562				has_credits(server, credits, num_credits), t);
 563			cifs_num_waiters_dec(server);
 564			if (!rc) {
 565				trace_smb3_credit_timeout(server->CurrentMid,
 566					server->hostname, num_credits);
 
 567				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
 568					 timeout);
 569				return -ENOTSUPP;
 570			}
 571			if (rc == -ERESTARTSYS)
 572				return -ERESTARTSYS;
 573			spin_lock(&server->req_lock);
 574		} else {
 575			if (server->tcpStatus == CifsExiting) {
 576				spin_unlock(&server->req_lock);
 577				return -ENOENT;
 578			}
 
 579
 580			/*
  581			 * credits for compound requests.
 582			 * credits to compound requests.
 583			 * Otherwise these compounds could be permanently
 584			 * starved for credits by single-credit requests.
 585			 *
 586			 * To prevent spinning CPU, block this thread until
 587			 * there are >MAX_COMPOUND credits available.
  588			 * But only do this if we already have a lot of
 589			 * credits in flight to avoid triggering this check
 590			 * for servers that are slow to hand out credits on
 591			 * new sessions.
 592			 */
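			/*
			 * For example, if MAX_COMPOUND is 5 and only 3 credits
			 * remain while dozens of requests are in flight, a
			 * single-credit request blocks here until more than 5
			 * credits are free, leaving room for a waiting compound
			 * of up to 5 operations.
			 */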
 
 593			if (!optype && num_credits == 1 &&
 594			    server->in_flight > 2 * MAX_COMPOUND &&
 595			    *credits <= MAX_COMPOUND) {
 596				spin_unlock(&server->req_lock);
 
 597				cifs_num_waiters_inc(server);
 598				rc = wait_event_killable_timeout(
 599					server->request_q,
 600					has_credits(server, credits,
 601						    MAX_COMPOUND + 1),
 602					t);
 603				cifs_num_waiters_dec(server);
 604				if (!rc) {
 605					trace_smb3_credit_timeout(
 606						server->CurrentMid,
 607						server->hostname, num_credits);
 
 608					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
 609						 timeout);
 610					return -ENOTSUPP;
 611				}
 612				if (rc == -ERESTARTSYS)
 613					return -ERESTARTSYS;
 614				spin_lock(&server->req_lock);
 615				continue;
 616			}
 617
 618			/*
  619			 * Cannot count locking commands against the total
  620			 * as they are allowed to block on the server.
 621			 */
 622
 623			/* update # of requests on the wire to server */
 624			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
 625				*credits -= num_credits;
 626				server->in_flight += num_credits;
 627				if (server->in_flight > server->max_in_flight)
 628					server->max_in_flight = server->in_flight;
 629				*instance = server->reconnect_instance;
 630			}
 631			spin_unlock(&server->req_lock);
 632			break;
 633		}
 634	}
 635	return 0;
 636}
 637
 638static int
 639wait_for_free_request(struct TCP_Server_Info *server, const int flags,
 640		      unsigned int *instance)
 641{
 642	return wait_for_free_credits(server, 1, -1, flags,
 643				     instance);
 644}
 645
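/*
 * wait_for_compound_request - reserve credits for a compound of @num requests
 *
 * Fails fast if the credit shortfall could never be covered by the
 * requests currently in flight; otherwise waits up to 60 seconds for the
 * full set of credits.
 */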
 646static int
 647wait_for_compound_request(struct TCP_Server_Info *server, int num,
 648			  const int flags, unsigned int *instance)
 649{
 650	int *credits;
 
 651
 652	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
 653
 654	spin_lock(&server->req_lock);
 655	if (*credits < num) {
 656		/*
  657		 * If there are not enough requests in flight to ever free up
  658		 * the credits we need, return immediately rather than wait.
 659		 */
 660		if (server->in_flight < num - *credits) {
 661			spin_unlock(&server->req_lock);
 662			return -ENOTSUPP;
 663		}
 664	}
 665	spin_unlock(&server->req_lock);
 666
 667	return wait_for_free_credits(server, num, 60000, flags,
 668				     instance);
 669}
 670
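/*
 * cifs_wait_mtu_credits - trivial MTU credit request used where no
 * per-request credit accounting is done: the full @size is granted and
 * no credits are charged.
 */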
 671int
 672cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
 673		      unsigned int *num, struct cifs_credits *credits)
 674{
 675	*num = size;
 676	credits->value = 0;
 677	credits->instance = server->reconnect_instance;
 678	return 0;
 679}
 680
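/*
 * allocate_mid - allocate a mid and queue it on the server's pending_mid_q
 *
 * Refuses to allocate while the TCP session is exiting or needs to be
 * reconnected, and while the SMB session is still being set up or torn
 * down (except for the session setup, negotiate and logoff commands
 * themselves).
 */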
 681static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
 682			struct mid_q_entry **ppmidQ)
 683{
 684	if (ses->server->tcpStatus == CifsExiting) {
 685		return -ENOENT;
 686	}
 687
 688	if (ses->server->tcpStatus == CifsNeedReconnect) {
 689		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
 690		return -EAGAIN;
 691	}
 692
 693	if (ses->status == CifsNew) {
 694		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
 695			(in_buf->Command != SMB_COM_NEGOTIATE))
 
 696			return -EAGAIN;
 
 697		/* else ok - we are setting up session */
 698	}
 699
 700	if (ses->status == CifsExiting) {
 701		/* check if SMB session is bad because we are setting it up */
 702		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
 
 703			return -EAGAIN;
 
 704		/* else ok - we are shutting down session */
 705	}
 
 706
 707	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
 708	if (*ppmidQ == NULL)
 709		return -ENOMEM;
 710	spin_lock(&GlobalMid_Lock);
 711	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
 712	spin_unlock(&GlobalMid_Lock);
 713	return 0;
 714}
 715
 716static int
 717wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 718{
 719	int error;
 720
 721	error = wait_event_freezekillable_unsafe(server->response_q,
 722				    midQ->mid_state != MID_REQUEST_SUBMITTED);
 
 723	if (error < 0)
 724		return -ERESTARTSYS;
 725
 726	return 0;
 727}
 728
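/*
 * cifs_setup_async_request - build and sign a mid for an async request
 *
 * Expects rq_iov[0] to be the 4-byte RFC1002 length immediately followed
 * in memory by the SMB header in rq_iov[1]. Sets the signing flag when
 * the server requires signing, then signs the request.
 */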
 729struct mid_q_entry *
 730cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 731{
 732	int rc;
 733	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
 734	struct mid_q_entry *mid;
 735
 736	if (rqst->rq_iov[0].iov_len != 4 ||
 737	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
 738		return ERR_PTR(-EIO);
 739
 740	/* enable signing if server requires it */
 741	if (server->sign)
 742		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 743
 744	mid = AllocMidQEntry(hdr, server);
 745	if (mid == NULL)
 746		return ERR_PTR(-ENOMEM);
 747
 748	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
 749	if (rc) {
 750		DeleteMidQEntry(mid);
 751		return ERR_PTR(rc);
 752	}
 753
 754	return mid;
 755}
 756
 757/*
  758 * Send an SMB request and set the callback function in the mid to handle
 759 * the result. Caller is responsible for dealing with timeouts.
 760 */
 761int
 762cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
 763		mid_receive_t *receive, mid_callback_t *callback,
 764		mid_handle_t *handle, void *cbdata, const int flags,
 765		const struct cifs_credits *exist_credits)
 766{
 767	int rc;
 768	struct mid_q_entry *mid;
 769	struct cifs_credits credits = { .value = 0, .instance = 0 };
 770	unsigned int instance;
 771	int optype;
 772
 773	optype = flags & CIFS_OP_MASK;
 774
 775	if ((flags & CIFS_HAS_CREDITS) == 0) {
 776		rc = wait_for_free_request(server, flags, &instance);
 777		if (rc)
 778			return rc;
 779		credits.value = 1;
 780		credits.instance = instance;
 781	} else
 782		instance = exist_credits->instance;
 783
 784	mutex_lock(&server->srv_mutex);
 785
 786	/*
 787	 * We can't use credits obtained from the previous session to send this
 788	 * request. Check if there were reconnects after we obtained credits and
 789	 * return -EAGAIN in such cases to let callers handle it.
 790	 */
 791	if (instance != server->reconnect_instance) {
 792		mutex_unlock(&server->srv_mutex);
 793		add_credits_and_wake_if(server, &credits, optype);
 794		return -EAGAIN;
 795	}
 796
 797	mid = server->ops->setup_async_request(server, rqst);
 798	if (IS_ERR(mid)) {
 799		mutex_unlock(&server->srv_mutex);
 800		add_credits_and_wake_if(server, &credits, optype);
 801		return PTR_ERR(mid);
 802	}
 803
 804	mid->receive = receive;
 805	mid->callback = callback;
 806	mid->callback_data = cbdata;
 807	mid->handle = handle;
 808	mid->mid_state = MID_REQUEST_SUBMITTED;
 809
 810	/* put it on the pending_mid_q */
 811	spin_lock(&GlobalMid_Lock);
 812	list_add_tail(&mid->qhead, &server->pending_mid_q);
 813	spin_unlock(&GlobalMid_Lock);
 814
 815	/*
 816	 * Need to store the time in mid before calling I/O. For call_async,
 817	 * I/O response may come back and free the mid entry on another thread.
 818	 */
 819	cifs_save_when_sent(mid);
 820	cifs_in_send_inc(server);
 821	rc = smb_send_rqst(server, 1, rqst, flags);
 822	cifs_in_send_dec(server);
 823
 824	if (rc < 0) {
 825		revert_current_mid(server, mid->credits);
 826		server->sequence_number -= 2;
 827		cifs_delete_mid(mid);
 828	}
 829
 830	mutex_unlock(&server->srv_mutex);
 831
 832	if (rc == 0)
 833		return 0;
 834
 835	add_credits_and_wake_if(server, &credits, optype);
 836	return rc;
 837}
 838
 839/*
 840 *
 841 * Send an SMB Request.  No response info (other than return code)
 842 * needs to be parsed.
 843 *
  844 * flags indicate the type of request buffer, how long to wait, and whether
  845 * to log the NT STATUS code (error) before mapping it to a POSIX error
 846 *
 847 */
 848int
 849SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
 850		 char *in_buf, int flags)
 851{
 852	int rc;
 853	struct kvec iov[1];
 854	struct kvec rsp_iov;
 855	int resp_buf_type;
 856
 857	iov[0].iov_base = in_buf;
 858	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
 859	flags |= CIFS_NO_RSP_BUF;
 860	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
 861	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
 862
 863	return rc;
 864}
 865
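/*
 * cifs_sync_mid_result - map the final state of a mid to an errno
 *
 * Returns 0 when a response was received; otherwise -EAGAIN (retry
 * needed), -EIO (malformed response or bad state) or -EHOSTDOWN
 * (shutdown), and the mid is released before returning.
 */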
 866static int
 867cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
 868{
 869	int rc = 0;
 870
 871	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
 872		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
 873
 874	spin_lock(&GlobalMid_Lock);
 875	switch (mid->mid_state) {
 876	case MID_RESPONSE_RECEIVED:
 877		spin_unlock(&GlobalMid_Lock);
 878		return rc;
 879	case MID_RETRY_NEEDED:
 880		rc = -EAGAIN;
 881		break;
 882	case MID_RESPONSE_MALFORMED:
 883		rc = -EIO;
 884		break;
 885	case MID_SHUTDOWN:
 886		rc = -EHOSTDOWN;
 887		break;
 888	default:
 889		if (!(mid->mid_flags & MID_DELETED)) {
 890			list_del_init(&mid->qhead);
 891			mid->mid_flags |= MID_DELETED;
 892		}
 893		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
 894			 __func__, mid->mid, mid->mid_state);
 895		rc = -EIO;
 896	}
 897	spin_unlock(&GlobalMid_Lock);
 898
 899	DeleteMidQEntry(mid);
 900	return rc;
 901}
 902
 903static inline int
 904send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
 905	    struct mid_q_entry *mid)
 906{
 907	return server->ops->send_cancel ?
 908				server->ops->send_cancel(server, rqst, mid) : 0;
 909}
 910
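/*
 * cifs_check_receive - validate a received SMB response
 *
 * Verifies the SMB signature when signing is in use (logging an error on
 * mismatch) and maps the server's status code to a POSIX error.
 */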
 911int
 912cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
 913		   bool log_error)
 914{
 915	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
 916
 917	dump_smb(mid->resp_buf, min_t(u32, 92, len));
 918
 919	/* convert the length into a more usable form */
 920	if (server->sign) {
 921		struct kvec iov[2];
 922		int rc = 0;
 923		struct smb_rqst rqst = { .rq_iov = iov,
 924					 .rq_nvec = 2 };
 925
 926		iov[0].iov_base = mid->resp_buf;
 927		iov[0].iov_len = 4;
 928		iov[1].iov_base = (char *)mid->resp_buf + 4;
 929		iov[1].iov_len = len - 4;
 930		/* FIXME: add code to kill session */
 931		rc = cifs_verify_signature(&rqst, server,
 932					   mid->sequence_number);
 933		if (rc)
 934			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
 935				 rc);
 936	}
 937
 938	/* BB special case reconnect tid and uid here? */
 939	return map_and_check_smb_error(mid, log_error);
 940}
 941
 942struct mid_q_entry *
 943cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
 944		   struct smb_rqst *rqst)
 945{
 946	int rc;
 947	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
 948	struct mid_q_entry *mid;
 949
 950	if (rqst->rq_iov[0].iov_len != 4 ||
 951	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
 952		return ERR_PTR(-EIO);
 953
 954	rc = allocate_mid(ses, hdr, &mid);
 955	if (rc)
 956		return ERR_PTR(rc);
 957	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
 958	if (rc) {
 959		cifs_delete_mid(mid);
 960		return ERR_PTR(rc);
 961	}
 962	return mid;
 963}
 964
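/*
 * Mid callbacks used by compound_send_recv(): every part of the chain
 * returns the credits the server granted it; only the last part also
 * wakes the waiting thread, and a cancelled part releases its mid instead.
 */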
 965static void
 966cifs_compound_callback(struct mid_q_entry *mid)
 967{
 968	struct TCP_Server_Info *server = mid->server;
 969	struct cifs_credits credits;
 970
 971	credits.value = server->ops->get_credits(mid);
 972	credits.instance = server->reconnect_instance;
 973
 974	add_credits(server, &credits, mid->optype);
 975}
 976
 977static void
 978cifs_compound_last_callback(struct mid_q_entry *mid)
 979{
 980	cifs_compound_callback(mid);
 981	cifs_wake_up_task(mid);
 982}
 983
 984static void
 985cifs_cancelled_callback(struct mid_q_entry *mid)
 986{
 987	cifs_compound_callback(mid);
 988	DeleteMidQEntry(mid);
 989}
 990
 991/*
 992 * Return a channel (master if none) of @ses that can be used to send
 993 * regular requests.
 994 *
 995 * If we are currently binding a new channel (negprot/sess.setup),
 996 * return the new incomplete channel.
 997 */
 998struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
 999{
1000	uint index = 0;
1001
1002	if (!ses)
1003		return NULL;
1004
1005	if (!ses->binding) {
1006		/* round robin */
1007		if (ses->chan_count > 1) {
1008			index = (uint)atomic_inc_return(&ses->chan_seq);
1009			index %= ses->chan_count;
1010		}
1011		return ses->chans[index].server;
1012	} else {
1013		return cifs_ses_server(ses);
1014	}
1015}
1016
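/*
 * compound_send_recv - send a chain of related requests and wait for all
 * of the responses
 *
 * Credits are reserved for every request up front and the whole chain is
 * signed and sent under srv_mutex. The calling thread is only woken when
 * the final response arrives (or the wait is cancelled). Response buffers
 * are handed to the caller in resp_iov unless CIFS_NO_RSP_BUF was passed.
 */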
1017int
1018compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
1019		   struct TCP_Server_Info *server,
1020		   const int flags, const int num_rqst, struct smb_rqst *rqst,
1021		   int *resp_buf_type, struct kvec *resp_iov)
1022{
1023	int i, j, optype, rc = 0;
1024	struct mid_q_entry *midQ[MAX_COMPOUND];
1025	bool cancelled_mid[MAX_COMPOUND] = {false};
1026	struct cifs_credits credits[MAX_COMPOUND] = {
1027		{ .value = 0, .instance = 0 }
1028	};
1029	unsigned int instance;
1030	char *buf;
1031
1032	optype = flags & CIFS_OP_MASK;
1033
1034	for (i = 0; i < num_rqst; i++)
1035		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */
1036
1037	if (!ses || !ses->server || !server) {
1038		cifs_dbg(VFS, "Null session\n");
1039		return -EIO;
1040	}
1041
1042	if (server->tcpStatus == CifsExiting)
1043		return -ENOENT;
1044
1045	/*
1046	 * Wait for all the requests to become available.
1047	 * This approach still leaves the possibility to be stuck waiting for
1048	 * credits if the server doesn't grant credits to the outstanding
1049	 * requests and if the client is completely idle, not generating any
1050	 * other requests.
1051	 * This can be handled by the eventual session reconnect.
1052	 */
1053	rc = wait_for_compound_request(server, num_rqst, flags,
1054				       &instance);
1055	if (rc)
1056		return rc;
1057
1058	for (i = 0; i < num_rqst; i++) {
1059		credits[i].value = 1;
1060		credits[i].instance = instance;
1061	}
1062
1063	/*
1064	 * Make sure that we sign in the same order that we send on this socket
1065	 * and avoid races inside tcp sendmsg code that could cause corruption
1066	 * of smb data.
1067	 */
1068
1069	mutex_lock(&server->srv_mutex);
1070
1071	/*
 1072	 * All the parts of the compound chain must use credits obtained from
 1073	 * the same session. We cannot use credits obtained from a previous
1074	 * session to send this request. Check if there were reconnects after
1075	 * we obtained credits and return -EAGAIN in such cases to let callers
1076	 * handle it.
1077	 */
1078	if (instance != server->reconnect_instance) {
1079		mutex_unlock(&server->srv_mutex);
1080		for (j = 0; j < num_rqst; j++)
1081			add_credits(server, &credits[j], optype);
1082		return -EAGAIN;
1083	}
1084
1085	for (i = 0; i < num_rqst; i++) {
1086		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
1087		if (IS_ERR(midQ[i])) {
1088			revert_current_mid(server, i);
1089			for (j = 0; j < i; j++)
1090				cifs_delete_mid(midQ[j]);
1091			mutex_unlock(&server->srv_mutex);
1092
1093			/* Update # of requests on wire to server */
1094			for (j = 0; j < num_rqst; j++)
1095				add_credits(server, &credits[j], optype);
1096			return PTR_ERR(midQ[i]);
1097		}
1098
1099		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
1100		midQ[i]->optype = optype;
1101		/*
1102		 * Invoke callback for every part of the compound chain
1103		 * to calculate credits properly. Wake up this thread only when
1104		 * the last element is received.
1105		 */
1106		if (i < num_rqst - 1)
1107			midQ[i]->callback = cifs_compound_callback;
1108		else
1109			midQ[i]->callback = cifs_compound_last_callback;
1110	}
1111	cifs_in_send_inc(server);
1112	rc = smb_send_rqst(server, num_rqst, rqst, flags);
1113	cifs_in_send_dec(server);
1114
1115	for (i = 0; i < num_rqst; i++)
1116		cifs_save_when_sent(midQ[i]);
1117
1118	if (rc < 0) {
1119		revert_current_mid(server, num_rqst);
1120		server->sequence_number -= 2;
1121	}
1122
1123	mutex_unlock(&server->srv_mutex);
1124
1125	/*
1126	 * If sending failed for some reason or it is an oplock break that we
1127	 * will not receive a response to - return credits back
1128	 */
1129	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
1130		for (i = 0; i < num_rqst; i++)
1131			add_credits(server, &credits[i], optype);
1132		goto out;
1133	}
1134
1135	/*
1136	 * At this point the request is passed to the network stack - we assume
1137	 * that any credits taken from the server structure on the client have
1138	 * been spent and we can't return them back. Once we receive responses
1139	 * we will collect credits granted by the server in the mid callbacks
1140	 * and add those credits to the server structure.
1141	 */
1142
1143	/*
1144	 * Compounding is never used during session establish.
1145	 */
1146	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
1147		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
1148					   rqst[0].rq_nvec);
1149
1150	for (i = 0; i < num_rqst; i++) {
1151		rc = wait_for_response(server, midQ[i]);
1152		if (rc != 0)
1153			break;
1154	}
1155	if (rc != 0) {
1156		for (; i < num_rqst; i++) {
1157			cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
1158				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
1159			send_cancel(server, &rqst[i], midQ[i]);
1160			spin_lock(&GlobalMid_Lock);
1161			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
1162			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
1163				midQ[i]->callback = cifs_cancelled_callback;
1164				cancelled_mid[i] = true;
1165				credits[i].value = 0;
1166			}
1167			spin_unlock(&GlobalMid_Lock);
1168		}
1169	}
1170
1171	for (i = 0; i < num_rqst; i++) {
1172		if (rc < 0)
1173			goto out;
1174
1175		rc = cifs_sync_mid_result(midQ[i], server);
1176		if (rc != 0) {
1177			/* mark this mid as cancelled to not free it below */
1178			cancelled_mid[i] = true;
1179			goto out;
1180		}
1181
1182		if (!midQ[i]->resp_buf ||
1183		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
1184			rc = -EIO;
1185			cifs_dbg(FYI, "Bad MID state?\n");
1186			goto out;
1187		}
1188
1189		buf = (char *)midQ[i]->resp_buf;
1190		resp_iov[i].iov_base = buf;
1191		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1192			server->vals->header_preamble_size;
1193
1194		if (midQ[i]->large_buf)
1195			resp_buf_type[i] = CIFS_LARGE_BUFFER;
1196		else
1197			resp_buf_type[i] = CIFS_SMALL_BUFFER;
1198
1199		rc = server->ops->check_receive(midQ[i], server,
1200						     flags & CIFS_LOG_ERROR);
1201
1202		/* mark it so buf will not be freed by cifs_delete_mid */
1203		if ((flags & CIFS_NO_RSP_BUF) == 0)
1204			midQ[i]->resp_buf = NULL;
1205
1206	}
1207
1208	/*
1209	 * Compounding is never used during session establish.
1210	 */
1211	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
 
1212		struct kvec iov = {
1213			.iov_base = resp_iov[0].iov_base,
1214			.iov_len = resp_iov[0].iov_len
1215		};
1216		smb311_update_preauth_hash(ses, &iov, 1);
1217	}
 
1218
1219out:
1220	/*
1221	 * This will dequeue all mids. After this it is important that the
 1222	 * demultiplex_thread will not process any of these mids any further.
1223	 * This is prevented above by using a noop callback that will not
1224	 * wake this thread except for the very last PDU.
1225	 */
1226	for (i = 0; i < num_rqst; i++) {
1227		if (!cancelled_mid[i])
1228			cifs_delete_mid(midQ[i]);
1229	}
1230
1231	return rc;
1232}
1233
1234int
1235cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
1236	       struct TCP_Server_Info *server,
1237	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
1238	       struct kvec *resp_iov)
1239{
1240	return compound_send_recv(xid, ses, server, flags, 1,
1241				  rqst, resp_buf_type, resp_iov);
1242}
1243
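/*
 * SendReceive2 - synchronous send/receive taking a bare kvec array
 *
 * Splits a 4-byte RFC1002 length iovec off the front of the first vector
 * and forwards the request to cifs_send_recv() on the session's server.
 */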
1244int
1245SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1246	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1247	     const int flags, struct kvec *resp_iov)
1248{
1249	struct smb_rqst rqst;
1250	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1251	int rc;
1252
1253	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1254		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1255					GFP_KERNEL);
1256		if (!new_iov) {
1257			/* otherwise cifs_send_recv below sets resp_buf_type */
1258			*resp_buf_type = CIFS_NO_BUFFER;
1259			return -ENOMEM;
1260		}
1261	} else
1262		new_iov = s_iov;
1263
1264	/* 1st iov is a RFC1001 length followed by the rest of the packet */
1265	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1266
1267	new_iov[0].iov_base = new_iov[1].iov_base;
1268	new_iov[0].iov_len = 4;
1269	new_iov[1].iov_base += 4;
1270	new_iov[1].iov_len -= 4;
1271
1272	memset(&rqst, 0, sizeof(struct smb_rqst));
1273	rqst.rq_iov = new_iov;
1274	rqst.rq_nvec = n_vec + 1;
1275
1276	rc = cifs_send_recv(xid, ses, ses->server,
1277			    &rqst, resp_buf_type, flags, resp_iov);
1278	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1279		kfree(new_iov);
1280	return rc;
1281}
1282
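/*
 * SendReceive - synchronous send/receive of a single SMB1 request
 *
 * Waits (killable and freezable) for the response, copies it into
 * @out_buf and returns the RFC1002 payload length in *pbytes_returned.
 */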
1283int
1284SendReceive(const unsigned int xid, struct cifs_ses *ses,
1285	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1286	    int *pbytes_returned, const int flags)
1287{
1288	int rc = 0;
1289	struct mid_q_entry *midQ;
1290	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1291	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1292	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1293	struct cifs_credits credits = { .value = 1, .instance = 0 };
1294	struct TCP_Server_Info *server;
1295
1296	if (ses == NULL) {
1297		cifs_dbg(VFS, "Null smb session\n");
1298		return -EIO;
1299	}
1300	server = ses->server;
1301	if (server == NULL) {
1302		cifs_dbg(VFS, "Null tcp session\n");
1303		return -EIO;
1304	}
1305
1306	if (server->tcpStatus == CifsExiting)
1307		return -ENOENT;
1308
1309	/* Ensure that we do not send more than 50 overlapping requests
1310	   to the same server. We may make this configurable later or
1311	   use ses->maxReq */
1312
1313	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1314		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1315				len);
1316		return -EIO;
1317	}
1318
1319	rc = wait_for_free_request(server, flags, &credits.instance);
1320	if (rc)
1321		return rc;
1322
1323	/* make sure that we sign in the same order that we send on this socket
1324	   and avoid races inside tcp sendmsg code that could cause corruption
1325	   of smb data */
1326
1327	mutex_lock(&server->srv_mutex);
1328
1329	rc = allocate_mid(ses, in_buf, &midQ);
1330	if (rc) {
1331		mutex_unlock(&server->srv_mutex);
1332		/* Update # of requests on wire to server */
1333		add_credits(server, &credits, 0);
1334		return rc;
1335	}
1336
1337	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1338	if (rc) {
1339		mutex_unlock(&server->srv_mutex);
1340		goto out;
1341	}
1342
1343	midQ->mid_state = MID_REQUEST_SUBMITTED;
1344
1345	cifs_in_send_inc(server);
1346	rc = smb_send(server, in_buf, len);
1347	cifs_in_send_dec(server);
1348	cifs_save_when_sent(midQ);
1349
1350	if (rc < 0)
1351		server->sequence_number -= 2;
1352
1353	mutex_unlock(&server->srv_mutex);
1354
1355	if (rc < 0)
1356		goto out;
1357
1358	rc = wait_for_response(server, midQ);
1359	if (rc != 0) {
1360		send_cancel(server, &rqst, midQ);
1361		spin_lock(&GlobalMid_Lock);
1362		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1363			/* no longer considered to be "in-flight" */
1364			midQ->callback = DeleteMidQEntry;
1365			spin_unlock(&GlobalMid_Lock);
1366			add_credits(server, &credits, 0);
1367			return rc;
1368		}
1369		spin_unlock(&GlobalMid_Lock);
1370	}
1371
1372	rc = cifs_sync_mid_result(midQ, server);
1373	if (rc != 0) {
1374		add_credits(server, &credits, 0);
1375		return rc;
1376	}
1377
1378	if (!midQ->resp_buf || !out_buf ||
1379	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
1380		rc = -EIO;
1381		cifs_server_dbg(VFS, "Bad MID state?\n");
1382		goto out;
1383	}
1384
1385	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1386	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1387	rc = cifs_check_receive(midQ, server, 0);
1388out:
1389	cifs_delete_mid(midQ);
1390	add_credits(server, &credits, 0);
1391
1392	return rc;
1393}
1394
1395/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1396   blocking lock to return. */
1397
1398static int
1399send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1400			struct smb_hdr *in_buf,
1401			struct smb_hdr *out_buf)
1402{
1403	int bytes_returned;
1404	struct cifs_ses *ses = tcon->ses;
1405	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1406
1407	/* We just modify the current in_buf to change
1408	   the type of lock from LOCKING_ANDX_SHARED_LOCK
1409	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
1410	   LOCKING_ANDX_CANCEL_LOCK. */
1411
1412	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1413	pSMB->Timeout = 0;
1414	pSMB->hdr.Mid = get_next_mid(ses->server);
1415
1416	return SendReceive(xid, ses, in_buf, out_buf,
1417			&bytes_returned, 0);
1418}
1419
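/*
 * SendReceiveBlockingLock - synchronous send/receive for blocking lock
 * requests
 *
 * Unlike SendReceive(), the wait for a response is interruptible. If a
 * signal arrives while the lock is still pending, a cancel is sent (an
 * NT_CANCEL for POSIX locks, LOCKING_ANDX_CANCEL_LOCK for Windows locks),
 * the response is still collected, and -ERESTARTSYS is returned when the
 * cancelled lock comes back with -EACCES so the system call is restarted.
 */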
1420int
1421SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1422	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1423	    int *pbytes_returned)
1424{
1425	int rc = 0;
1426	int rstart = 0;
1427	struct mid_q_entry *midQ;
1428	struct cifs_ses *ses;
1429	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1430	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1431	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1432	unsigned int instance;
1433	struct TCP_Server_Info *server;
1434
1435	if (tcon == NULL || tcon->ses == NULL) {
1436		cifs_dbg(VFS, "Null smb session\n");
1437		return -EIO;
1438	}
1439	ses = tcon->ses;
1440	server = ses->server;
1441
1442	if (server == NULL) {
1443		cifs_dbg(VFS, "Null tcp session\n");
1444		return -EIO;
1445	}
1446
1447	if (server->tcpStatus == CifsExiting)
1448		return -ENOENT;
1449
1450	/* Ensure that we do not send more than 50 overlapping requests
1451	   to the same server. We may make this configurable later or
1452	   use ses->maxReq */
1453
1454	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1455		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1456			      len);
1457		return -EIO;
1458	}
1459
1460	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1461	if (rc)
1462		return rc;
1463
1464	/* make sure that we sign in the same order that we send on this socket
1465	   and avoid races inside tcp sendmsg code that could cause corruption
1466	   of smb data */
1467
1468	mutex_lock(&server->srv_mutex);
1469
1470	rc = allocate_mid(ses, in_buf, &midQ);
1471	if (rc) {
1472		mutex_unlock(&server->srv_mutex);
1473		return rc;
1474	}
1475
1476	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1477	if (rc) {
1478		cifs_delete_mid(midQ);
1479		mutex_unlock(&server->srv_mutex);
1480		return rc;
1481	}
1482
1483	midQ->mid_state = MID_REQUEST_SUBMITTED;
1484	cifs_in_send_inc(server);
1485	rc = smb_send(server, in_buf, len);
1486	cifs_in_send_dec(server);
1487	cifs_save_when_sent(midQ);
1488
1489	if (rc < 0)
1490		server->sequence_number -= 2;
1491
1492	mutex_unlock(&server->srv_mutex);
1493
1494	if (rc < 0) {
1495		cifs_delete_mid(midQ);
1496		return rc;
1497	}
1498
1499	/* Wait for a reply - allow signals to interrupt. */
1500	rc = wait_event_interruptible(server->response_q,
1501		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
1502		((server->tcpStatus != CifsGood) &&
1503		 (server->tcpStatus != CifsNew)));
1504
1505	/* Were we interrupted by a signal ? */
 
1506	if ((rc == -ERESTARTSYS) &&
1507		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1508		((server->tcpStatus == CifsGood) ||
1509		 (server->tcpStatus == CifsNew))) {
 
1510
1511		if (in_buf->Command == SMB_COM_TRANSACTION2) {
1512			/* POSIX lock. We send a NT_CANCEL SMB to cause the
1513			   blocking lock to return. */
1514			rc = send_cancel(server, &rqst, midQ);
1515			if (rc) {
1516				cifs_delete_mid(midQ);
1517				return rc;
1518			}
1519		} else {
1520			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1521			   to cause the blocking lock to return. */
1522
1523			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1524
1525			/* If we get -ENOLCK back the lock may have
1526			   already been removed. Don't exit in this case. */
1527			if (rc && rc != -ENOLCK) {
1528				cifs_delete_mid(midQ);
1529				return rc;
1530			}
1531		}
1532
1533		rc = wait_for_response(server, midQ);
1534		if (rc) {
1535			send_cancel(server, &rqst, midQ);
1536			spin_lock(&GlobalMid_Lock);
1537			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1538				/* no longer considered to be "in-flight" */
1539				midQ->callback = DeleteMidQEntry;
1540				spin_unlock(&GlobalMid_Lock);
1541				return rc;
1542			}
1543			spin_unlock(&GlobalMid_Lock);
1544		}
1545
1546		/* We got the response - restart system call. */
1547		rstart = 1;
 
1548	}
 
1549
1550	rc = cifs_sync_mid_result(midQ, server);
1551	if (rc != 0)
1552		return rc;
1553
1554	/* rcvd frame is ok */
1555	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1556		rc = -EIO;
1557		cifs_tcon_dbg(VFS, "Bad MID state?\n");
1558		goto out;
1559	}
1560
1561	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1562	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1563	rc = cifs_check_receive(midQ, server, 0);
1564out:
1565	cifs_delete_mid(midQ);
1566	if (rstart && rc == -EACCES)
1567		return -ERESTARTSYS;
1568	return rc;
1569}