v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 1999 Eric Youngdale
   4 * Copyright (C) 2014 Christoph Hellwig
   5 *
   6 *  SCSI queueing library.
   7 *      Initial versions: Eric Youngdale (eric@andante.org).
   8 *                        Based upon conversations with large numbers
   9 *                        of people at Linux Expo.
  10 */
  11
  12#include <linux/bio.h>
  13#include <linux/bitops.h>
  14#include <linux/blkdev.h>
  15#include <linux/completion.h>
  16#include <linux/kernel.h>
  17#include <linux/export.h>
  18#include <linux/init.h>
  19#include <linux/pci.h>
  20#include <linux/delay.h>
  21#include <linux/hardirq.h>
  22#include <linux/scatterlist.h>
  23#include <linux/blk-mq.h>
  24#include <linux/ratelimit.h>
  25#include <asm/unaligned.h>
  26
  27#include <scsi/scsi.h>
  28#include <scsi/scsi_cmnd.h>
  29#include <scsi/scsi_dbg.h>
  30#include <scsi/scsi_device.h>
  31#include <scsi/scsi_driver.h>
  32#include <scsi/scsi_eh.h>
  33#include <scsi/scsi_host.h>
  34#include <scsi/scsi_transport.h> /* __scsi_init_queue() */
  35#include <scsi/scsi_dh.h>
  36
  37#include <trace/events/scsi.h>
  38
  39#include "scsi_debugfs.h"
  40#include "scsi_priv.h"
  41#include "scsi_logging.h"
  42
  43/*
  44 * Size of integrity metadata is usually small, 1 inline sg should
  45 * cover normal cases.
  46 */
  47#ifdef CONFIG_ARCH_NO_SG_CHAIN
  48#define  SCSI_INLINE_PROT_SG_CNT  0
  49#define  SCSI_INLINE_SG_CNT  0
  50#else
  51#define  SCSI_INLINE_PROT_SG_CNT  1
  52#define  SCSI_INLINE_SG_CNT  2
  53#endif
  54
  55static struct kmem_cache *scsi_sense_cache;
  56static DEFINE_MUTEX(scsi_sense_cache_mutex);
  57
  58static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);
  59
  60int scsi_init_sense_cache(struct Scsi_Host *shost)
  61{
  62	int ret = 0;
  63
  64	mutex_lock(&scsi_sense_cache_mutex);
  65	if (!scsi_sense_cache) {
  66		scsi_sense_cache =
  67			kmem_cache_create_usercopy("scsi_sense_cache",
  68				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
  69				0, SCSI_SENSE_BUFFERSIZE, NULL);
  70		if (!scsi_sense_cache)
  71			ret = -ENOMEM;
  72	}
  73	mutex_unlock(&scsi_sense_cache_mutex);
  74	return ret;
  75}
  76
  77/*
   78 * When to reinvoke queueing after a resource shortage. It's 3 msecs so as
   79 * not to change behaviour from the previous unplug mechanism; experimentation
   80 * may prove this needs changing.
  81 */
  82#define SCSI_QUEUE_DELAY	3
  83
  84static void
  85scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
  86{
  87	struct Scsi_Host *host = cmd->device->host;
  88	struct scsi_device *device = cmd->device;
  89	struct scsi_target *starget = scsi_target(device);
  90
  91	/*
  92	 * Set the appropriate busy bit for the device/host.
  93	 *
  94	 * If the host/device isn't busy, assume that something actually
  95	 * completed, and that we should be able to queue a command now.
  96	 *
  97	 * Note that the prior mid-layer assumption that any host could
  98	 * always queue at least one command is now broken.  The mid-layer
  99	 * will implement a user specifiable stall (see
 100	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
 101	 * if a command is requeued with no other commands outstanding
 102	 * either for the device or for the host.
 103	 */
 104	switch (reason) {
 105	case SCSI_MLQUEUE_HOST_BUSY:
 106		atomic_set(&host->host_blocked, host->max_host_blocked);
 107		break;
 108	case SCSI_MLQUEUE_DEVICE_BUSY:
 109	case SCSI_MLQUEUE_EH_RETRY:
 110		atomic_set(&device->device_blocked,
 111			   device->max_device_blocked);
 112		break;
 113	case SCSI_MLQUEUE_TARGET_BUSY:
 114		atomic_set(&starget->target_blocked,
 115			   starget->max_target_blocked);
 116		break;
 117	}
 118}
 119
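/*
 * Illustrative sketch, not part of the original file: how a low-level
 * driver's ->queuecommand() might push back when its hardware is saturated.
 * The returned SCSI_MLQUEUE_HOST_BUSY value is what eventually reaches
 * scsi_set_blocked() above, via scsi_queue_rq()/scsi_dispatch_cmd().
 * example_hw_queue_full() and example_hw_submit() are made-up placeholders.
 */
#if 0
static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	if (example_hw_queue_full(shost))
		return SCSI_MLQUEUE_HOST_BUSY; /* retried once host_blocked drains */

	example_hw_submit(shost, cmd);
	return 0;
}
#endif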
 120static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
 121{
 122	if (cmd->request->rq_flags & RQF_DONTPREP) {
 123		cmd->request->rq_flags &= ~RQF_DONTPREP;
 124		scsi_mq_uninit_cmd(cmd);
 125	} else {
 126		WARN_ON_ONCE(true);
 127	}
 128	blk_mq_requeue_request(cmd->request, true);
 129}
 130
 131/**
 132 * __scsi_queue_insert - private queue insertion
 133 * @cmd: The SCSI command being requeued
 134 * @reason:  The reason for the requeue
 135 * @unbusy: Whether the queue should be unbusied
 136 *
 137 * This is a private queue insertion.  The public interface
 138 * scsi_queue_insert() always assumes the queue should be unbusied
 139 * because it's always called before the completion.  This function is
 140 * for a requeue after completion, which should only occur in this
 141 * file.
 142 */
 143static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
 144{
 145	struct scsi_device *device = cmd->device;
 146
 147	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
 148		"Inserting command %p into mlqueue\n", cmd));
 149
 150	scsi_set_blocked(cmd, reason);
 151
 152	/*
 153	 * Decrement the counters, since these commands are no longer
 154	 * active on the host/device.
 155	 */
 156	if (unbusy)
 157		scsi_device_unbusy(device, cmd);
 158
 159	/*
 160	 * Requeue this command.  It will go before all other commands
 161	 * that are already in the queue. Schedule requeue work under
 162	 * lock such that the kblockd_schedule_work() call happens
 163	 * before blk_cleanup_queue() finishes.
 164	 */
 165	cmd->result = 0;
 166
 167	blk_mq_requeue_request(cmd->request, true);
 168}
 169
 170/**
 171 * scsi_queue_insert - Reinsert a command in the queue.
 172 * @cmd:    command that we are adding to queue.
 173 * @reason: why we are inserting command to queue.
 174 *
 175 * We do this for one of two cases. Either the host is busy and it cannot accept
 176 * any more commands for the time being, or the device returned QUEUE_FULL and
 177 * can accept no more commands.
 178 *
 179 * Context: This could be called either from an interrupt context or a normal
 180 * process context.
 181 */
 182void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 183{
 184	__scsi_queue_insert(cmd, reason, true);
 185}
 186
 187
 188/**
 189 * __scsi_execute - insert request and wait for the result
 190 * @sdev:	scsi device
 191 * @cmd:	scsi command
 192 * @data_direction: data direction
 193 * @buffer:	data buffer
 194 * @bufflen:	len of buffer
 195 * @sense:	optional sense buffer
 196 * @sshdr:	optional decoded sense header
 197 * @timeout:	request timeout in HZ
 198 * @retries:	number of times to retry request
 199 * @flags:	flags for ->cmd_flags
 200 * @rq_flags:	flags for ->rq_flags
 201 * @resid:	optional residual length
 202 *
 203 * Returns the scsi_cmnd result field if a command was executed, or a negative
 204 * Linux error code if we didn't get that far.
 205 */
 206int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 207		 int data_direction, void *buffer, unsigned bufflen,
 208		 unsigned char *sense, struct scsi_sense_hdr *sshdr,
 209		 int timeout, int retries, u64 flags, req_flags_t rq_flags,
 210		 int *resid)
 211{
 212	struct request *req;
 213	struct scsi_request *rq;
 214	int ret;
 215
 216	req = blk_get_request(sdev->request_queue,
 217			data_direction == DMA_TO_DEVICE ?
 218			REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
 219			rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
 220	if (IS_ERR(req))
 221		return PTR_ERR(req);
 222
 223	rq = scsi_req(req);
 224
 225	if (bufflen) {
 226		ret = blk_rq_map_kern(sdev->request_queue, req,
 227				      buffer, bufflen, GFP_NOIO);
 228		if (ret)
 229			goto out;
 230	}
 231	rq->cmd_len = COMMAND_SIZE(cmd[0]);
 232	memcpy(rq->cmd, cmd, rq->cmd_len);
 233	rq->retries = retries;
 234	req->timeout = timeout;
 235	req->cmd_flags |= flags;
 236	req->rq_flags |= rq_flags | RQF_QUIET;
 237
 238	/*
 239	 * head injection *required* here otherwise quiesce won't work
 240	 */
 241	blk_execute_rq(NULL, req, 1);
 242
 243	/*
 244	 * Some devices (USB mass-storage in particular) may transfer
 245	 * garbage data together with a residue indicating that the data
 246	 * is invalid.  Prevent the garbage from being misinterpreted
 247	 * and prevent security leaks by zeroing out the excess data.
 248	 */
 249	if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
 250		memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);
 251
 252	if (resid)
 253		*resid = rq->resid_len;
 254	if (sense && rq->sense_len)
 255		memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
 256	if (sshdr)
 257		scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
 258	ret = rq->result;
 259 out:
 260	blk_put_request(req);
 261
 262	return ret;
 263}
 264EXPORT_SYMBOL(__scsi_execute);
 265
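/*
 * Illustrative sketch, not part of the original file: issuing a simple
 * TEST UNIT READY via __scsi_execute().  Most callers go through the
 * scsi_execute()/scsi_execute_req() wrappers, which end up here.  The
 * timeout and retry counts below are arbitrary example values.
 */
#if 0
static int example_test_unit_ready(struct scsi_device *sdev)
{
	const unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;

	return __scsi_execute(sdev, cdb, DMA_NONE, NULL, 0, NULL, &sshdr,
			      30 * HZ, 3, 0, 0, NULL);
}
#endif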
 266/*
  267 * Wake up the error handler if necessary. To avoid missing a wakeup when
  268 * the number of host in-flight requests equals shost->host_failed,
  269 * call_rcu() is used in scsi_eh_scmd_add() in combination with an RCU read
  270 * lock in this function, ensuring that this function in its entirety
  271 * either finishes before scsi_eh_scmd_add() increases the host_failed
  272 * counter, or that it notices the shost state change made by
  273 * scsi_eh_scmd_add().
 274 */
 275static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 276{
 277	unsigned long flags;
 278
 279	rcu_read_lock();
 280	__clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
 281	if (unlikely(scsi_host_in_recovery(shost))) {
 282		spin_lock_irqsave(shost->host_lock, flags);
 283		if (shost->host_failed || shost->host_eh_scheduled)
 284			scsi_eh_wakeup(shost);
 285		spin_unlock_irqrestore(shost->host_lock, flags);
 286	}
 287	rcu_read_unlock();
 288}
 289
 290void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
 291{
 292	struct Scsi_Host *shost = sdev->host;
 293	struct scsi_target *starget = scsi_target(sdev);
 294
 295	scsi_dec_host_busy(shost, cmd);
 296
 297	if (starget->can_queue > 0)
 298		atomic_dec(&starget->target_busy);
 299
 300	sbitmap_put(&sdev->budget_map, cmd->budget_token);
 301	cmd->budget_token = -1;
 302}
 303
 304static void scsi_kick_queue(struct request_queue *q)
 305{
 306	blk_mq_run_hw_queues(q, false);
 307}
 308
 309/*
 310 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 311 * and call blk_run_queue for all the scsi_devices on the target -
 312 * including current_sdev first.
 313 *
 314 * Called with *no* scsi locks held.
 315 */
 316static void scsi_single_lun_run(struct scsi_device *current_sdev)
 317{
 318	struct Scsi_Host *shost = current_sdev->host;
 319	struct scsi_device *sdev, *tmp;
 320	struct scsi_target *starget = scsi_target(current_sdev);
 321	unsigned long flags;
 322
 323	spin_lock_irqsave(shost->host_lock, flags);
 324	starget->starget_sdev_user = NULL;
 325	spin_unlock_irqrestore(shost->host_lock, flags);
 326
 327	/*
 328	 * Call blk_run_queue for all LUNs on the target, starting with
 329	 * current_sdev. We race with others (to set starget_sdev_user),
 330	 * but in most cases, we will be first. Ideally, each LU on the
 331	 * target would get some limited time or requests on the target.
 332	 */
 333	scsi_kick_queue(current_sdev->request_queue);
 334
 335	spin_lock_irqsave(shost->host_lock, flags);
 336	if (starget->starget_sdev_user)
 337		goto out;
 338	list_for_each_entry_safe(sdev, tmp, &starget->devices,
 339			same_target_siblings) {
 340		if (sdev == current_sdev)
 341			continue;
 342		if (scsi_device_get(sdev))
 343			continue;
 344
 345		spin_unlock_irqrestore(shost->host_lock, flags);
 346		scsi_kick_queue(sdev->request_queue);
 347		spin_lock_irqsave(shost->host_lock, flags);
 348
 349		scsi_device_put(sdev);
 350	}
 351 out:
 352	spin_unlock_irqrestore(shost->host_lock, flags);
 353}
 354
 355static inline bool scsi_device_is_busy(struct scsi_device *sdev)
 356{
 357	if (scsi_device_busy(sdev) >= sdev->queue_depth)
 358		return true;
 359	if (atomic_read(&sdev->device_blocked) > 0)
 360		return true;
 361	return false;
 362}
 363
 364static inline bool scsi_target_is_busy(struct scsi_target *starget)
 365{
 366	if (starget->can_queue > 0) {
 367		if (atomic_read(&starget->target_busy) >= starget->can_queue)
 368			return true;
 369		if (atomic_read(&starget->target_blocked) > 0)
 370			return true;
 371	}
 372	return false;
 373}
 374
 375static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
 376{
 377	if (atomic_read(&shost->host_blocked) > 0)
 378		return true;
 379	if (shost->host_self_blocked)
 380		return true;
 381	return false;
 382}
 383
 384static void scsi_starved_list_run(struct Scsi_Host *shost)
 385{
 386	LIST_HEAD(starved_list);
 387	struct scsi_device *sdev;
 388	unsigned long flags;
 389
 390	spin_lock_irqsave(shost->host_lock, flags);
 391	list_splice_init(&shost->starved_list, &starved_list);
 392
 393	while (!list_empty(&starved_list)) {
 394		struct request_queue *slq;
 395
 396		/*
 397		 * As long as shost is accepting commands and we have
 398		 * starved queues, call blk_run_queue. scsi_request_fn
 399		 * drops the queue_lock and can add us back to the
 400		 * starved_list.
 401		 *
 402		 * host_lock protects the starved_list and starved_entry.
 403		 * scsi_request_fn must get the host_lock before checking
 404		 * or modifying starved_list or starved_entry.
 405		 */
 406		if (scsi_host_is_busy(shost))
 407			break;
 408
 409		sdev = list_entry(starved_list.next,
 410				  struct scsi_device, starved_entry);
 411		list_del_init(&sdev->starved_entry);
 412		if (scsi_target_is_busy(scsi_target(sdev))) {
 413			list_move_tail(&sdev->starved_entry,
 414				       &shost->starved_list);
 415			continue;
 416		}
 417
 418		/*
 419		 * Once we drop the host lock, a racing scsi_remove_device()
 420		 * call may remove the sdev from the starved list and destroy
 421		 * it and the queue.  Mitigate by taking a reference to the
 422		 * queue and never touching the sdev again after we drop the
 423		 * host lock.  Note: if __scsi_remove_device() invokes
 424		 * blk_cleanup_queue() before the queue is run from this
 425		 * function then blk_run_queue() will return immediately since
 426		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
 427		 */
 428		slq = sdev->request_queue;
 429		if (!blk_get_queue(slq))
 430			continue;
 431		spin_unlock_irqrestore(shost->host_lock, flags);
 432
 433		scsi_kick_queue(slq);
 434		blk_put_queue(slq);
 435
 436		spin_lock_irqsave(shost->host_lock, flags);
 437	}
 438	/* put any unprocessed entries back */
 439	list_splice(&starved_list, &shost->starved_list);
 440	spin_unlock_irqrestore(shost->host_lock, flags);
 441}
 442
 443/**
 444 * scsi_run_queue - Select a proper request queue to serve next.
 445 * @q:  last request's queue
 446 *
 447 * The previous command was completely finished, start a new one if possible.
 448 */
 449static void scsi_run_queue(struct request_queue *q)
 450{
 451	struct scsi_device *sdev = q->queuedata;
 452
 453	if (scsi_target(sdev)->single_lun)
 454		scsi_single_lun_run(sdev);
 455	if (!list_empty(&sdev->host->starved_list))
 456		scsi_starved_list_run(sdev->host);
 457
 458	blk_mq_run_hw_queues(q, false);
 459}
 460
 461void scsi_requeue_run_queue(struct work_struct *work)
 462{
 463	struct scsi_device *sdev;
 464	struct request_queue *q;
 465
 466	sdev = container_of(work, struct scsi_device, requeue_work);
 467	q = sdev->request_queue;
 468	scsi_run_queue(q);
 469}
 470
 471void scsi_run_host_queues(struct Scsi_Host *shost)
 472{
 473	struct scsi_device *sdev;
 474
 475	shost_for_each_device(sdev, shost)
 476		scsi_run_queue(sdev->request_queue);
 477}
 478
 479static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
 480{
 481	if (!blk_rq_is_passthrough(cmd->request)) {
 482		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
 483
 484		if (drv->uninit_command)
 485			drv->uninit_command(cmd);
 486	}
 487}
 488
 489void scsi_free_sgtables(struct scsi_cmnd *cmd)
 490{
 491	if (cmd->sdb.table.nents)
 492		sg_free_table_chained(&cmd->sdb.table,
 493				SCSI_INLINE_SG_CNT);
 494	if (scsi_prot_sg_count(cmd))
 495		sg_free_table_chained(&cmd->prot_sdb->table,
 496				SCSI_INLINE_PROT_SG_CNT);
 497}
 498EXPORT_SYMBOL_GPL(scsi_free_sgtables);
 499
 500static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
 501{
 502	scsi_free_sgtables(cmd);
 503	scsi_uninit_cmd(cmd);
 504}
 505
 506static void scsi_run_queue_async(struct scsi_device *sdev)
 507{
 508	if (scsi_target(sdev)->single_lun ||
 509	    !list_empty(&sdev->host->starved_list)) {
 510		kblockd_schedule_work(&sdev->requeue_work);
 511	} else {
 512		/*
 513		 * smp_mb() present in sbitmap_queue_clear() or implied in
 514		 * .end_io is for ordering writing .device_busy in
 515		 * scsi_device_unbusy() and reading sdev->restarts.
 516		 */
 517		int old = atomic_read(&sdev->restarts);
 518
 519		/*
 520		 * ->restarts has to be kept as non-zero if new budget
 521		 *  contention occurs.
 522		 *
 523		 *  No need to run queue when either another re-run
 524		 *  queue wins in updating ->restarts or a new budget
 525		 *  contention occurs.
 526		 */
 527		if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
 528			blk_mq_run_hw_queues(sdev->request_queue, true);
 529	}
 530}
 531
 532/* Returns false when no more bytes to process, true if there are more */
 533static bool scsi_end_request(struct request *req, blk_status_t error,
 534		unsigned int bytes)
 535{
 536	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 537	struct scsi_device *sdev = cmd->device;
 538	struct request_queue *q = sdev->request_queue;
 539
 540	if (blk_update_request(req, error, bytes))
 541		return true;
 542
 543	if (blk_queue_add_random(q))
 544		add_disk_randomness(req->rq_disk);
 545
 546	if (!blk_rq_is_passthrough(req)) {
 547		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
 548		cmd->flags &= ~SCMD_INITIALIZED;
 549	}
 550
 551	/*
 552	 * Calling rcu_barrier() is not necessary here because the
 553	 * SCSI error handler guarantees that the function called by
 554	 * call_rcu() has been called before scsi_end_request() is
 555	 * called.
 556	 */
 557	destroy_rcu_head(&cmd->rcu);
 558
 559	/*
 560	 * In the MQ case the command gets freed by __blk_mq_end_request,
 561	 * so we have to do all cleanup that depends on it earlier.
 562	 *
 563	 * We also can't kick the queues from irq context, so we
 564	 * will have to defer it to a workqueue.
 565	 */
 566	scsi_mq_uninit_cmd(cmd);
 567
 568	/*
 569	 * queue is still alive, so grab the ref for preventing it
 570	 * from being cleaned up during running queue.
 571	 */
 572	percpu_ref_get(&q->q_usage_counter);
 573
 574	__blk_mq_end_request(req, error);
 575
 576	scsi_run_queue_async(sdev);
 577
 578	percpu_ref_put(&q->q_usage_counter);
 579	return false;
 580}
 581
 582/**
 583 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 584 * @cmd:	SCSI command
 585 * @result:	scsi error code
 586 *
 587 * Translate a SCSI result code into a blk_status_t value. May reset the host
 588 * byte of @cmd->result.
 589 */
 590static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
 591{
 592	switch (host_byte(result)) {
 593	case DID_OK:
 594		if (scsi_status_is_good(result))
 595			return BLK_STS_OK;
 596		return BLK_STS_IOERR;
 597	case DID_TRANSPORT_FAILFAST:
 598	case DID_TRANSPORT_MARGINAL:
 599		return BLK_STS_TRANSPORT;
 600	case DID_TARGET_FAILURE:
 601		set_host_byte(cmd, DID_OK);
 602		return BLK_STS_TARGET;
 603	case DID_NEXUS_FAILURE:
 604		set_host_byte(cmd, DID_OK);
 605		return BLK_STS_NEXUS;
 606	case DID_ALLOC_FAILURE:
 607		set_host_byte(cmd, DID_OK);
 608		return BLK_STS_NOSPC;
 609	case DID_MEDIUM_ERROR:
 610		set_host_byte(cmd, DID_OK);
 611		return BLK_STS_MEDIUM;
 612	default:
 613		return BLK_STS_IOERR;
 614	}
 615}
 616
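/*
 * Illustrative sketch, not part of the original file: how a completion
 * result flows through scsi_result_to_blk_status().  A command finished
 * with host byte DID_TRANSPORT_FAILFAST maps to BLK_STS_TRANSPORT, while
 * DID_TARGET_FAILURE maps to BLK_STS_TARGET and the host byte is reset to
 * DID_OK so that only the target-specific status remains in cmd->result.
 */
#if 0
static void example_result_mapping(struct scsi_cmnd *cmd)
{
	cmd->result = DID_TRANSPORT_FAILFAST << 16;
	WARN_ON(scsi_result_to_blk_status(cmd, cmd->result) != BLK_STS_TRANSPORT);
}
#endif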
 617/* Helper for scsi_io_completion() when "reprep" action required. */
 618static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
 619				      struct request_queue *q)
 620{
 621	/* A new command will be prepared and issued. */
 622	scsi_mq_requeue_cmd(cmd);
 623}
 624
 625static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
 626{
 627	struct request *req = cmd->request;
 628	unsigned long wait_for;
 629
 630	if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
 631		return false;
 632
 633	wait_for = (cmd->allowed + 1) * req->timeout;
 634	if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
 635		scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n",
 636			    wait_for/HZ);
 637		return true;
 638	}
 639	return false;
 640}
 641
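/*
 * Worked example for scsi_cmd_runtime_exceeced() above (illustrative, not
 * part of the original file): with cmd->allowed == 5 retries and
 * req->timeout == 30 * HZ, wait_for is (5 + 1) * 30 * HZ, i.e. 180 seconds.
 * Once more than 180 s have passed since cmd->jiffies_at_alloc, the command
 * is failed rather than retried again.
 */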
 642/* Helper for scsi_io_completion() when special action required. */
 643static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
 644{
 645	struct request_queue *q = cmd->device->request_queue;
 646	struct request *req = cmd->request;
 647	int level = 0;
 648	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
 649	      ACTION_DELAYED_RETRY} action;
 650	struct scsi_sense_hdr sshdr;
 651	bool sense_valid;
 652	bool sense_current = true;      /* false implies "deferred sense" */
 653	blk_status_t blk_stat;
 654
 655	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
 656	if (sense_valid)
 657		sense_current = !scsi_sense_is_deferred(&sshdr);
 658
 659	blk_stat = scsi_result_to_blk_status(cmd, result);
 660
 661	if (host_byte(result) == DID_RESET) {
 662		/* Third party bus reset or reset for error recovery
 663		 * reasons.  Just retry the command and see what
 664		 * happens.
 665		 */
 666		action = ACTION_RETRY;
 667	} else if (sense_valid && sense_current) {
 668		switch (sshdr.sense_key) {
 669		case UNIT_ATTENTION:
 670			if (cmd->device->removable) {
 671				/* Detected disc change.  Set a bit
 672				 * and quietly refuse further access.
 673				 */
 674				cmd->device->changed = 1;
 675				action = ACTION_FAIL;
 676			} else {
 677				/* Must have been a power glitch, or a
 678				 * bus reset.  Could not have been a
 679				 * media change, so we just retry the
 680				 * command and see what happens.
 681				 */
 682				action = ACTION_RETRY;
 683			}
 684			break;
 685		case ILLEGAL_REQUEST:
 686			/* If we had an ILLEGAL REQUEST returned, then
 687			 * we may have performed an unsupported
 688			 * command.  The only thing this should be
 689			 * would be a ten byte read where only a six
 690			 * byte read was supported.  Also, on a system
 691			 * where READ CAPACITY failed, we may have
 692			 * read past the end of the disk.
 693			 */
 694			if ((cmd->device->use_10_for_rw &&
 695			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
 696			    (cmd->cmnd[0] == READ_10 ||
 697			     cmd->cmnd[0] == WRITE_10)) {
 698				/* This will issue a new 6-byte command. */
 699				cmd->device->use_10_for_rw = 0;
 700				action = ACTION_REPREP;
 701			} else if (sshdr.asc == 0x10) /* DIX */ {
 702				action = ACTION_FAIL;
 703				blk_stat = BLK_STS_PROTECTION;
 704			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
 705			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
 706				action = ACTION_FAIL;
 707				blk_stat = BLK_STS_TARGET;
 708			} else
 709				action = ACTION_FAIL;
 710			break;
 711		case ABORTED_COMMAND:
 712			action = ACTION_FAIL;
 713			if (sshdr.asc == 0x10) /* DIF */
 714				blk_stat = BLK_STS_PROTECTION;
 715			break;
 716		case NOT_READY:
 717			/* If the device is in the process of becoming
 718			 * ready, or has a temporary blockage, retry.
 719			 */
 720			if (sshdr.asc == 0x04) {
 721				switch (sshdr.ascq) {
 722				case 0x01: /* becoming ready */
 723				case 0x04: /* format in progress */
 724				case 0x05: /* rebuild in progress */
 725				case 0x06: /* recalculation in progress */
 726				case 0x07: /* operation in progress */
 727				case 0x08: /* Long write in progress */
 728				case 0x09: /* self test in progress */
 729				case 0x11: /* notify (enable spinup) required */
 730				case 0x14: /* space allocation in progress */
 731				case 0x1a: /* start stop unit in progress */
 732				case 0x1b: /* sanitize in progress */
 733				case 0x1d: /* configuration in progress */
 734				case 0x24: /* depopulation in progress */
 735					action = ACTION_DELAYED_RETRY;
 736					break;
 737				case 0x0a: /* ALUA state transition */
 738					blk_stat = BLK_STS_AGAIN;
 739					fallthrough;
 740				default:
 741					action = ACTION_FAIL;
 742					break;
 743				}
 744			} else
 745				action = ACTION_FAIL;
 746			break;
 747		case VOLUME_OVERFLOW:
 748			/* See SSC3rXX or current. */
 749			action = ACTION_FAIL;
 750			break;
 751		case DATA_PROTECT:
 752			action = ACTION_FAIL;
 753			if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) ||
 754			    (sshdr.asc == 0x55 &&
 755			     (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) {
 756				/* Insufficient zone resources */
 757				blk_stat = BLK_STS_ZONE_OPEN_RESOURCE;
 758			}
 759			break;
 760		default:
 761			action = ACTION_FAIL;
 762			break;
 763		}
 764	} else
 765		action = ACTION_FAIL;
 766
 767	if (action != ACTION_FAIL && scsi_cmd_runtime_exceeced(cmd))
 768		action = ACTION_FAIL;
 769
 770	switch (action) {
 771	case ACTION_FAIL:
 772		/* Give up and fail the remainder of the request */
 773		if (!(req->rq_flags & RQF_QUIET)) {
 774			static DEFINE_RATELIMIT_STATE(_rs,
 775					DEFAULT_RATELIMIT_INTERVAL,
 776					DEFAULT_RATELIMIT_BURST);
 777
 778			if (unlikely(scsi_logging_level))
 779				level =
 780				     SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
 781						    SCSI_LOG_MLCOMPLETE_BITS);
 782
 783			/*
 784			 * if logging is enabled the failure will be printed
 785			 * in scsi_log_completion(), so avoid duplicate messages
 786			 */
 787			if (!level && __ratelimit(&_rs)) {
 788				scsi_print_result(cmd, NULL, FAILED);
 789				if (sense_valid)
 790					scsi_print_sense(cmd);
 791				scsi_print_command(cmd);
 792			}
 793		}
 794		if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
 795			return;
 796		fallthrough;
 797	case ACTION_REPREP:
 798		scsi_io_completion_reprep(cmd, q);
 799		break;
 800	case ACTION_RETRY:
 801		/* Retry the same command immediately */
 802		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
 803		break;
 804	case ACTION_DELAYED_RETRY:
 805		/* Retry the same command after a delay */
 806		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
 807		break;
 808	}
 809}
 810
 811/*
 812 * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a
 813 * new result that may suppress further error checking. Also modifies
 814 * *blk_statp in some cases.
 815 */
 816static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
 817					blk_status_t *blk_statp)
 818{
 819	bool sense_valid;
 820	bool sense_current = true;	/* false implies "deferred sense" */
 821	struct request *req = cmd->request;
 822	struct scsi_sense_hdr sshdr;
 823
 824	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
 825	if (sense_valid)
 826		sense_current = !scsi_sense_is_deferred(&sshdr);
 827
 828	if (blk_rq_is_passthrough(req)) {
 829		if (sense_valid) {
 830			/*
 831			 * SG_IO wants current and deferred errors
 832			 */
 833			scsi_req(req)->sense_len =
 834				min(8 + cmd->sense_buffer[7],
 835				    SCSI_SENSE_BUFFERSIZE);
 836		}
 837		if (sense_current)
 838			*blk_statp = scsi_result_to_blk_status(cmd, result);
 839	} else if (blk_rq_bytes(req) == 0 && sense_current) {
 840		/*
  841		 * Flush commands do not transfer any data, and thus cannot use
 842		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
 843		 * This sets *blk_statp explicitly for the problem case.
 844		 */
 845		*blk_statp = scsi_result_to_blk_status(cmd, result);
 846	}
 847	/*
 848	 * Recovered errors need reporting, but they're always treated as
 849	 * success, so fiddle the result code here.  For passthrough requests
 850	 * we already took a copy of the original into sreq->result which
 851	 * is what gets returned to the user
 852	 */
 853	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
 854		bool do_print = true;
 855		/*
 856		 * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d]
 857		 * skip print since caller wants ATA registers. Only occurs
 858		 * on SCSI ATA PASS_THROUGH commands when CK_COND=1
 859		 */
 860		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
 861			do_print = false;
 862		else if (req->rq_flags & RQF_QUIET)
 863			do_print = false;
 864		if (do_print)
 865			scsi_print_sense(cmd);
 866		result = 0;
 867		/* for passthrough, *blk_statp may be set */
 868		*blk_statp = BLK_STS_OK;
 869	}
 870	/*
 871	 * Another corner case: the SCSI status byte is non-zero but 'good'.
 872	 * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
 873	 * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
 874	 * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
 875	 * intermediate statuses (both obsolete in SAM-4) as good.
 876	 */
 877	if ((result & 0xff) && scsi_status_is_good(result)) {
 878		result = 0;
 879		*blk_statp = BLK_STS_OK;
 880	}
 881	return result;
 882}
 883
 884/**
 885 * scsi_io_completion - Completion processing for SCSI commands.
 886 * @cmd:	command that is finished.
 887 * @good_bytes:	number of processed bytes.
 888 *
 889 * We will finish off the specified number of sectors. If we are done, the
 890 * command block will be released and the queue function will be goosed. If we
 891 * are not done then we have to figure out what to do next:
 892 *
 893 *   a) We can call scsi_io_completion_reprep().  The request will be
 894 *	unprepared and put back on the queue.  Then a new command will
 895 *	be created for it.  This should be used if we made forward
 896 *	progress, or if we want to switch from READ(10) to READ(6) for
 897 *	example.
 898 *
 899 *   b) We can call scsi_io_completion_action().  The request will be
 900 *	put back on the queue and retried using the same command as
 901 *	before, possibly after a delay.
 902 *
 903 *   c) We can call scsi_end_request() with blk_stat other than
 904 *	BLK_STS_OK, to fail the remainder of the request.
 905 */
 906void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 907{
 908	int result = cmd->result;
 909	struct request_queue *q = cmd->device->request_queue;
 910	struct request *req = cmd->request;
 911	blk_status_t blk_stat = BLK_STS_OK;
 912
 913	if (unlikely(result))	/* a nz result may or may not be an error */
 914		result = scsi_io_completion_nz_result(cmd, result, &blk_stat);
 915
 916	if (unlikely(blk_rq_is_passthrough(req))) {
 917		/*
 918		 * scsi_result_to_blk_status may have reset the host_byte
 919		 */
 920		scsi_req(req)->result = cmd->result;
 921	}
 922
 923	/*
 924	 * Next deal with any sectors which we were able to correctly
 925	 * handle.
 926	 */
 927	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
 928		"%u sectors total, %d bytes done.\n",
 929		blk_rq_sectors(req), good_bytes));
 930
 931	/*
 932	 * Failed, zero length commands always need to drop down
 933	 * to retry code. Fast path should return in this block.
 934	 */
 935	if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
 936		if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
 937			return; /* no bytes remaining */
 938	}
 939
 940	/* Kill remainder if no retries. */
 941	if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
 942		if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
 943			WARN_ONCE(true,
 944			    "Bytes remaining after failed, no-retry command");
 945		return;
 946	}
 947
 948	/*
 949	 * If there had been no error, but we have leftover bytes in the
  950	 * request, just queue the command up again.
 951	 */
 952	if (likely(result == 0))
 953		scsi_io_completion_reprep(cmd, q);
 954	else
 955		scsi_io_completion_action(cmd, result);
 956}
 957
 958static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
 959		struct request *rq)
 960{
 961	return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
 962	       !op_is_write(req_op(rq)) &&
 963	       sdev->host->hostt->dma_need_drain(rq);
 964}
 965
 966/**
 967 * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists
 968 * @cmd: SCSI command data structure to initialize.
 969 *
 970 * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled
 971 * for @cmd.
 972 *
 973 * Returns:
 974 * * BLK_STS_OK       - on success
 975 * * BLK_STS_RESOURCE - if the failure is retryable
 976 * * BLK_STS_IOERR    - if the failure is fatal
 977 */
 978blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
 979{
 980	struct scsi_device *sdev = cmd->device;
 981	struct request *rq = cmd->request;
 982	unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
 983	struct scatterlist *last_sg = NULL;
 984	blk_status_t ret;
 985	bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
 986	int count;
 987
 988	if (WARN_ON_ONCE(!nr_segs))
 989		return BLK_STS_IOERR;
 990
 991	/*
 992	 * Make sure there is space for the drain.  The driver must adjust
 993	 * max_hw_segments to be prepared for this.
 994	 */
 995	if (need_drain)
 996		nr_segs++;
 997
 998	/*
 999	 * If sg table allocation fails, requeue request later.
1000	 */
1001	if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
1002			cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT)))
1003		return BLK_STS_RESOURCE;
1004
1005	/*
1006	 * Next, walk the list, and fill in the addresses and sizes of
1007	 * each segment.
1008	 */
1009	count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
1010
1011	if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
1012		unsigned int pad_len =
1013			(rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
1014
1015		last_sg->length += pad_len;
1016		cmd->extra_len += pad_len;
1017	}
1018
1019	if (need_drain) {
1020		sg_unmark_end(last_sg);
1021		last_sg = sg_next(last_sg);
1022		sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
1023		sg_mark_end(last_sg);
1024
1025		cmd->extra_len += sdev->dma_drain_len;
1026		count++;
1027	}
1028
1029	BUG_ON(count > cmd->sdb.table.nents);
1030	cmd->sdb.table.nents = count;
1031	cmd->sdb.length = blk_rq_payload_bytes(rq);
1032
1033	if (blk_integrity_rq(rq)) {
1034		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
1035		int ivecs;
1036
1037		if (WARN_ON_ONCE(!prot_sdb)) {
1038			/*
1039			 * This can happen if someone (e.g. multipath)
1040			 * queues a command to a device on an adapter
1041			 * that does not support DIX.
1042			 */
1043			ret = BLK_STS_IOERR;
1044			goto out_free_sgtables;
1045		}
1046
1047		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
1048
1049		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
1050				prot_sdb->table.sgl,
1051				SCSI_INLINE_PROT_SG_CNT)) {
1052			ret = BLK_STS_RESOURCE;
1053			goto out_free_sgtables;
1054		}
1055
1056		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
1057						prot_sdb->table.sgl);
1058		BUG_ON(count > ivecs);
1059		BUG_ON(count > queue_max_integrity_segments(rq->q));
1060
1061		cmd->prot_sdb = prot_sdb;
1062		cmd->prot_sdb->table.nents = count;
1063	}
1064
1065	return BLK_STS_OK;
1066out_free_sgtables:
1067	scsi_free_sgtables(cmd);
1068	return ret;
1069}
1070EXPORT_SYMBOL(scsi_alloc_sgtables);
1071
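/*
 * Illustrative sketch, not part of the original file: the usual calling
 * pattern in an upper-level driver's ->init_command(), loosely modelled on
 * what sd/sr do.  CDB setup is elided; the "example" name is a placeholder.
 */
#if 0
static blk_status_t example_init_command(struct scsi_cmnd *cmd)
{
	blk_status_t ret = scsi_alloc_sgtables(cmd);

	if (ret != BLK_STS_OK)
		return ret; /* BLK_STS_RESOURCE is retried, BLK_STS_IOERR fails */

	/* ... fill in cmd->cmnd, cmd->cmd_len, cmd->transfersize here ... */
	return BLK_STS_OK;
}
#endif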
1072/**
1073 * scsi_initialize_rq - initialize struct scsi_cmnd partially
1074 * @rq: Request associated with the SCSI command to be initialized.
1075 *
1076 * This function initializes the members of struct scsi_cmnd that must be
1077 * initialized before request processing starts and that won't be
1078 * reinitialized if a SCSI command is requeued.
1079 *
1080 * Called from inside blk_get_request() for pass-through requests and from
1081 * inside scsi_init_command() for filesystem requests.
1082 */
1083static void scsi_initialize_rq(struct request *rq)
1084{
1085	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1086
1087	scsi_req_init(&cmd->req);
1088	init_rcu_head(&cmd->rcu);
1089	cmd->jiffies_at_alloc = jiffies;
1090	cmd->retries = 0;
1091}
1092
1093/*
1094 * Only called when the request isn't completed by SCSI, and not freed by
1095 * SCSI
1096 */
1097static void scsi_cleanup_rq(struct request *rq)
1098{
1099	if (rq->rq_flags & RQF_DONTPREP) {
1100		scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
1101		rq->rq_flags &= ~RQF_DONTPREP;
1102	}
1103}
1104
1105/* Called before a request is prepared. See also scsi_mq_prep_fn(). */
1106void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
1107{
1108	void *buf = cmd->sense_buffer;
1109	void *prot = cmd->prot_sdb;
1110	struct request *rq = blk_mq_rq_from_pdu(cmd);
1111	unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
1112	unsigned long jiffies_at_alloc;
1113	int retries, to_clear;
1114	bool in_flight;
1115	int budget_token = cmd->budget_token;
1116
1117	if (!blk_rq_is_passthrough(rq) && !(flags & SCMD_INITIALIZED)) {
1118		flags |= SCMD_INITIALIZED;
1119		scsi_initialize_rq(rq);
1120	}
1121
1122	jiffies_at_alloc = cmd->jiffies_at_alloc;
1123	retries = cmd->retries;
1124	in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
1125	/*
1126	 * Zero out the cmd, except for the embedded scsi_request. Only clear
1127	 * the driver-private command data if the LLD does not supply a
1128	 * function to initialize that data.
1129	 */
1130	to_clear = sizeof(*cmd) - sizeof(cmd->req);
1131	if (!dev->host->hostt->init_cmd_priv)
1132		to_clear += dev->host->hostt->cmd_size;
1133	memset((char *)cmd + sizeof(cmd->req), 0, to_clear);
1134
1135	cmd->device = dev;
1136	cmd->sense_buffer = buf;
1137	cmd->prot_sdb = prot;
1138	cmd->flags = flags;
1139	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
1140	cmd->jiffies_at_alloc = jiffies_at_alloc;
1141	cmd->retries = retries;
1142	if (in_flight)
1143		__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
1144	cmd->budget_token = budget_token;
1145
1146}
1147
1148static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
1149		struct request *req)
1150{
1151	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1152
1153	/*
1154	 * Passthrough requests may transfer data, in which case they must
 1155	 * have a bio attached to them.  Or they might contain a SCSI command
1156	 * that does not transfer data, in which case they may optionally
1157	 * submit a request without an attached bio.
1158	 */
1159	if (req->bio) {
1160		blk_status_t ret = scsi_alloc_sgtables(cmd);
1161		if (unlikely(ret != BLK_STS_OK))
1162			return ret;
1163	} else {
1164		BUG_ON(blk_rq_bytes(req));
1165
1166		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1167	}
1168
1169	cmd->cmd_len = scsi_req(req)->cmd_len;
1170	if (cmd->cmd_len == 0)
1171		cmd->cmd_len = scsi_command_size(cmd->cmnd);
1172	cmd->cmnd = scsi_req(req)->cmd;
1173	cmd->transfersize = blk_rq_bytes(req);
1174	cmd->allowed = scsi_req(req)->retries;
1175	return BLK_STS_OK;
1176}
1177
1178static blk_status_t
1179scsi_device_state_check(struct scsi_device *sdev, struct request *req)
1180{
1181	switch (sdev->sdev_state) {
1182	case SDEV_CREATED:
1183		return BLK_STS_OK;
1184	case SDEV_OFFLINE:
1185	case SDEV_TRANSPORT_OFFLINE:
1186		/*
1187		 * If the device is offline we refuse to process any
1188		 * commands.  The device must be brought online
1189		 * before trying any recovery commands.
1190		 */
1191		if (!sdev->offline_already) {
1192			sdev->offline_already = true;
1193			sdev_printk(KERN_ERR, sdev,
1194				    "rejecting I/O to offline device\n");
1195		}
1196		return BLK_STS_IOERR;
1197	case SDEV_DEL:
1198		/*
1199		 * If the device is fully deleted, we refuse to
1200		 * process any commands as well.
1201		 */
1202		sdev_printk(KERN_ERR, sdev,
1203			    "rejecting I/O to dead device\n");
1204		return BLK_STS_IOERR;
1205	case SDEV_BLOCK:
1206	case SDEV_CREATED_BLOCK:
1207		return BLK_STS_RESOURCE;
1208	case SDEV_QUIESCE:
1209		/*
 1210		 * If the device is quiesced we only accept power management
1211		 * commands.
1212		 */
1213		if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
1214			return BLK_STS_RESOURCE;
1215		return BLK_STS_OK;
1216	default:
1217		/*
1218		 * For any other not fully online state we only allow
1219		 * power management commands.
1220		 */
1221		if (req && !(req->rq_flags & RQF_PM))
1222			return BLK_STS_IOERR;
1223		return BLK_STS_OK;
1224	}
1225}
1226
1227/*
1228 * scsi_dev_queue_ready: if we can send requests to sdev, assign one token
 1229 * and return the token, else return -1.
1230 */
1231static inline int scsi_dev_queue_ready(struct request_queue *q,
1232				  struct scsi_device *sdev)
1233{
1234	int token;
1235
1236	token = sbitmap_get(&sdev->budget_map);
1237	if (atomic_read(&sdev->device_blocked)) {
1238		if (token < 0)
1239			goto out;
1240
1241		if (scsi_device_busy(sdev) > 1)
1242			goto out_dec;
1243
1244		/*
1245		 * unblock after device_blocked iterates to zero
1246		 */
1247		if (atomic_dec_return(&sdev->device_blocked) > 0)
1248			goto out_dec;
1249		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
1250				   "unblocking device at zero depth\n"));
1251	}
1252
1253	return token;
1254out_dec:
1255	if (token >= 0)
1256		sbitmap_put(&sdev->budget_map, token);
1257out:
1258	return -1;
1259}
1260
1261/*
 1262 * scsi_target_queue_ready: checks whether we can send commands to the target
1263 * @sdev: scsi device on starget to check.
1264 */
1265static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1266					   struct scsi_device *sdev)
1267{
1268	struct scsi_target *starget = scsi_target(sdev);
1269	unsigned int busy;
1270
1271	if (starget->single_lun) {
1272		spin_lock_irq(shost->host_lock);
1273		if (starget->starget_sdev_user &&
1274		    starget->starget_sdev_user != sdev) {
1275			spin_unlock_irq(shost->host_lock);
1276			return 0;
1277		}
1278		starget->starget_sdev_user = sdev;
1279		spin_unlock_irq(shost->host_lock);
1280	}
1281
1282	if (starget->can_queue <= 0)
1283		return 1;
1284
1285	busy = atomic_inc_return(&starget->target_busy) - 1;
1286	if (atomic_read(&starget->target_blocked) > 0) {
1287		if (busy)
1288			goto starved;
1289
1290		/*
1291		 * unblock after target_blocked iterates to zero
1292		 */
1293		if (atomic_dec_return(&starget->target_blocked) > 0)
1294			goto out_dec;
1295
1296		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1297				 "unblocking target at zero depth\n"));
1298	}
1299
1300	if (busy >= starget->can_queue)
1301		goto starved;
1302
1303	return 1;
1304
1305starved:
1306	spin_lock_irq(shost->host_lock);
1307	list_move_tail(&sdev->starved_entry, &shost->starved_list);
1308	spin_unlock_irq(shost->host_lock);
1309out_dec:
1310	if (starget->can_queue > 0)
1311		atomic_dec(&starget->target_busy);
1312	return 0;
1313}
1314
1315/*
1316 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1317 * return 0. We must end up running the queue again whenever 0 is
1318 * returned, else IO can hang.
1319 */
1320static inline int scsi_host_queue_ready(struct request_queue *q,
1321				   struct Scsi_Host *shost,
1322				   struct scsi_device *sdev,
1323				   struct scsi_cmnd *cmd)
1324{
1325	if (scsi_host_in_recovery(shost))
1326		return 0;
1327
1328	if (atomic_read(&shost->host_blocked) > 0) {
1329		if (scsi_host_busy(shost) > 0)
1330			goto starved;
1331
1332		/*
1333		 * unblock after host_blocked iterates to zero
1334		 */
1335		if (atomic_dec_return(&shost->host_blocked) > 0)
1336			goto out_dec;
1337
1338		SCSI_LOG_MLQUEUE(3,
1339			shost_printk(KERN_INFO, shost,
1340				     "unblocking host at zero depth\n"));
1341	}
1342
1343	if (shost->host_self_blocked)
1344		goto starved;
1345
1346	/* We're OK to process the command, so we can't be starved */
1347	if (!list_empty(&sdev->starved_entry)) {
1348		spin_lock_irq(shost->host_lock);
1349		if (!list_empty(&sdev->starved_entry))
1350			list_del_init(&sdev->starved_entry);
1351		spin_unlock_irq(shost->host_lock);
1352	}
1353
1354	__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
1355
1356	return 1;
1357
1358starved:
1359	spin_lock_irq(shost->host_lock);
1360	if (list_empty(&sdev->starved_entry))
1361		list_add_tail(&sdev->starved_entry, &shost->starved_list);
1362	spin_unlock_irq(shost->host_lock);
1363out_dec:
1364	scsi_dec_host_busy(shost, cmd);
1365	return 0;
1366}
1367
1368/*
1369 * Busy state exporting function for request stacking drivers.
1370 *
1371 * For efficiency, no lock is taken to check the busy state of
1372 * shost/starget/sdev, since the returned value is not guaranteed and
1373 * may be changed after request stacking drivers call the function,
1374 * regardless of taking lock or not.
1375 *
1376 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
1377 * needs to return 'not busy'. Otherwise, request stacking drivers
1378 * may hold requests forever.
1379 */
1380static bool scsi_mq_lld_busy(struct request_queue *q)
1381{
1382	struct scsi_device *sdev = q->queuedata;
1383	struct Scsi_Host *shost;
1384
1385	if (blk_queue_dying(q))
1386		return false;
1387
1388	shost = sdev->host;
1389
1390	/*
1391	 * Ignore host/starget busy state.
 1392	 * Since the block layer does not have a concept of fairness across
 1393	 * multiple queues, congestion of the host/starget needs to be handled
 1394	 * in the SCSI layer.
1395	 */
1396	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1397		return true;
1398
1399	return false;
1400}
1401
1402/*
1403 * Block layer request completion callback. May be called from interrupt
1404 * context.
1405 */
1406static void scsi_complete(struct request *rq)
1407{
1408	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1409	enum scsi_disposition disposition;
1410
1411	INIT_LIST_HEAD(&cmd->eh_entry);
1412
1413	atomic_inc(&cmd->device->iodone_cnt);
1414	if (cmd->result)
1415		atomic_inc(&cmd->device->ioerr_cnt);
1416
1417	disposition = scsi_decide_disposition(cmd);
1418	if (disposition != SUCCESS && scsi_cmd_runtime_exceeced(cmd))
1419		disposition = SUCCESS;
1420
1421	scsi_log_completion(cmd, disposition);
1422
1423	switch (disposition) {
1424	case SUCCESS:
1425		scsi_finish_command(cmd);
1426		break;
1427	case NEEDS_RETRY:
1428		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1429		break;
1430	case ADD_TO_MLQUEUE:
1431		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1432		break;
1433	default:
1434		scsi_eh_scmd_add(cmd);
1435		break;
1436	}
1437}
1438
1439/**
1440 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
1441 * @cmd: command block we are dispatching.
1442 *
1443 * Return: nonzero return request was rejected and device's queue needs to be
 1444 * Return: nonzero if the request was rejected and the device's queue needs to be
1445 */
1446static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
1447{
1448	struct Scsi_Host *host = cmd->device->host;
1449	int rtn = 0;
1450
1451	atomic_inc(&cmd->device->iorequest_cnt);
1452
1453	/* check if the device is still usable */
1454	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
1455		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
1456		 * returns an immediate error upwards, and signals
1457		 * that the device is no longer present */
1458		cmd->result = DID_NO_CONNECT << 16;
1459		goto done;
1460	}
1461
1462	/* Check to see if the scsi lld made this device blocked. */
1463	if (unlikely(scsi_device_blocked(cmd->device))) {
1464		/*
1465		 * in blocked state, the command is just put back on
1466		 * the device queue.  The suspend state has already
1467		 * blocked the queue so future requests should not
1468		 * occur until the device transitions out of the
1469		 * suspend state.
1470		 */
1471		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1472			"queuecommand : device blocked\n"));
1473		return SCSI_MLQUEUE_DEVICE_BUSY;
1474	}
1475
1476	/* Store the LUN value in cmnd, if needed. */
1477	if (cmd->device->lun_in_cdb)
1478		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
1479			       (cmd->device->lun << 5 & 0xe0);
1480
1481	scsi_log_send(cmd);
1482
1483	/*
1484	 * Before we queue this command, check if the command
1485	 * length exceeds what the host adapter can handle.
1486	 */
1487	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
1488		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1489			       "queuecommand : command too long. "
1490			       "cdb_size=%d host->max_cmd_len=%d\n",
1491			       cmd->cmd_len, cmd->device->host->max_cmd_len));
1492		cmd->result = (DID_ABORT << 16);
1493		goto done;
1494	}
1495
1496	if (unlikely(host->shost_state == SHOST_DEL)) {
1497		cmd->result = (DID_NO_CONNECT << 16);
1498		goto done;
1499
1500	}
1501
1502	trace_scsi_dispatch_cmd_start(cmd);
1503	rtn = host->hostt->queuecommand(host, cmd);
1504	if (rtn) {
1505		trace_scsi_dispatch_cmd_error(cmd, rtn);
1506		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
1507		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
1508			rtn = SCSI_MLQUEUE_HOST_BUSY;
1509
1510		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1511			"queuecommand : request rejected\n"));
1512	}
1513
1514	return rtn;
1515 done:
1516	cmd->scsi_done(cmd);
1517	return 0;
1518}
1519
1520/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
1521static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
1522{
1523	return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
1524		sizeof(struct scatterlist);
1525}
1526
1527static blk_status_t scsi_prepare_cmd(struct request *req)
1528{
1529	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1530	struct scsi_device *sdev = req->q->queuedata;
1531	struct Scsi_Host *shost = sdev->host;
1532	struct scatterlist *sg;
1533
1534	scsi_init_command(sdev, cmd);
1535
1536	cmd->request = req;
1537	cmd->tag = req->tag;
1538	cmd->prot_op = SCSI_PROT_NORMAL;
1539	if (blk_rq_bytes(req))
1540		cmd->sc_data_direction = rq_dma_dir(req);
1541	else
1542		cmd->sc_data_direction = DMA_NONE;
1543
1544	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
1545	cmd->sdb.table.sgl = sg;
1546
1547	if (scsi_host_get_prot(shost)) {
1548		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
1549
1550		cmd->prot_sdb->table.sgl =
1551			(struct scatterlist *)(cmd->prot_sdb + 1);
1552	}
1553
1554	/*
1555	 * Special handling for passthrough commands, which don't go to the ULP
1556	 * at all:
1557	 */
1558	if (blk_rq_is_passthrough(req))
1559		return scsi_setup_scsi_cmnd(sdev, req);
1560
1561	if (sdev->handler && sdev->handler->prep_fn) {
1562		blk_status_t ret = sdev->handler->prep_fn(sdev, req);
1563
1564		if (ret != BLK_STS_OK)
1565			return ret;
1566	}
1567
1568	cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
1569	memset(cmd->cmnd, 0, BLK_MAX_CDB);
1570	return scsi_cmd_to_driver(cmd)->init_command(cmd);
1571}
1572
1573static void scsi_mq_done(struct scsi_cmnd *cmd)
1574{
1575	if (unlikely(blk_should_fake_timeout(cmd->request->q)))
1576		return;
1577	if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
1578		return;
1579	trace_scsi_dispatch_cmd_done(cmd);
1580	blk_mq_complete_request(cmd->request);
1581}
1582
1583static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
1584{
1585	struct scsi_device *sdev = q->queuedata;
1586
1587	sbitmap_put(&sdev->budget_map, budget_token);
1588}
1589
1590static int scsi_mq_get_budget(struct request_queue *q)
1591{
1592	struct scsi_device *sdev = q->queuedata;
1593	int token = scsi_dev_queue_ready(q, sdev);
1594
1595	if (token >= 0)
1596		return token;
1597
1598	atomic_inc(&sdev->restarts);
1599
1600	/*
1601	 * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy).
1602	 * .restarts must be incremented before .device_busy is read because the
1603	 * code in scsi_run_queue_async() depends on the order of these operations.
1604	 */
1605	smp_mb__after_atomic();
1606
1607	/*
1608	 * If all in-flight requests originated from this LUN are completed
1609	 * before reading .device_busy, sdev->device_busy will be observed as
1610	 * zero, then blk_mq_delay_run_hw_queues() will dispatch this request
1611	 * soon. Otherwise, completion of one of these requests will observe
1612	 * the .restarts flag, and the request queue will be run for handling
1613	 * this request, see scsi_end_request().
1614	 */
1615	if (unlikely(scsi_device_busy(sdev) == 0 &&
1616				!scsi_device_blocked(sdev)))
1617		blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
1618	return -1;
1619}
1620
1621static void scsi_mq_set_rq_budget_token(struct request *req, int token)
1622{
1623	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1624
1625	cmd->budget_token = token;
1626}
1627
1628static int scsi_mq_get_rq_budget_token(struct request *req)
1629{
1630	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1631
1632	return cmd->budget_token;
1633}
1634
1635static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
1636			 const struct blk_mq_queue_data *bd)
1637{
1638	struct request *req = bd->rq;
1639	struct request_queue *q = req->q;
1640	struct scsi_device *sdev = q->queuedata;
1641	struct Scsi_Host *shost = sdev->host;
1642	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1643	blk_status_t ret;
1644	int reason;
1645
1646	WARN_ON_ONCE(cmd->budget_token < 0);
1647
1648	/*
1649	 * If the device is not in running state we will reject some or all
1650	 * commands.
1651	 */
1652	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1653		ret = scsi_device_state_check(sdev, req);
1654		if (ret != BLK_STS_OK)
1655			goto out_put_budget;
1656	}
1657
1658	ret = BLK_STS_RESOURCE;
1659	if (!scsi_target_queue_ready(shost, sdev))
1660		goto out_put_budget;
1661	if (!scsi_host_queue_ready(q, shost, sdev, cmd))
1662		goto out_dec_target_busy;
1663
1664	if (!(req->rq_flags & RQF_DONTPREP)) {
1665		ret = scsi_prepare_cmd(req);
1666		if (ret != BLK_STS_OK)
1667			goto out_dec_host_busy;
1668		req->rq_flags |= RQF_DONTPREP;
1669	} else {
1670		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
1671	}
1672
1673	cmd->flags &= SCMD_PRESERVED_FLAGS;
1674	if (sdev->simple_tags)
1675		cmd->flags |= SCMD_TAGGED;
1676	if (bd->last)
1677		cmd->flags |= SCMD_LAST;
1678
1679	scsi_set_resid(cmd, 0);
1680	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1681	cmd->scsi_done = scsi_mq_done;
1682
1683	blk_mq_start_request(req);
1684	reason = scsi_dispatch_cmd(cmd);
1685	if (reason) {
1686		scsi_set_blocked(cmd, reason);
1687		ret = BLK_STS_RESOURCE;
1688		goto out_dec_host_busy;
1689	}
1690
1691	return BLK_STS_OK;
1692
1693out_dec_host_busy:
1694	scsi_dec_host_busy(shost, cmd);
1695out_dec_target_busy:
1696	if (scsi_target(sdev)->can_queue > 0)
1697		atomic_dec(&scsi_target(sdev)->target_busy);
1698out_put_budget:
1699	scsi_mq_put_budget(q, cmd->budget_token);
1700	cmd->budget_token = -1;
1701	switch (ret) {
1702	case BLK_STS_OK:
1703		break;
1704	case BLK_STS_RESOURCE:
1705	case BLK_STS_ZONE_RESOURCE:
1706		if (scsi_device_blocked(sdev))
1707			ret = BLK_STS_DEV_RESOURCE;
1708		break;
1709	case BLK_STS_AGAIN:
1710		scsi_req(req)->result = DID_BUS_BUSY << 16;
1711		if (req->rq_flags & RQF_DONTPREP)
1712			scsi_mq_uninit_cmd(cmd);
1713		break;
1714	default:
1715		if (unlikely(!scsi_device_online(sdev)))
1716			scsi_req(req)->result = DID_NO_CONNECT << 16;
1717		else
1718			scsi_req(req)->result = DID_ERROR << 16;
1719		/*
1720		 * Make sure to release all allocated resources when
1721		 * we hit an error, as we will never see this command
1722		 * again.
1723		 */
1724		if (req->rq_flags & RQF_DONTPREP)
1725			scsi_mq_uninit_cmd(cmd);
1726		scsi_run_queue_async(sdev);
1727		break;
1728	}
1729	return ret;
1730}
1731
1732static enum blk_eh_timer_return scsi_timeout(struct request *req,
1733		bool reserved)
1734{
1735	if (reserved)
1736		return BLK_EH_RESET_TIMER;
1737	return scsi_times_out(req);
1738}
1739
1740static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
1741				unsigned int hctx_idx, unsigned int numa_node)
1742{
1743	struct Scsi_Host *shost = set->driver_data;
1744	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1745	struct scatterlist *sg;
1746	int ret = 0;
1747
1748	cmd->sense_buffer =
1749		kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
1750	if (!cmd->sense_buffer)
1751		return -ENOMEM;
1752	cmd->req.sense = cmd->sense_buffer;
1753
1754	if (scsi_host_get_prot(shost)) {
1755		sg = (void *)cmd + sizeof(struct scsi_cmnd) +
1756			shost->hostt->cmd_size;
1757		cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
1758	}
1759
1760	if (shost->hostt->init_cmd_priv) {
1761		ret = shost->hostt->init_cmd_priv(shost, cmd);
1762		if (ret < 0)
1763			kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
1764	}
1765
1766	return ret;
1767}
1768
1769static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1770				 unsigned int hctx_idx)
1771{
1772	struct Scsi_Host *shost = set->driver_data;
1773	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1774
1775	if (shost->hostt->exit_cmd_priv)
1776		shost->hostt->exit_cmd_priv(shost, cmd);
1777	kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
1778}
1779
1780
1781static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx)
1782{
1783	struct Scsi_Host *shost = hctx->driver_data;
1784
1785	if (shost->hostt->mq_poll)
1786		return shost->hostt->mq_poll(shost, hctx->queue_num);
1787
1788	return 0;
1789}
1790
1791static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1792			  unsigned int hctx_idx)
1793{
1794	struct Scsi_Host *shost = data;
1795
1796	hctx->driver_data = shost;
1797	return 0;
1798}
1799
1800static int scsi_map_queues(struct blk_mq_tag_set *set)
1801{
1802	struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
1803
1804	if (shost->hostt->map_queues)
1805		return shost->hostt->map_queues(shost);
1806	return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
1807}
1808
1809void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
1810{
1811	struct device *dev = shost->dma_dev;
1812
1813	/*
1814	 * this limit is imposed by hardware restrictions
1815	 */
1816	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1817					SG_MAX_SEGMENTS));
1818
1819	if (scsi_host_prot_dma(shost)) {
1820		shost->sg_prot_tablesize =
1821			min_not_zero(shost->sg_prot_tablesize,
1822				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
1823		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1824		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1825	}
1826
1827	if (dev->dma_mask) {
1828		shost->max_sectors = min_t(unsigned int, shost->max_sectors,
1829				dma_max_mapping_size(dev) >> SECTOR_SHIFT);
1830	}
1831	blk_queue_max_hw_sectors(q, shost->max_sectors);
1832	blk_queue_segment_boundary(q, shost->dma_boundary);
1833	dma_set_seg_boundary(dev, shost->dma_boundary);
1834
1835	blk_queue_max_segment_size(q, shost->max_segment_size);
1836	blk_queue_virt_boundary(q, shost->virt_boundary_mask);
1837	dma_set_max_seg_size(dev, queue_max_segment_size(q));
1838
1839	/*
1840	 * Set a reasonable default alignment:  The larger of 32-byte (dword),
1841	 * which is a common minimum for HBAs, and the minimum DMA alignment,
1842	 * which is set by the platform.
1843	 *
1844	 * Devices that require a bigger alignment can increase it later.
1845	 */
1846	blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
1847}
1848EXPORT_SYMBOL_GPL(__scsi_init_queue);
1849
1850static const struct blk_mq_ops scsi_mq_ops_no_commit = {
1851	.get_budget	= scsi_mq_get_budget,
1852	.put_budget	= scsi_mq_put_budget,
1853	.queue_rq	= scsi_queue_rq,
1854	.complete	= scsi_complete,
1855	.timeout	= scsi_timeout,
1856#ifdef CONFIG_BLK_DEBUG_FS
1857	.show_rq	= scsi_show_rq,
1858#endif
1859	.init_request	= scsi_mq_init_request,
1860	.exit_request	= scsi_mq_exit_request,
1861	.initialize_rq_fn = scsi_initialize_rq,
1862	.cleanup_rq	= scsi_cleanup_rq,
1863	.busy		= scsi_mq_lld_busy,
1864	.map_queues	= scsi_map_queues,
1865	.init_hctx	= scsi_init_hctx,
1866	.poll		= scsi_mq_poll,
1867	.set_rq_budget_token = scsi_mq_set_rq_budget_token,
1868	.get_rq_budget_token = scsi_mq_get_rq_budget_token,
1869};
1870
1871
1872static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
1873{
1874	struct Scsi_Host *shost = hctx->driver_data;
1875
1876	shost->hostt->commit_rqs(shost, hctx->queue_num);
1877}
1878
1879static const struct blk_mq_ops scsi_mq_ops = {
1880	.get_budget	= scsi_mq_get_budget,
1881	.put_budget	= scsi_mq_put_budget,
1882	.queue_rq	= scsi_queue_rq,
1883	.commit_rqs	= scsi_commit_rqs,
1884	.complete	= scsi_complete,
1885	.timeout	= scsi_timeout,
1886#ifdef CONFIG_BLK_DEBUG_FS
1887	.show_rq	= scsi_show_rq,
1888#endif
1889	.init_request	= scsi_mq_init_request,
1890	.exit_request	= scsi_mq_exit_request,
1891	.initialize_rq_fn = scsi_initialize_rq,
1892	.cleanup_rq	= scsi_cleanup_rq,
1893	.busy		= scsi_mq_lld_busy,
1894	.map_queues	= scsi_map_queues,
1895	.init_hctx	= scsi_init_hctx,
1896	.poll		= scsi_mq_poll,
1897	.set_rq_budget_token = scsi_mq_set_rq_budget_token,
1898	.get_rq_budget_token = scsi_mq_get_rq_budget_token,
1899};
1900
1901int scsi_mq_setup_tags(struct Scsi_Host *shost)
1902{
1903	unsigned int cmd_size, sgl_size;
1904	struct blk_mq_tag_set *tag_set = &shost->tag_set;
1905
1906	sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
1907				scsi_mq_inline_sgl_size(shost));
1908	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
1909	if (scsi_host_get_prot(shost))
1910		cmd_size += sizeof(struct scsi_data_buffer) +
1911			sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
1912
1913	memset(tag_set, 0, sizeof(*tag_set));
1914	if (shost->hostt->commit_rqs)
1915		tag_set->ops = &scsi_mq_ops;
1916	else
1917		tag_set->ops = &scsi_mq_ops_no_commit;
1918	tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
1919	tag_set->nr_maps = shost->nr_maps ? : 1;
1920	tag_set->queue_depth = shost->can_queue;
1921	tag_set->cmd_size = cmd_size;
1922	tag_set->numa_node = NUMA_NO_NODE;
1923	tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
1924	tag_set->flags |=
1925		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
1926	tag_set->driver_data = shost;
1927	if (shost->host_tagset)
1928		tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
1929
1930	return blk_mq_alloc_tag_set(tag_set);
1931}
1932
1933void scsi_mq_destroy_tags(struct Scsi_Host *shost)
1934{
1935	blk_mq_free_tag_set(&shost->tag_set);
1936}
1937
1938/**
1939 * scsi_device_from_queue - return sdev associated with a request_queue
1940 * @q: The request queue to return the sdev from
1941 *
1942 * Return the sdev associated with a request queue or NULL if the
1943 * request_queue does not reference a SCSI device.
1944 */
1945struct scsi_device *scsi_device_from_queue(struct request_queue *q)
1946{
1947	struct scsi_device *sdev = NULL;
1948
1949	if (q->mq_ops == &scsi_mq_ops_no_commit ||
1950	    q->mq_ops == &scsi_mq_ops)
1951		sdev = q->queuedata;
1952	if (!sdev || !get_device(&sdev->sdev_gendev))
1953		sdev = NULL;
1954
1955	return sdev;
1956}
1957
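
/*
 * Hedged usage sketch (not part of this file): a caller that only holds a
 * request_queue can look up the owning scsi_device.  On success a reference
 * is taken on sdev_gendev, so the caller must drop it with put_device().
 * my_queue_is_scsi_disk() is a hypothetical helper name.
 */
static bool my_queue_is_scsi_disk(struct request_queue *q)
{
	struct scsi_device *sdev = scsi_device_from_queue(q);
	bool is_disk;

	if (!sdev)
		return false;
	is_disk = sdev->type == TYPE_DISK;
	put_device(&sdev->sdev_gendev);
	return is_disk;
}
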
1958/**
1959 * scsi_block_requests - Utility function used by low-level drivers to prevent
1960 * further commands from being queued to the device.
1961 * @shost:  host in question
1962 *
1963 * There is no timer nor any other means by which the requests get unblocked
1964 * other than the low-level driver calling scsi_unblock_requests().
1965 */
1966void scsi_block_requests(struct Scsi_Host *shost)
1967{
1968	shost->host_self_blocked = 1;
1969}
1970EXPORT_SYMBOL(scsi_block_requests);
1971
1972/**
1973 * scsi_unblock_requests - Utility function used by low-level drivers to allow
1974 * further commands to be queued to the device.
1975 * @shost:  host in question
1976 *
1977 * There is no timer nor any other means by which the requests get unblocked
1978 * other than the low-level driver calling scsi_unblock_requests(). This is done
1979 * as an API function so that changes to the internals of the scsi mid-layer
1980 * won't require wholesale changes to drivers that use this feature.
1981 */
1982void scsi_unblock_requests(struct Scsi_Host *shost)
1983{
1984	shost->host_self_blocked = 0;
1985	scsi_run_host_queues(shost);
1986}
1987EXPORT_SYMBOL(scsi_unblock_requests);
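
/*
 * Hedged sketch of how a low-level driver might bracket an internal
 * controller reset with the two helpers above.  my_hw_reset() is a
 * hypothetical LLD routine.
 */
static void my_lld_reset(struct Scsi_Host *shost)
{
	scsi_block_requests(shost);	/* stop new commands from the midlayer */
	my_hw_reset(shost);		/* hypothetical hardware reset */
	scsi_unblock_requests(shost);	/* resume queueing and kick the queues */
}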
1988
1989void scsi_exit_queue(void)
1990{
1991	kmem_cache_destroy(scsi_sense_cache);
1992}
1993
1994/**
1995 *	scsi_mode_select - issue a mode select
1996 *	@sdev:	SCSI device to be queried
1997 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
1998 *	@sp:	Save page bit (0 == don't save, 1 == save)
1999 *	@modepage: mode page being requested
2000 *	@buffer: request buffer (may not be smaller than eight bytes)
2001 *	@len:	length of request buffer.
2002 *	@timeout: command timeout
2003 *	@retries: number of retries before failing
2004 *	@data: returns a structure abstracting the mode header data
2005 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
2006 *		must be SCSI_SENSE_BUFFERSIZE big.
2007 *
2008 *	Returns zero if successful; negative error number or scsi
2009 *	status on error
2010 *
2011 */
2012int
2013scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
2014		 unsigned char *buffer, int len, int timeout, int retries,
2015		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2016{
2017	unsigned char cmd[10];
2018	unsigned char *real_buffer;
2019	int ret;
2020
2021	memset(cmd, 0, sizeof(cmd));
2022	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
2023
2024	if (sdev->use_10_for_ms) {
2025		if (len > 65535)
2026			return -EINVAL;
2027		real_buffer = kmalloc(8 + len, GFP_KERNEL);
2028		if (!real_buffer)
2029			return -ENOMEM;
2030		memcpy(real_buffer + 8, buffer, len);
2031		len += 8;
2032		real_buffer[0] = 0;
2033		real_buffer[1] = 0;
2034		real_buffer[2] = data->medium_type;
2035		real_buffer[3] = data->device_specific;
2036		real_buffer[4] = data->longlba ? 0x01 : 0;
2037		real_buffer[5] = 0;
2038		real_buffer[6] = data->block_descriptor_length >> 8;
2039		real_buffer[7] = data->block_descriptor_length;
2040
2041		cmd[0] = MODE_SELECT_10;
2042		cmd[7] = len >> 8;
2043		cmd[8] = len;
2044	} else {
2045		if (len > 255 || data->block_descriptor_length > 255 ||
2046		    data->longlba)
2047			return -EINVAL;
2048
2049		real_buffer = kmalloc(4 + len, GFP_KERNEL);
2050		if (!real_buffer)
2051			return -ENOMEM;
2052		memcpy(real_buffer + 4, buffer, len);
2053		len += 4;
2054		real_buffer[0] = 0;
2055		real_buffer[1] = data->medium_type;
2056		real_buffer[2] = data->device_specific;
2057		real_buffer[3] = data->block_descriptor_length;
2058
2059		cmd[0] = MODE_SELECT;
2060		cmd[4] = len;
2061	}
2062
2063	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
2064			       sshdr, timeout, retries, NULL);
2065	kfree(real_buffer);
2066	return ret;
2067}
2068EXPORT_SYMBOL_GPL(scsi_mode_select);
2069
2070/**
2071 *	scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
2072 *	@sdev:	SCSI device to be queried
2073 *	@dbd:	set if mode sense will allow block descriptors to be returned
2074 *	@modepage: mode page being requested
2075 *	@buffer: request buffer (may not be smaller than eight bytes)
2076 *	@len:	length of request buffer.
2077 *	@timeout: command timeout
2078 *	@retries: number of retries before failing
2079 *	@data: returns a structure abstracting the mode header data
2080 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
2081 *		must be SCSI_SENSE_BUFFERSIZE big.
2082 *
2083 *	Returns zero if successful, or a negative error number on failure
2084 */
2085int
2086scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
2087		  unsigned char *buffer, int len, int timeout, int retries,
2088		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2089{
2090	unsigned char cmd[12];
2091	int use_10_for_ms;
2092	int header_length;
2093	int result, retry_count = retries;
2094	struct scsi_sense_hdr my_sshdr;
2095
2096	memset(data, 0, sizeof(*data));
2097	memset(&cmd[0], 0, 12);
2098
2099	dbd = sdev->set_dbd_for_ms ? 8 : dbd;
2100	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
2101	cmd[2] = modepage;
2102
2103	/* caller might not be interested in sense, but we need it */
2104	if (!sshdr)
2105		sshdr = &my_sshdr;
2106
2107 retry:
2108	use_10_for_ms = sdev->use_10_for_ms;
2109
2110	if (use_10_for_ms) {
2111		if (len < 8)
2112			len = 8;
2113
2114		cmd[0] = MODE_SENSE_10;
2115		cmd[8] = len;
2116		header_length = 8;
2117	} else {
2118		if (len < 4)
2119			len = 4;
2120
2121		cmd[0] = MODE_SENSE;
2122		cmd[4] = len;
2123		header_length = 4;
2124	}
2125
2126	memset(buffer, 0, len);
2127
2128	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
2129				  sshdr, timeout, retries, NULL);
2130	if (result < 0)
2131		return result;
2132
2133	/* This code looks awful: what it's doing is making sure an
2134	 * ILLEGAL REQUEST sense return identifies the actual command
2135	 * byte as the problem.  MODE_SENSE commands can return
2136	 * ILLEGAL REQUEST if the code page isn't supported */
2137
2138	if (!scsi_status_is_good(result)) {
2139		if (scsi_sense_valid(sshdr)) {
2140			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
2141			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
2142				/*
2143				 * Invalid command operation code
2144				 */
2145				if (use_10_for_ms) {
2146					sdev->use_10_for_ms = 0;
2147					goto retry;
2148				}
2149			}
2150			if (scsi_status_is_check_condition(result) &&
2151			    sshdr->sense_key == UNIT_ATTENTION &&
2152			    retry_count) {
2153				retry_count--;
2154				goto retry;
2155			}
2156		}
2157		return -EIO;
2158	}
2159	if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2160		     (modepage == 6 || modepage == 8))) {
2161		/* Initio breakage? */
2162		header_length = 0;
2163		data->length = 13;
2164		data->medium_type = 0;
2165		data->device_specific = 0;
2166		data->longlba = 0;
2167		data->block_descriptor_length = 0;
2168	} else if (use_10_for_ms) {
2169		data->length = buffer[0]*256 + buffer[1] + 2;
2170		data->medium_type = buffer[2];
2171		data->device_specific = buffer[3];
2172		data->longlba = buffer[4] & 0x01;
2173		data->block_descriptor_length = buffer[6]*256
2174			+ buffer[7];
2175	} else {
2176		data->length = buffer[0] + 1;
2177		data->medium_type = buffer[1];
2178		data->device_specific = buffer[2];
2179		data->block_descriptor_length = buffer[3];
2180	}
2181	data->header_length = header_length;
2182
2183	return 0;
2184}
2185EXPORT_SYMBOL(scsi_mode_sense);
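
/*
 * Hedged sketch combining scsi_mode_sense() and scsi_mode_select(), loosely
 * modelled on sd.c's cache-type handling: read the caching mode page, flip
 * the WCE bit and write the page back.  The timeout, retry count and buffer
 * size are illustrative, and my_set_write_cache() is a hypothetical helper.
 */
static int my_set_write_cache(struct scsi_device *sdev, bool enable)
{
	unsigned char buffer[512];
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	unsigned char *page;
	int len, ret;

	/* DBD set (0x08): no block descriptors; page 8 is the caching page */
	ret = scsi_mode_sense(sdev, 0x08, 8, buffer, sizeof(buffer),
			      30 * HZ, 3, &data, NULL);
	if (ret)
		return ret;

	page = buffer + data.header_length + data.block_descriptor_length;
	len = min_t(int, data.length - data.header_length -
			 data.block_descriptor_length,
		    (int)sizeof(buffer) - data.header_length -
			 data.block_descriptor_length);
	if (len <= 2)
		return -EINVAL;

	if (enable)
		page[2] |= 0x04;	/* set WCE */
	else
		page[2] &= ~0x04;	/* clear WCE */
	page[0] &= 0x3f;		/* clear PS/SPF before MODE SELECT */
	data.device_specific = 0;	/* current values are not changeable */

	return scsi_mode_select(sdev, 1 /* PF */, 0 /* SP */, 8, page, len,
				30 * HZ, 3, &data, &sshdr);
}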
2186
2187/**
2188 *	scsi_test_unit_ready - test if unit is ready
2189 *	@sdev:	scsi device to change the state of.
2190 *	@timeout: command timeout
2191 *	@retries: number of retries before failing
 2192 *	@sshdr: output pointer for decoded sense information.
2193 *
 2194 *	Returns zero if successful or an error if TUR failed.  For
2195 *	removable media, UNIT_ATTENTION sets ->changed flag.
2196 **/
2197int
2198scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2199		     struct scsi_sense_hdr *sshdr)
2200{
2201	char cmd[] = {
2202		TEST_UNIT_READY, 0, 0, 0, 0, 0,
2203	};
2204	int result;
2205
2206	/* try to eat the UNIT_ATTENTION if there are enough retries */
2207	do {
2208		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2209					  timeout, 1, NULL);
2210		if (sdev->removable && scsi_sense_valid(sshdr) &&
2211		    sshdr->sense_key == UNIT_ATTENTION)
2212			sdev->changed = 1;
2213	} while (scsi_sense_valid(sshdr) &&
2214		 sshdr->sense_key == UNIT_ATTENTION && --retries);
2215
2216	return result;
2217}
2218EXPORT_SYMBOL(scsi_test_unit_ready);
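
/*
 * Hedged sketch: polling a device for readiness, in the style of upper-level
 * drivers waiting for spin-up.  The timeout and retry values are
 * illustrative, and my_wait_for_ready() is a hypothetical helper.
 */
static bool my_wait_for_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;
	int result = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);

	if (result == 0)
		return true;	/* GOOD status, the unit is ready */
	if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY)
		sdev_printk(KERN_INFO, sdev, "device not ready\n");
	return false;
}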
2219
2220/**
2221 *	scsi_device_set_state - Take the given device through the device state model.
2222 *	@sdev:	scsi device to change the state of.
2223 *	@state:	state to change to.
2224 *
2225 *	Returns zero if successful or an error if the requested
2226 *	transition is illegal.
2227 */
2228int
2229scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2230{
2231	enum scsi_device_state oldstate = sdev->sdev_state;
2232
2233	if (state == oldstate)
2234		return 0;
2235
2236	switch (state) {
2237	case SDEV_CREATED:
2238		switch (oldstate) {
2239		case SDEV_CREATED_BLOCK:
2240			break;
2241		default:
2242			goto illegal;
2243		}
2244		break;
2245
2246	case SDEV_RUNNING:
2247		switch (oldstate) {
2248		case SDEV_CREATED:
2249		case SDEV_OFFLINE:
2250		case SDEV_TRANSPORT_OFFLINE:
2251		case SDEV_QUIESCE:
2252		case SDEV_BLOCK:
2253			break;
2254		default:
2255			goto illegal;
2256		}
2257		break;
2258
2259	case SDEV_QUIESCE:
2260		switch (oldstate) {
2261		case SDEV_RUNNING:
2262		case SDEV_OFFLINE:
2263		case SDEV_TRANSPORT_OFFLINE:
2264			break;
2265		default:
2266			goto illegal;
2267		}
2268		break;
2269
2270	case SDEV_OFFLINE:
2271	case SDEV_TRANSPORT_OFFLINE:
2272		switch (oldstate) {
2273		case SDEV_CREATED:
2274		case SDEV_RUNNING:
2275		case SDEV_QUIESCE:
2276		case SDEV_BLOCK:
2277			break;
2278		default:
2279			goto illegal;
2280		}
2281		break;
2282
2283	case SDEV_BLOCK:
2284		switch (oldstate) {
2285		case SDEV_RUNNING:
2286		case SDEV_CREATED_BLOCK:
2287		case SDEV_QUIESCE:
2288		case SDEV_OFFLINE:
2289			break;
2290		default:
2291			goto illegal;
2292		}
2293		break;
2294
2295	case SDEV_CREATED_BLOCK:
2296		switch (oldstate) {
2297		case SDEV_CREATED:
2298			break;
2299		default:
2300			goto illegal;
2301		}
2302		break;
2303
2304	case SDEV_CANCEL:
2305		switch (oldstate) {
2306		case SDEV_CREATED:
2307		case SDEV_RUNNING:
2308		case SDEV_QUIESCE:
2309		case SDEV_OFFLINE:
2310		case SDEV_TRANSPORT_OFFLINE:
2311			break;
2312		default:
2313			goto illegal;
2314		}
2315		break;
2316
2317	case SDEV_DEL:
2318		switch (oldstate) {
2319		case SDEV_CREATED:
2320		case SDEV_RUNNING:
2321		case SDEV_OFFLINE:
2322		case SDEV_TRANSPORT_OFFLINE:
2323		case SDEV_CANCEL:
2324		case SDEV_BLOCK:
2325		case SDEV_CREATED_BLOCK:
2326			break;
2327		default:
2328			goto illegal;
2329		}
2330		break;
2331
2332	}
2333	sdev->offline_already = false;
2334	sdev->sdev_state = state;
2335	return 0;
2336
2337 illegal:
2338	SCSI_LOG_ERROR_RECOVERY(1,
2339				sdev_printk(KERN_ERR, sdev,
2340					    "Illegal state transition %s->%s",
2341					    scsi_device_state_name(oldstate),
2342					    scsi_device_state_name(state))
2343				);
2344	return -EINVAL;
2345}
2346EXPORT_SYMBOL(scsi_device_set_state);
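
/*
 * Hedged sketch: external callers normally serialise state changes with
 * sdev->state_mutex, as scsi_internal_device_block() further down does.
 * my_offline_device() is a hypothetical helper.
 */
static int my_offline_device(struct scsi_device *sdev)
{
	int ret;

	mutex_lock(&sdev->state_mutex);
	ret = scsi_device_set_state(sdev, SDEV_OFFLINE);
	mutex_unlock(&sdev->state_mutex);
	return ret;
}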
2347
2348/**
2349 *	scsi_evt_emit - emit a single SCSI device uevent
2350 *	@sdev: associated SCSI device
2351 *	@evt: event to emit
2352 *
2353 *	Send a single uevent (scsi_event) to the associated scsi_device.
2354 */
2355static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2356{
2357	int idx = 0;
2358	char *envp[3];
2359
2360	switch (evt->evt_type) {
2361	case SDEV_EVT_MEDIA_CHANGE:
2362		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2363		break;
2364	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2365		scsi_rescan_device(&sdev->sdev_gendev);
2366		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
2367		break;
2368	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2369		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
2370		break;
2371	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2372	       envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
2373		break;
2374	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2375		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
2376		break;
2377	case SDEV_EVT_LUN_CHANGE_REPORTED:
2378		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
2379		break;
2380	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
2381		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
2382		break;
2383	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
2384		envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
2385		break;
2386	default:
2387		/* do nothing */
2388		break;
2389	}
2390
2391	envp[idx++] = NULL;
2392
2393	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2394}
2395
2396/**
2397 *	scsi_evt_thread - send a uevent for each scsi event
2398 *	@work: work struct for scsi_device
2399 *
2400 *	Dispatch queued events to their associated scsi_device kobjects
2401 *	as uevents.
2402 */
2403void scsi_evt_thread(struct work_struct *work)
2404{
2405	struct scsi_device *sdev;
2406	enum scsi_device_event evt_type;
2407	LIST_HEAD(event_list);
2408
2409	sdev = container_of(work, struct scsi_device, event_work);
2410
2411	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
2412		if (test_and_clear_bit(evt_type, sdev->pending_events))
2413			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
2414
2415	while (1) {
2416		struct scsi_event *evt;
2417		struct list_head *this, *tmp;
2418		unsigned long flags;
2419
2420		spin_lock_irqsave(&sdev->list_lock, flags);
2421		list_splice_init(&sdev->event_list, &event_list);
2422		spin_unlock_irqrestore(&sdev->list_lock, flags);
2423
2424		if (list_empty(&event_list))
2425			break;
2426
2427		list_for_each_safe(this, tmp, &event_list) {
2428			evt = list_entry(this, struct scsi_event, node);
2429			list_del(&evt->node);
2430			scsi_evt_emit(sdev, evt);
2431			kfree(evt);
2432		}
2433	}
2434}
2435
2436/**
2437 * 	sdev_evt_send - send asserted event to uevent thread
2438 *	@sdev: scsi_device event occurred on
2439 *	@evt: event to send
2440 *
2441 *	Assert scsi device event asynchronously.
2442 */
2443void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2444{
2445	unsigned long flags;
2446
2447#if 0
2448	/* FIXME: currently this check eliminates all media change events
2449	 * for polled devices.  Need to update to discriminate between AN
2450	 * and polled events */
2451	if (!test_bit(evt->evt_type, sdev->supported_events)) {
2452		kfree(evt);
2453		return;
2454	}
2455#endif
2456
2457	spin_lock_irqsave(&sdev->list_lock, flags);
2458	list_add_tail(&evt->node, &sdev->event_list);
2459	schedule_work(&sdev->event_work);
2460	spin_unlock_irqrestore(&sdev->list_lock, flags);
2461}
2462EXPORT_SYMBOL_GPL(sdev_evt_send);
2463
2464/**
2465 * 	sdev_evt_alloc - allocate a new scsi event
2466 *	@evt_type: type of event to allocate
2467 *	@gfpflags: GFP flags for allocation
2468 *
2469 *	Allocates and returns a new scsi_event.
2470 */
2471struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2472				  gfp_t gfpflags)
2473{
2474	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2475	if (!evt)
2476		return NULL;
2477
2478	evt->evt_type = evt_type;
2479	INIT_LIST_HEAD(&evt->node);
2480
2481	/* evt_type-specific initialization, if any */
2482	switch (evt_type) {
2483	case SDEV_EVT_MEDIA_CHANGE:
2484	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2485	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2486	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2487	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2488	case SDEV_EVT_LUN_CHANGE_REPORTED:
2489	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
2490	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
2491	default:
2492		/* do nothing */
2493		break;
2494	}
2495
2496	return evt;
2497}
2498EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2499
2500/**
2501 * 	sdev_evt_send_simple - send asserted event to uevent thread
2502 *	@sdev: scsi_device event occurred on
2503 *	@evt_type: type of event to send
2504 *	@gfpflags: GFP flags for allocation
2505 *
2506 *	Assert scsi device event asynchronously, given an event type.
2507 */
2508void sdev_evt_send_simple(struct scsi_device *sdev,
2509			  enum scsi_device_event evt_type, gfp_t gfpflags)
2510{
2511	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2512	if (!evt) {
2513		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2514			    evt_type);
2515		return;
2516	}
2517
2518	sdev_evt_send(sdev, evt);
2519}
2520EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
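
/*
 * Hedged sketch: how sense-handling code typically asserts one of the events
 * above, e.g. after decoding a CAPACITY DATA HAS CHANGED unit attention.
 * GFP_ATOMIC is used because this may run from completion context.
 */
static void my_report_capacity_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_CAPACITY_CHANGE_REPORTED,
			     GFP_ATOMIC);
}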
2521
2522/**
2523 *	scsi_device_quiesce - Block all commands except power management.
2524 *	@sdev:	scsi device to quiesce.
2525 *
2526 *	This works by trying to transition to the SDEV_QUIESCE state
2527 *	(which must be a legal transition).  When the device is in this
2528 *	state, only power management requests will be accepted, all others will
2529 *	be deferred.
2530 *
2531 *	Must be called with user context, may sleep.
2532 *
 2533 *	Returns zero if successful or an error if not.
2534 */
2535int
2536scsi_device_quiesce(struct scsi_device *sdev)
2537{
2538	struct request_queue *q = sdev->request_queue;
2539	int err;
2540
2541	/*
2542	 * It is allowed to call scsi_device_quiesce() multiple times from
2543	 * the same context but concurrent scsi_device_quiesce() calls are
2544	 * not allowed.
2545	 */
2546	WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
2547
2548	if (sdev->quiesced_by == current)
2549		return 0;
2550
2551	blk_set_pm_only(q);
2552
2553	blk_mq_freeze_queue(q);
2554	/*
2555	 * Ensure that the effect of blk_set_pm_only() will be visible
2556	 * for percpu_ref_tryget() callers that occur after the queue
2557	 * unfreeze even if the queue was already frozen before this function
2558	 * was called. See also https://lwn.net/Articles/573497/.
2559	 */
2560	synchronize_rcu();
2561	blk_mq_unfreeze_queue(q);
2562
2563	mutex_lock(&sdev->state_mutex);
2564	err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2565	if (err == 0)
2566		sdev->quiesced_by = current;
2567	else
2568		blk_clear_pm_only(q);
2569	mutex_unlock(&sdev->state_mutex);
2570
2571	return err;
2572}
2573EXPORT_SYMBOL(scsi_device_quiesce);
2574
2575/**
2576 *	scsi_device_resume - Restart user issued commands to a quiesced device.
2577 *	@sdev:	scsi device to resume.
2578 *
2579 *	Moves the device from quiesced back to running and restarts the
2580 *	queues.
2581 *
2582 *	Must be called with user context, may sleep.
2583 */
2584void scsi_device_resume(struct scsi_device *sdev)
2585{
2586	/* check if the device state was mutated prior to resume, and if
2587	 * so assume the state is being managed elsewhere (for example
2588	 * device deleted during suspend)
2589	 */
2590	mutex_lock(&sdev->state_mutex);
2591	if (sdev->sdev_state == SDEV_QUIESCE)
2592		scsi_device_set_state(sdev, SDEV_RUNNING);
2593	if (sdev->quiesced_by) {
2594		sdev->quiesced_by = NULL;
2595		blk_clear_pm_only(sdev->request_queue);
2596	}
2597	mutex_unlock(&sdev->state_mutex);
2598}
2599EXPORT_SYMBOL(scsi_device_resume);
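
/*
 * Hedged sketch: the quiesce/resume pair brackets work that must run without
 * competing user I/O, e.g. power-management requests.  my_send_pm_request()
 * is a hypothetical helper that would issue an RQF_PM request.
 */
static int my_do_pm_transition(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);

	if (err)
		return err;
	err = my_send_pm_request(sdev);		/* hypothetical */
	scsi_device_resume(sdev);
	return err;
}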
2600
2601static void
2602device_quiesce_fn(struct scsi_device *sdev, void *data)
2603{
2604	scsi_device_quiesce(sdev);
2605}
2606
2607void
2608scsi_target_quiesce(struct scsi_target *starget)
2609{
2610	starget_for_each_device(starget, NULL, device_quiesce_fn);
2611}
2612EXPORT_SYMBOL(scsi_target_quiesce);
2613
2614static void
2615device_resume_fn(struct scsi_device *sdev, void *data)
2616{
2617	scsi_device_resume(sdev);
2618}
2619
2620void
2621scsi_target_resume(struct scsi_target *starget)
2622{
2623	starget_for_each_device(starget, NULL, device_resume_fn);
2624}
2625EXPORT_SYMBOL(scsi_target_resume);
2626
2627/**
2628 * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
2629 * @sdev: device to block
2630 *
2631 * Pause SCSI command processing on the specified device. Does not sleep.
2632 *
2633 * Returns zero if successful or a negative error code upon failure.
2634 *
2635 * Notes:
2636 * This routine transitions the device to the SDEV_BLOCK state (which must be
2637 * a legal transition). When the device is in this state, command processing
2638 * is paused until the device leaves the SDEV_BLOCK state. See also
2639 * scsi_internal_device_unblock_nowait().
2640 */
2641int scsi_internal_device_block_nowait(struct scsi_device *sdev)
2642{
2643	struct request_queue *q = sdev->request_queue;
2644	int err = 0;
2645
2646	err = scsi_device_set_state(sdev, SDEV_BLOCK);
2647	if (err) {
2648		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2649
2650		if (err)
2651			return err;
2652	}
2653
2654	/*
2655	 * The device has transitioned to SDEV_BLOCK.  Stop the
2656	 * block layer from calling the midlayer with this device's
2657	 * request queue.
2658	 */
2659	blk_mq_quiesce_queue_nowait(q);
2660	return 0;
2661}
2662EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
2663
2664/**
2665 * scsi_internal_device_block - try to transition to the SDEV_BLOCK state
2666 * @sdev: device to block
2667 *
2668 * Pause SCSI command processing on the specified device and wait until all
2669 * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep.
2670 *
2671 * Returns zero if successful or a negative error code upon failure.
2672 *
2673 * Note:
2674 * This routine transitions the device to the SDEV_BLOCK state (which must be
2675 * a legal transition). When the device is in this state, command processing
2676 * is paused until the device leaves the SDEV_BLOCK state. See also
2677 * scsi_internal_device_unblock().
2678 */
2679static int scsi_internal_device_block(struct scsi_device *sdev)
2680{
2681	struct request_queue *q = sdev->request_queue;
2682	int err;
2683
2684	mutex_lock(&sdev->state_mutex);
2685	err = scsi_internal_device_block_nowait(sdev);
2686	if (err == 0)
2687		blk_mq_quiesce_queue(q);
2688	mutex_unlock(&sdev->state_mutex);
2689
2690	return err;
2691}
2692
2693void scsi_start_queue(struct scsi_device *sdev)
2694{
2695	struct request_queue *q = sdev->request_queue;
2696
2697	blk_mq_unquiesce_queue(q);
2698}
2699
2700/**
2701 * scsi_internal_device_unblock_nowait - resume a device after a block request
2702 * @sdev:	device to resume
2703 * @new_state:	state to set the device to after unblocking
2704 *
2705 * Restart the device queue for a previously suspended SCSI device. Does not
2706 * sleep.
2707 *
2708 * Returns zero if successful or a negative error code upon failure.
2709 *
2710 * Notes:
2711 * This routine transitions the device to the SDEV_RUNNING state or to one of
2712 * the offline states (which must be a legal transition) allowing the midlayer
2713 * to goose the queue for this device.
2714 */
2715int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
2716					enum scsi_device_state new_state)
2717{
2718	switch (new_state) {
2719	case SDEV_RUNNING:
2720	case SDEV_TRANSPORT_OFFLINE:
2721		break;
2722	default:
2723		return -EINVAL;
2724	}
2725
2726	/*
2727	 * Try to transition the scsi device to SDEV_RUNNING or one of the
2728	 * offlined states and goose the device queue if successful.
2729	 */
2730	switch (sdev->sdev_state) {
2731	case SDEV_BLOCK:
2732	case SDEV_TRANSPORT_OFFLINE:
2733		sdev->sdev_state = new_state;
2734		break;
2735	case SDEV_CREATED_BLOCK:
2736		if (new_state == SDEV_TRANSPORT_OFFLINE ||
2737		    new_state == SDEV_OFFLINE)
2738			sdev->sdev_state = new_state;
2739		else
2740			sdev->sdev_state = SDEV_CREATED;
2741		break;
2742	case SDEV_CANCEL:
2743	case SDEV_OFFLINE:
2744		break;
2745	default:
2746		return -EINVAL;
2747	}
2748	scsi_start_queue(sdev);
2749
2750	return 0;
2751}
2752EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);
2753
2754/**
2755 * scsi_internal_device_unblock - resume a device after a block request
2756 * @sdev:	device to resume
2757 * @new_state:	state to set the device to after unblocking
2758 *
2759 * Restart the device queue for a previously suspended SCSI device. May sleep.
2760 *
2761 * Returns zero if successful or a negative error code upon failure.
2762 *
2763 * Notes:
2764 * This routine transitions the device to the SDEV_RUNNING state or to one of
2765 * the offline states (which must be a legal transition) allowing the midlayer
2766 * to goose the queue for this device.
2767 */
2768static int scsi_internal_device_unblock(struct scsi_device *sdev,
2769					enum scsi_device_state new_state)
2770{
2771	int ret;
2772
2773	mutex_lock(&sdev->state_mutex);
2774	ret = scsi_internal_device_unblock_nowait(sdev, new_state);
2775	mutex_unlock(&sdev->state_mutex);
2776
2777	return ret;
2778}
2779
2780static void
2781device_block(struct scsi_device *sdev, void *data)
2782{
2783	int ret;
2784
2785	ret = scsi_internal_device_block(sdev);
2786
2787	WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n",
2788		  dev_name(&sdev->sdev_gendev), ret);
2789}
2790
2791static int
2792target_block(struct device *dev, void *data)
2793{
2794	if (scsi_is_target_device(dev))
2795		starget_for_each_device(to_scsi_target(dev), NULL,
2796					device_block);
2797	return 0;
2798}
2799
2800void
2801scsi_target_block(struct device *dev)
2802{
2803	if (scsi_is_target_device(dev))
2804		starget_for_each_device(to_scsi_target(dev), NULL,
2805					device_block);
2806	else
2807		device_for_each_child(dev, NULL, target_block);
2808}
2809EXPORT_SYMBOL_GPL(scsi_target_block);
2810
2811static void
2812device_unblock(struct scsi_device *sdev, void *data)
2813{
2814	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
2815}
2816
2817static int
2818target_unblock(struct device *dev, void *data)
2819{
2820	if (scsi_is_target_device(dev))
2821		starget_for_each_device(to_scsi_target(dev), data,
2822					device_unblock);
2823	return 0;
2824}
2825
2826void
2827scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
2828{
2829	if (scsi_is_target_device(dev))
2830		starget_for_each_device(to_scsi_target(dev), &new_state,
2831					device_unblock);
2832	else
2833		device_for_each_child(dev, &new_state, target_unblock);
2834}
2835EXPORT_SYMBOL_GPL(scsi_target_unblock);
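
/*
 * Hedged sketch modelled on how transport classes use the helpers above:
 * block a target while its port is gone, then either resume I/O or mark the
 * devices transport-offline if the port never comes back.  The my_*()
 * function names are hypothetical.
 */
static void my_port_lost(struct device *target_dev)
{
	scsi_target_block(target_dev);
}

static void my_port_recovery_done(struct device *target_dev, bool recovered)
{
	scsi_target_unblock(target_dev,
			    recovered ? SDEV_RUNNING : SDEV_TRANSPORT_OFFLINE);
}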
2836
2837int
2838scsi_host_block(struct Scsi_Host *shost)
2839{
2840	struct scsi_device *sdev;
2841	int ret = 0;
2842
2843	/*
2844	 * Call scsi_internal_device_block_nowait so we can avoid
2845	 * calling synchronize_rcu() for each LUN.
2846	 */
2847	shost_for_each_device(sdev, shost) {
2848		mutex_lock(&sdev->state_mutex);
2849		ret = scsi_internal_device_block_nowait(sdev);
2850		mutex_unlock(&sdev->state_mutex);
2851		if (ret) {
2852			scsi_device_put(sdev);
2853			break;
2854		}
2855	}
2856
2857	/*
2858	 * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag so
2859	 * calling synchronize_rcu() once is enough.
2860	 */
2861	WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING);
2862
2863	if (!ret)
2864		synchronize_rcu();
2865
2866	return ret;
2867}
2868EXPORT_SYMBOL_GPL(scsi_host_block);
2869
2870int
2871scsi_host_unblock(struct Scsi_Host *shost, int new_state)
2872{
2873	struct scsi_device *sdev;
2874	int ret = 0;
2875
2876	shost_for_each_device(sdev, shost) {
2877		ret = scsi_internal_device_unblock(sdev, new_state);
2878		if (ret) {
2879			scsi_device_put(sdev);
2880			break;
2881		}
2882	}
2883	return ret;
2884}
2885EXPORT_SYMBOL_GPL(scsi_host_unblock);
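
/*
 * Hedged sketch: pausing every LUN on a host around a recovery step.
 * my_reset_adapter() is a hypothetical LLD routine.
 */
static int my_host_level_reset(struct Scsi_Host *shost)
{
	int ret = scsi_host_block(shost);

	if (ret)
		return ret;
	ret = my_reset_adapter(shost);		/* hypothetical */
	scsi_host_unblock(shost, SDEV_RUNNING);
	return ret;
}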
2886
2887/**
 2888 * scsi_kmap_atomic_sg - find and atomically map an sg-element
2889 * @sgl:	scatter-gather list
2890 * @sg_count:	number of segments in sg
2891 * @offset:	offset in bytes into sg, on return offset into the mapped area
2892 * @len:	bytes to map, on return number of bytes mapped
2893 *
2894 * Returns virtual address of the start of the mapped page
2895 */
2896void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2897			  size_t *offset, size_t *len)
2898{
2899	int i;
2900	size_t sg_len = 0, len_complete = 0;
2901	struct scatterlist *sg;
2902	struct page *page;
2903
2904	WARN_ON(!irqs_disabled());
2905
2906	for_each_sg(sgl, sg, sg_count, i) {
2907		len_complete = sg_len; /* Complete sg-entries */
2908		sg_len += sg->length;
2909		if (sg_len > *offset)
2910			break;
2911	}
2912
2913	if (unlikely(i == sg_count)) {
2914		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2915			"elements %d\n",
2916		       __func__, sg_len, *offset, sg_count);
2917		WARN_ON(1);
2918		return NULL;
2919	}
2920
2921	/* Offset starting from the beginning of first page in this sg-entry */
2922	*offset = *offset - len_complete + sg->offset;
2923
2924	/* Assumption: contiguous pages can be accessed as "page + i" */
2925	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2926	*offset &= ~PAGE_MASK;
2927
2928	/* Bytes in this sg-entry from *offset to the end of the page */
2929	sg_len = PAGE_SIZE - *offset;
2930	if (*len > sg_len)
2931		*len = sg_len;
2932
2933	return kmap_atomic(page);
2934}
2935EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2936
2937/**
2938 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2939 * @virt:	virtual address to be unmapped
2940 */
2941void scsi_kunmap_atomic_sg(void *virt)
2942{
2943	kunmap_atomic(virt);
2944}
2945EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
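
/*
 * Hedged sketch: copying the first bytes of a command's data out of its
 * scatterlist with the kmap helpers above.  The caller must already have
 * interrupts disabled (see the WARN_ON in scsi_kmap_atomic_sg()), and
 * my_peek_data() is a hypothetical helper.
 */
static void my_peek_data(struct scsi_cmnd *cmd, u8 *dst, size_t count)
{
	size_t offset = 0, len = count;
	void *vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd),
					  scsi_sg_count(cmd), &offset, &len);

	if (!vaddr)
		return;
	memcpy(dst, vaddr + offset, len);	/* len is capped to one page */
	scsi_kunmap_atomic_sg(vaddr);
}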
2946
2947void sdev_disable_disk_events(struct scsi_device *sdev)
2948{
2949	atomic_inc(&sdev->disk_events_disable_depth);
2950}
2951EXPORT_SYMBOL(sdev_disable_disk_events);
2952
2953void sdev_enable_disk_events(struct scsi_device *sdev)
2954{
2955	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
2956		return;
2957	atomic_dec(&sdev->disk_events_disable_depth);
2958}
2959EXPORT_SYMBOL(sdev_enable_disk_events);
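
/*
 * Hedged sketch: suppressing media-change polling around an operation that
 * would otherwise generate spurious disk events.  my_media_operation() is a
 * hypothetical helper.
 */
static void my_quiet_media_operation(struct scsi_device *sdev)
{
	sdev_disable_disk_events(sdev);
	my_media_operation(sdev);		/* hypothetical */
	sdev_enable_disk_events(sdev);
}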
2960
2961static unsigned char designator_prio(const unsigned char *d)
2962{
2963	if (d[1] & 0x30)
2964		/* not associated with LUN */
2965		return 0;
2966
2967	if (d[3] == 0)
2968		/* invalid length */
2969		return 0;
2970
2971	/*
2972	 * Order of preference for lun descriptor:
2973	 * - SCSI name string
2974	 * - NAA IEEE Registered Extended
2975	 * - EUI-64 based 16-byte
2976	 * - EUI-64 based 12-byte
2977	 * - NAA IEEE Registered
2978	 * - NAA IEEE Extended
2979	 * - EUI-64 based 8-byte
2980	 * - SCSI name string (truncated)
2981	 * - T10 Vendor ID
 2982 * as longer descriptors reduce the likelihood
2983	 * of identification clashes.
2984	 */
2985
2986	switch (d[1] & 0xf) {
2987	case 8:
2988		/* SCSI name string, variable-length UTF-8 */
2989		return 9;
2990	case 3:
2991		switch (d[4] >> 4) {
2992		case 6:
2993			/* NAA registered extended */
2994			return 8;
2995		case 5:
2996			/* NAA registered */
2997			return 5;
2998		case 4:
2999			/* NAA extended */
3000			return 4;
3001		case 3:
3002			/* NAA locally assigned */
3003			return 1;
3004		default:
3005			break;
3006		}
3007		break;
3008	case 2:
3009		switch (d[3]) {
3010		case 16:
3011			/* EUI64-based, 16 byte */
3012			return 7;
3013		case 12:
3014			/* EUI64-based, 12 byte */
3015			return 6;
3016		case 8:
3017			/* EUI64-based, 8 byte */
3018			return 3;
3019		default:
3020			break;
3021		}
3022		break;
3023	case 1:
3024		/* T10 vendor ID */
3025		return 1;
3026	default:
3027		break;
3028	}
3029
3030	return 0;
3031}
3032
3033/**
3034 * scsi_vpd_lun_id - return a unique device identification
3035 * @sdev: SCSI device
3036 * @id:   buffer for the identification
3037 * @id_len:  length of the buffer
3038 *
3039 * Copies a unique device identification into @id based
3040 * on the information in the VPD page 0x83 of the device.
3041 * The string will be formatted as a SCSI name string.
3042 *
3043 * Returns the length of the identification or error on failure.
3044 * If the identifier is longer than the supplied buffer the actual
3045 * identifier length is returned and the buffer is not zero-padded.
3046 */
3047int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
3048{
3049	u8 cur_id_prio = 0;
3050	u8 cur_id_size = 0;
3051	const unsigned char *d, *cur_id_str;
3052	const struct scsi_vpd *vpd_pg83;
3053	int id_size = -EINVAL;
3054
3055	rcu_read_lock();
3056	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
3057	if (!vpd_pg83) {
3058		rcu_read_unlock();
3059		return -ENXIO;
3060	}
3061
3062	/* The id string must be at least 20 bytes + terminating NULL byte */
3063	if (id_len < 21) {
3064		rcu_read_unlock();
3065		return -EINVAL;
3066	}
3067
3068	memset(id, 0, id_len);
3069	for (d = vpd_pg83->data + 4;
3070	     d < vpd_pg83->data + vpd_pg83->len;
3071	     d += d[3] + 4) {
3072		u8 prio = designator_prio(d);
3073
3074		if (prio == 0 || cur_id_prio > prio)
3075			continue;
3076
3077		switch (d[1] & 0xf) {
3078		case 0x1:
3079			/* T10 Vendor ID */
3080			if (cur_id_size > d[3])
3081				break;
3082			cur_id_prio = prio;
3083			cur_id_size = d[3];
3084			if (cur_id_size + 4 > id_len)
3085				cur_id_size = id_len - 4;
3086			cur_id_str = d + 4;
3087			id_size = snprintf(id, id_len, "t10.%*pE",
3088					   cur_id_size, cur_id_str);
3089			break;
3090		case 0x2:
3091			/* EUI-64 */
3092			cur_id_prio = prio;
3093			cur_id_size = d[3];
3094			cur_id_str = d + 4;
3095			switch (cur_id_size) {
3096			case 8:
3097				id_size = snprintf(id, id_len,
3098						   "eui.%8phN",
3099						   cur_id_str);
3100				break;
3101			case 12:
3102				id_size = snprintf(id, id_len,
3103						   "eui.%12phN",
3104						   cur_id_str);
3105				break;
3106			case 16:
3107				id_size = snprintf(id, id_len,
3108						   "eui.%16phN",
3109						   cur_id_str);
3110				break;
3111			default:
3112				break;
3113			}
3114			break;
3115		case 0x3:
3116			/* NAA */
3117			cur_id_prio = prio;
3118			cur_id_size = d[3];
3119			cur_id_str = d + 4;
3120			switch (cur_id_size) {
3121			case 8:
3122				id_size = snprintf(id, id_len,
3123						   "naa.%8phN",
3124						   cur_id_str);
3125				break;
3126			case 16:
3127				id_size = snprintf(id, id_len,
3128						   "naa.%16phN",
3129						   cur_id_str);
3130				break;
3131			default:
3132				break;
3133			}
3134			break;
3135		case 0x8:
3136			/* SCSI name string */
3137			if (cur_id_size > d[3])
3138				break;
3139			/* Prefer others for truncated descriptor */
3140			if (d[3] > id_len) {
3141				prio = 2;
3142				if (cur_id_prio > prio)
3143					break;
3144			}
3145			cur_id_prio = prio;
3146			cur_id_size = id_size = d[3];
3147			cur_id_str = d + 4;
3148			if (cur_id_size >= id_len)
3149				cur_id_size = id_len - 1;
3150			memcpy(id, cur_id_str, cur_id_size);
3151			break;
3152		default:
3153			break;
3154		}
3155	}
3156	rcu_read_unlock();
3157
3158	return id_size;
3159}
3160EXPORT_SYMBOL(scsi_vpd_lun_id);
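
/*
 * Hedged sketch: fetching the designator-based device identification, as
 * device handlers do for path grouping.  The 256-byte buffer size is
 * illustrative and my_log_lun_id() is a hypothetical helper.
 */
static void my_log_lun_id(struct scsi_device *sdev)
{
	char id[256];

	if (scsi_vpd_lun_id(sdev, id, sizeof(id)) > 0)
		sdev_printk(KERN_INFO, sdev, "device id: %s\n", id);
}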
3161
3162/*
3163 * scsi_vpd_tpg_id - return a target port group identifier
3164 * @sdev: SCSI device
3165 *
3166 * Returns the Target Port Group identifier from the information
 3167 * in VPD page 0x83 of the device.
3168 *
3169 * Returns the identifier or error on failure.
3170 */
3171int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
3172{
3173	const unsigned char *d;
3174	const struct scsi_vpd *vpd_pg83;
3175	int group_id = -EAGAIN, rel_port = -1;
3176
3177	rcu_read_lock();
3178	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
3179	if (!vpd_pg83) {
3180		rcu_read_unlock();
3181		return -ENXIO;
3182	}
3183
3184	d = vpd_pg83->data + 4;
3185	while (d < vpd_pg83->data + vpd_pg83->len) {
3186		switch (d[1] & 0xf) {
3187		case 0x4:
3188			/* Relative target port */
3189			rel_port = get_unaligned_be16(&d[6]);
3190			break;
3191		case 0x5:
3192			/* Target port group */
3193			group_id = get_unaligned_be16(&d[6]);
3194			break;
3195		default:
3196			break;
3197		}
3198		d += d[3] + 4;
3199	}
3200	rcu_read_unlock();
3201
3202	if (group_id >= 0 && rel_id && rel_port != -1)
3203		*rel_id = rel_port;
3204
3205	return group_id;
3206}
3207EXPORT_SYMBOL(scsi_vpd_tpg_id);
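
/*
 * Hedged sketch: reading the target port group and relative port number for
 * ALUA-style path grouping.  my_log_tpg() is a hypothetical helper.
 */
static void my_log_tpg(struct scsi_device *sdev)
{
	int rel_port = -1;
	int group_id = scsi_vpd_tpg_id(sdev, &rel_port);

	if (group_id >= 0)
		sdev_printk(KERN_INFO, sdev, "tpg %d, relative port %d\n",
			    group_id, rel_port);
}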
3208
3209/**
3210 * scsi_build_sense - build sense data for a command
3211 * @scmd:	scsi command for which the sense should be formatted
3212 * @desc:	Sense format (non-zero == descriptor format,
3213 *              0 == fixed format)
3214 * @key:	Sense key
3215 * @asc:	Additional sense code
3216 * @ascq:	Additional sense code qualifier
3217 *
3218 **/
3219void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq)
3220{
3221	scsi_build_sense_buffer(desc, scmd->sense_buffer, key, asc, ascq);
3222	scmd->result = SAM_STAT_CHECK_CONDITION;
3223}
3224EXPORT_SYMBOL_GPL(scsi_build_sense);
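
/*
 * Hedged sketch: a driver rejecting an unsupported CDB by filling in sense
 * data before completing the command through its normal completion path.
 * my_reject_command() is a hypothetical helper.
 */
static void my_reject_command(struct scsi_cmnd *scmd)
{
	/* ILLEGAL REQUEST, INVALID FIELD IN CDB (asc 0x24, ascq 0x00) */
	scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0x0);
}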
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 1999 Eric Youngdale
   4 * Copyright (C) 2014 Christoph Hellwig
   5 *
   6 *  SCSI queueing library.
   7 *      Initial versions: Eric Youngdale (eric@andante.org).
   8 *                        Based upon conversations with large numbers
   9 *                        of people at Linux Expo.
  10 */
  11
  12#include <linux/bio.h>
  13#include <linux/bitops.h>
  14#include <linux/blkdev.h>
  15#include <linux/completion.h>
  16#include <linux/kernel.h>
  17#include <linux/export.h>
  18#include <linux/init.h>
  19#include <linux/pci.h>
  20#include <linux/delay.h>
  21#include <linux/hardirq.h>
  22#include <linux/scatterlist.h>
  23#include <linux/blk-mq.h>
  24#include <linux/blk-integrity.h>
  25#include <linux/ratelimit.h>
  26#include <asm/unaligned.h>
  27
  28#include <scsi/scsi.h>
  29#include <scsi/scsi_cmnd.h>
  30#include <scsi/scsi_dbg.h>
  31#include <scsi/scsi_device.h>
  32#include <scsi/scsi_driver.h>
  33#include <scsi/scsi_eh.h>
  34#include <scsi/scsi_host.h>
  35#include <scsi/scsi_transport.h> /* __scsi_init_queue() */
  36#include <scsi/scsi_dh.h>
  37
  38#include <trace/events/scsi.h>
  39
  40#include "scsi_debugfs.h"
  41#include "scsi_priv.h"
  42#include "scsi_logging.h"
  43
  44/*
  45 * Size of integrity metadata is usually small, 1 inline sg should
  46 * cover normal cases.
  47 */
  48#ifdef CONFIG_ARCH_NO_SG_CHAIN
  49#define  SCSI_INLINE_PROT_SG_CNT  0
  50#define  SCSI_INLINE_SG_CNT  0
  51#else
  52#define  SCSI_INLINE_PROT_SG_CNT  1
  53#define  SCSI_INLINE_SG_CNT  2
  54#endif
  55
  56static struct kmem_cache *scsi_sense_cache;
  57static DEFINE_MUTEX(scsi_sense_cache_mutex);
  58
  59static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);
  60
  61int scsi_init_sense_cache(struct Scsi_Host *shost)
  62{
  63	int ret = 0;
  64
  65	mutex_lock(&scsi_sense_cache_mutex);
  66	if (!scsi_sense_cache) {
  67		scsi_sense_cache =
  68			kmem_cache_create_usercopy("scsi_sense_cache",
  69				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
  70				0, SCSI_SENSE_BUFFERSIZE, NULL);
  71		if (!scsi_sense_cache)
  72			ret = -ENOMEM;
  73	}
  74	mutex_unlock(&scsi_sense_cache_mutex);
  75	return ret;
  76}
  77
  78static void
  79scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
  80{
  81	struct Scsi_Host *host = cmd->device->host;
  82	struct scsi_device *device = cmd->device;
  83	struct scsi_target *starget = scsi_target(device);
  84
  85	/*
  86	 * Set the appropriate busy bit for the device/host.
  87	 *
  88	 * If the host/device isn't busy, assume that something actually
  89	 * completed, and that we should be able to queue a command now.
  90	 *
  91	 * Note that the prior mid-layer assumption that any host could
  92	 * always queue at least one command is now broken.  The mid-layer
  93	 * will implement a user specifiable stall (see
  94	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
  95	 * if a command is requeued with no other commands outstanding
  96	 * either for the device or for the host.
  97	 */
  98	switch (reason) {
  99	case SCSI_MLQUEUE_HOST_BUSY:
 100		atomic_set(&host->host_blocked, host->max_host_blocked);
 101		break;
 102	case SCSI_MLQUEUE_DEVICE_BUSY:
 103	case SCSI_MLQUEUE_EH_RETRY:
 104		atomic_set(&device->device_blocked,
 105			   device->max_device_blocked);
 106		break;
 107	case SCSI_MLQUEUE_TARGET_BUSY:
 108		atomic_set(&starget->target_blocked,
 109			   starget->max_target_blocked);
 110		break;
 111	}
 112}
 113
 114static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
 115{
 116	struct request *rq = scsi_cmd_to_rq(cmd);
 117
 118	if (rq->rq_flags & RQF_DONTPREP) {
 119		rq->rq_flags &= ~RQF_DONTPREP;
 120		scsi_mq_uninit_cmd(cmd);
 121	} else {
 122		WARN_ON_ONCE(true);
 123	}
 124
 125	if (msecs) {
 126		blk_mq_requeue_request(rq, false);
 127		blk_mq_delay_kick_requeue_list(rq->q, msecs);
 128	} else
 129		blk_mq_requeue_request(rq, true);
 130}
 131
 132/**
 133 * __scsi_queue_insert - private queue insertion
 134 * @cmd: The SCSI command being requeued
 135 * @reason:  The reason for the requeue
 136 * @unbusy: Whether the queue should be unbusied
 137 *
 138 * This is a private queue insertion.  The public interface
 139 * scsi_queue_insert() always assumes the queue should be unbusied
 140 * because it's always called before the completion.  This function is
 141 * for a requeue after completion, which should only occur in this
 142 * file.
 143 */
 144static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
 145{
 146	struct scsi_device *device = cmd->device;
 147
 148	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
 149		"Inserting command %p into mlqueue\n", cmd));
 150
 151	scsi_set_blocked(cmd, reason);
 152
 153	/*
 154	 * Decrement the counters, since these commands are no longer
 155	 * active on the host/device.
 156	 */
 157	if (unbusy)
 158		scsi_device_unbusy(device, cmd);
 159
 160	/*
 161	 * Requeue this command.  It will go before all other commands
 162	 * that are already in the queue. Schedule requeue work under
 163	 * lock such that the kblockd_schedule_work() call happens
 164	 * before blk_mq_destroy_queue() finishes.
 165	 */
 166	cmd->result = 0;
 167
 168	blk_mq_requeue_request(scsi_cmd_to_rq(cmd), true);
 169}
 170
 171/**
 172 * scsi_queue_insert - Reinsert a command in the queue.
 173 * @cmd:    command that we are adding to queue.
 174 * @reason: why we are inserting command to queue.
 175 *
 176 * We do this for one of two cases. Either the host is busy and it cannot accept
 177 * any more commands for the time being, or the device returned QUEUE_FULL and
 178 * can accept no more commands.
 179 *
 180 * Context: This could be called either from an interrupt context or a normal
 181 * process context.
 182 */
 183void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 184{
 185	__scsi_queue_insert(cmd, reason, true);
 186}
 187
 188
 189/**
 190 * __scsi_execute - insert request and wait for the result
 191 * @sdev:	scsi device
 192 * @cmd:	scsi command
 193 * @data_direction: data direction
 194 * @buffer:	data buffer
 195 * @bufflen:	len of buffer
 196 * @sense:	optional sense buffer
 197 * @sshdr:	optional decoded sense header
 198 * @timeout:	request timeout in HZ
 199 * @retries:	number of times to retry request
 200 * @flags:	flags for ->cmd_flags
 201 * @rq_flags:	flags for ->rq_flags
 202 * @resid:	optional residual length
 203 *
 204 * Returns the scsi_cmnd result field if a command was executed, or a negative
 205 * Linux error code if we didn't get that far.
 206 */
 207int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 208		 int data_direction, void *buffer, unsigned bufflen,
 209		 unsigned char *sense, struct scsi_sense_hdr *sshdr,
 210		 int timeout, int retries, blk_opf_t flags,
 211		 req_flags_t rq_flags, int *resid)
 212{
 213	struct request *req;
 214	struct scsi_cmnd *scmd;
 215	int ret;
 216
 217	req = scsi_alloc_request(sdev->request_queue,
 218			data_direction == DMA_TO_DEVICE ?
 219			REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
 220			rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
 221	if (IS_ERR(req))
 222		return PTR_ERR(req);
 223
 224	if (bufflen) {
 225		ret = blk_rq_map_kern(sdev->request_queue, req,
 226				      buffer, bufflen, GFP_NOIO);
 227		if (ret)
 228			goto out;
 229	}
 230	scmd = blk_mq_rq_to_pdu(req);
 231	scmd->cmd_len = COMMAND_SIZE(cmd[0]);
 232	memcpy(scmd->cmnd, cmd, scmd->cmd_len);
 233	scmd->allowed = retries;
 234	req->timeout = timeout;
 235	req->cmd_flags |= flags;
 236	req->rq_flags |= rq_flags | RQF_QUIET;
 237
 238	/*
 239	 * head injection *required* here otherwise quiesce won't work
 240	 */
 241	blk_execute_rq(req, true);
 242
 243	/*
 244	 * Some devices (USB mass-storage in particular) may transfer
 245	 * garbage data together with a residue indicating that the data
 246	 * is invalid.  Prevent the garbage from being misinterpreted
 247	 * and prevent security leaks by zeroing out the excess data.
 248	 */
 249	if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen))
 250		memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len);
 251
 252	if (resid)
 253		*resid = scmd->resid_len;
 254	if (sense && scmd->sense_len)
 255		memcpy(sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
 256	if (sshdr)
 257		scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
 258				     sshdr);
 259	ret = scmd->result;
 260 out:
 261	blk_mq_free_request(req);
 262
 263	return ret;
 264}
 265EXPORT_SYMBOL(__scsi_execute);
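
/*
 * Hedged sketch: issuing a 6-byte INQUIRY through __scsi_execute().  Real
 * callers normally use the scsi_execute()/scsi_execute_req() wrappers; the
 * timeout and retry values here are illustrative and my_do_inquiry() is a
 * hypothetical helper.
 */
static int my_do_inquiry(struct scsi_device *sdev, void *buf, u8 len)
{
	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, len, 0 };
	struct scsi_sense_hdr sshdr;

	return __scsi_execute(sdev, cmd, DMA_FROM_DEVICE, buf, len,
			      NULL, &sshdr, 30 * HZ, 3, 0, 0, NULL);
}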
 266
 267/*
 268 * Wake up the error handler if necessary.  To avoid missing a wakeup when
 269 * the number of host in-flight requests equals shost->host_failed, a
 270 * call_rcu() in scsi_eh_scmd_add() is used in combination
 271 * with an RCU read lock in this function to ensure that this function in
 272 * its entirety either finishes before scsi_eh_scmd_add() increases the
 273 * host_failed counter or notices the shost state change made by
 274 * scsi_eh_scmd_add().
 275 */
 276static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 277{
 278	unsigned long flags;
 279
 280	rcu_read_lock();
 281	__clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
 282	if (unlikely(scsi_host_in_recovery(shost))) {
 283		spin_lock_irqsave(shost->host_lock, flags);
 284		if (shost->host_failed || shost->host_eh_scheduled)
 285			scsi_eh_wakeup(shost);
 286		spin_unlock_irqrestore(shost->host_lock, flags);
 287	}
 288	rcu_read_unlock();
 289}
 290
 291void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
 292{
 293	struct Scsi_Host *shost = sdev->host;
 294	struct scsi_target *starget = scsi_target(sdev);
 295
 296	scsi_dec_host_busy(shost, cmd);
 297
 298	if (starget->can_queue > 0)
 299		atomic_dec(&starget->target_busy);
 300
 301	sbitmap_put(&sdev->budget_map, cmd->budget_token);
 302	cmd->budget_token = -1;
 303}
 304
 305static void scsi_kick_queue(struct request_queue *q)
 306{
 307	blk_mq_run_hw_queues(q, false);
 308}
 309
 310/*
 311 * Kick the queue of SCSI device @sdev if @sdev != current_sdev. Called with
 312 * interrupts disabled.
 313 */
 314static void scsi_kick_sdev_queue(struct scsi_device *sdev, void *data)
 315{
 316	struct scsi_device *current_sdev = data;
 317
 318	if (sdev != current_sdev)
 319		blk_mq_run_hw_queues(sdev->request_queue, true);
 320}
 321
 322/*
 323 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 324 * and call blk_run_queue for all the scsi_devices on the target -
 325 * including current_sdev first.
 326 *
 327 * Called with *no* scsi locks held.
 328 */
 329static void scsi_single_lun_run(struct scsi_device *current_sdev)
 330{
 331	struct Scsi_Host *shost = current_sdev->host;
 332	struct scsi_target *starget = scsi_target(current_sdev);
 333	unsigned long flags;
 334
 335	spin_lock_irqsave(shost->host_lock, flags);
 336	starget->starget_sdev_user = NULL;
 337	spin_unlock_irqrestore(shost->host_lock, flags);
 338
 339	/*
 340	 * Call blk_run_queue for all LUNs on the target, starting with
 341	 * current_sdev. We race with others (to set starget_sdev_user),
 342	 * but in most cases, we will be first. Ideally, each LU on the
 343	 * target would get some limited time or requests on the target.
 344	 */
 345	scsi_kick_queue(current_sdev->request_queue);
 346
 347	spin_lock_irqsave(shost->host_lock, flags);
 348	if (!starget->starget_sdev_user)
 349		__starget_for_each_device(starget, current_sdev,
 350					  scsi_kick_sdev_queue);
 351	spin_unlock_irqrestore(shost->host_lock, flags);
 352}
 353
 354static inline bool scsi_device_is_busy(struct scsi_device *sdev)
 355{
 356	if (scsi_device_busy(sdev) >= sdev->queue_depth)
 357		return true;
 358	if (atomic_read(&sdev->device_blocked) > 0)
 359		return true;
 360	return false;
 361}
 362
 363static inline bool scsi_target_is_busy(struct scsi_target *starget)
 364{
 365	if (starget->can_queue > 0) {
 366		if (atomic_read(&starget->target_busy) >= starget->can_queue)
 367			return true;
 368		if (atomic_read(&starget->target_blocked) > 0)
 369			return true;
 370	}
 371	return false;
 372}
 373
 374static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
 375{
 376	if (atomic_read(&shost->host_blocked) > 0)
 377		return true;
 378	if (shost->host_self_blocked)
 379		return true;
 380	return false;
 381}
 382
 383static void scsi_starved_list_run(struct Scsi_Host *shost)
 384{
 385	LIST_HEAD(starved_list);
 386	struct scsi_device *sdev;
 387	unsigned long flags;
 388
 389	spin_lock_irqsave(shost->host_lock, flags);
 390	list_splice_init(&shost->starved_list, &starved_list);
 391
 392	while (!list_empty(&starved_list)) {
 393		struct request_queue *slq;
 394
 395		/*
 396		 * As long as shost is accepting commands and we have
 397		 * starved queues, call blk_run_queue. scsi_request_fn
 398		 * drops the queue_lock and can add us back to the
 399		 * starved_list.
 400		 *
 401		 * host_lock protects the starved_list and starved_entry.
 402		 * scsi_request_fn must get the host_lock before checking
 403		 * or modifying starved_list or starved_entry.
 404		 */
 405		if (scsi_host_is_busy(shost))
 406			break;
 407
 408		sdev = list_entry(starved_list.next,
 409				  struct scsi_device, starved_entry);
 410		list_del_init(&sdev->starved_entry);
 411		if (scsi_target_is_busy(scsi_target(sdev))) {
 412			list_move_tail(&sdev->starved_entry,
 413				       &shost->starved_list);
 414			continue;
 415		}
 416
 417		/*
 418		 * Once we drop the host lock, a racing scsi_remove_device()
 419		 * call may remove the sdev from the starved list and destroy
 420		 * it and the queue.  Mitigate by taking a reference to the
 421		 * queue and never touching the sdev again after we drop the
 422		 * host lock.  Note: if __scsi_remove_device() invokes
 423		 * blk_mq_destroy_queue() before the queue is run from this
 424		 * function then blk_run_queue() will return immediately since
 425		 * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING.
 426		 */
 427		slq = sdev->request_queue;
 428		if (!blk_get_queue(slq))
 429			continue;
 430		spin_unlock_irqrestore(shost->host_lock, flags);
 431
 432		scsi_kick_queue(slq);
 433		blk_put_queue(slq);
 434
 435		spin_lock_irqsave(shost->host_lock, flags);
 436	}
 437	/* put any unprocessed entries back */
 438	list_splice(&starved_list, &shost->starved_list);
 439	spin_unlock_irqrestore(shost->host_lock, flags);
 440}
 441
 442/**
 443 * scsi_run_queue - Select a proper request queue to serve next.
 444 * @q:  last request's queue
 445 *
 446 * The previous command was completely finished, start a new one if possible.
 447 */
 448static void scsi_run_queue(struct request_queue *q)
 449{
 450	struct scsi_device *sdev = q->queuedata;
 451
 452	if (scsi_target(sdev)->single_lun)
 453		scsi_single_lun_run(sdev);
 454	if (!list_empty(&sdev->host->starved_list))
 455		scsi_starved_list_run(sdev->host);
 456
 457	blk_mq_run_hw_queues(q, false);
 458}
 459
 460void scsi_requeue_run_queue(struct work_struct *work)
 461{
 462	struct scsi_device *sdev;
 463	struct request_queue *q;
 464
 465	sdev = container_of(work, struct scsi_device, requeue_work);
 466	q = sdev->request_queue;
 467	scsi_run_queue(q);
 468}
 469
 470void scsi_run_host_queues(struct Scsi_Host *shost)
 471{
 472	struct scsi_device *sdev;
 473
 474	shost_for_each_device(sdev, shost)
 475		scsi_run_queue(sdev->request_queue);
 476}
 477
 478static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
 479{
 480	if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
 481		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
 482
 483		if (drv->uninit_command)
 484			drv->uninit_command(cmd);
 485	}
 486}
 487
 488void scsi_free_sgtables(struct scsi_cmnd *cmd)
 489{
 490	if (cmd->sdb.table.nents)
 491		sg_free_table_chained(&cmd->sdb.table,
 492				SCSI_INLINE_SG_CNT);
 493	if (scsi_prot_sg_count(cmd))
 494		sg_free_table_chained(&cmd->prot_sdb->table,
 495				SCSI_INLINE_PROT_SG_CNT);
 496}
 497EXPORT_SYMBOL_GPL(scsi_free_sgtables);
 498
 499static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
 500{
 501	scsi_free_sgtables(cmd);
 502	scsi_uninit_cmd(cmd);
 503}
 504
 505static void scsi_run_queue_async(struct scsi_device *sdev)
 506{
 507	if (scsi_target(sdev)->single_lun ||
 508	    !list_empty(&sdev->host->starved_list)) {
 509		kblockd_schedule_work(&sdev->requeue_work);
 510	} else {
 511		/*
 512		 * smp_mb() present in sbitmap_queue_clear() or implied in
 513		 * .end_io is for ordering writing .device_busy in
 514		 * scsi_device_unbusy() and reading sdev->restarts.
 515		 */
 516		int old = atomic_read(&sdev->restarts);
 517
 518		/*
 519		 * ->restarts has to be kept as non-zero if new budget
 520		 *  contention occurs.
 521		 *
 522		 *  No need to run queue when either another re-run
 523		 *  queue wins in updating ->restarts or a new budget
 524		 *  contention occurs.
 525		 */
 526		if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
 527			blk_mq_run_hw_queues(sdev->request_queue, true);
 528	}
 529}
 530
 531/* Returns false when no more bytes to process, true if there are more */
 532static bool scsi_end_request(struct request *req, blk_status_t error,
 533		unsigned int bytes)
 534{
 535	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 536	struct scsi_device *sdev = cmd->device;
 537	struct request_queue *q = sdev->request_queue;
 538
 539	if (blk_update_request(req, error, bytes))
 540		return true;
 541
 542	/* XXX: feed I/O completion timing into the entropy pool (QUEUE_FLAG_ADD_RANDOM). */
 543	if (blk_queue_add_random(q))
 544		add_disk_randomness(req->q->disk);
 545
 546	if (!blk_rq_is_passthrough(req)) {
 547		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
 548		cmd->flags &= ~SCMD_INITIALIZED;
 549	}
 550
 551	/*
 552	 * Calling rcu_barrier() is not necessary here because the
 553	 * SCSI error handler guarantees that the function called by
 554	 * call_rcu() has been called before scsi_end_request() is
 555	 * called.
 556	 */
 557	destroy_rcu_head(&cmd->rcu);
 558
 559	/*
 560	 * In the MQ case the command gets freed by __blk_mq_end_request,
 561	 * so we have to do all cleanup that depends on it earlier.
 562	 *
 563	 * We also can't kick the queues from irq context, so we
 564	 * will have to defer it to a workqueue.
 565	 */
 566	scsi_mq_uninit_cmd(cmd);
 567
 568	/*
 569	 * queue is still alive, so grab the ref for preventing it
 570	 * from being cleaned up during running queue.
 571	 */
 572	percpu_ref_get(&q->q_usage_counter);
 573
 574	__blk_mq_end_request(req, error);
 575
 576	scsi_run_queue_async(sdev);
 577
 578	percpu_ref_put(&q->q_usage_counter);
 579	return false;
 580}
 581
 582static inline u8 get_scsi_ml_byte(int result)
 583{
 584	return (result >> 8) & 0xff;
 585}
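
/*
 * For reference: cmd->result packs several byte-wide fields, and the helpers
 * in this file rely on the layout
 *
 *	bits  0.. 7	SCSI status byte (SAM_STAT_*)
 *	bits  8..15	SCSI midlayer byte (SCSIML_STAT_*)
 *	bits 16..23	host byte (DID_*)
 *
 * so get_scsi_ml_byte() above simply isolates the second byte of the word,
 * just as host_byte() isolates the third.
 */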
 586
 587/**
 588 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 589 * @result:	scsi error code
 590 *
 591 * Translate a SCSI result code into a blk_status_t value.
 592 */
 593static blk_status_t scsi_result_to_blk_status(int result)
 594{
 595	/*
 596	 * Check the scsi-ml byte first in case we converted a host or status
 597	 * byte.
 598	 */
 599	switch (get_scsi_ml_byte(result)) {
 600	case SCSIML_STAT_OK:
 601		break;
 602	case SCSIML_STAT_RESV_CONFLICT:
 603		return BLK_STS_NEXUS;
 604	case SCSIML_STAT_NOSPC:
 605		return BLK_STS_NOSPC;
 606	case SCSIML_STAT_MED_ERROR:
 607		return BLK_STS_MEDIUM;
 608	case SCSIML_STAT_TGT_FAILURE:
 609		return BLK_STS_TARGET;
 610	}
 611
 612	switch (host_byte(result)) {
 613	case DID_OK:
 614		if (scsi_status_is_good(result))
 615			return BLK_STS_OK;
 616		return BLK_STS_IOERR;
 617	case DID_TRANSPORT_FAILFAST:
 618	case DID_TRANSPORT_MARGINAL:
 619		return BLK_STS_TRANSPORT;
 620	default:
 621		return BLK_STS_IOERR;
 622	}
 623}
 624
 625/**
 626 * scsi_rq_err_bytes - determine number of bytes till the next failure boundary
 627 * @rq: request to examine
 628 *
 629 * Description:
 630 *     A request could be a merge of I/Os which require different failure
 631 *     handling.  This function determines the number of bytes which
 632 *     can be failed from the beginning of the request without
 633 *     crossing into an area which needs to be retried further.
 634 *
 635 * Return:
 636 *     The number of bytes to fail.
 637 */
 638static unsigned int scsi_rq_err_bytes(const struct request *rq)
 639{
 640	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
 641	unsigned int bytes = 0;
 642	struct bio *bio;
 643
 644	if (!(rq->rq_flags & RQF_MIXED_MERGE))
 645		return blk_rq_bytes(rq);
 646
 647	/*
 648	 * Currently the only 'mixing' which can happen is between
 649	 * different failfast types.  We can safely fail portions
 650	 * which have all the failfast bits that the first one has -
 651	 * the ones which are at least as eager to fail as the first
 652	 * one.
 653	 */
 654	for (bio = rq->bio; bio; bio = bio->bi_next) {
 655		if ((bio->bi_opf & ff) != ff)
 656			break;
 657		bytes += bio->bi_iter.bi_size;
 658	}
 659
 660	/* this could lead to an infinite loop */
 661	BUG_ON(blk_rq_bytes(rq) && !bytes);
 662	return bytes;
 663}
 664
 665static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
 666{
 667	struct request *req = scsi_cmd_to_rq(cmd);
 668	unsigned long wait_for;
 669
 670	if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
 671		return false;
 672
 673	wait_for = (cmd->allowed + 1) * req->timeout;
 674	if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
 675		scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n",
 676			    wait_for/HZ);
 677		return true;
 678	}
 679	return false;
 680}
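
/*
 * Worked example for the check above (the numbers are illustrative only):
 * with cmd->allowed == 5 retries and a 30 second request timeout, wait_for
 * is (5 + 1) * 30 seconds worth of jiffies, so the command is reported as
 * timed out once roughly three minutes have passed since jiffies_at_alloc,
 * regardless of how the individual retries were spaced.
 */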
 681
 682/*
 683 * When ALUA transition state is returned, reprep the cmd to
 684 * use the ALUA handler's transition timeout. Delay the reprep
 685 * 1 sec to avoid aggressive retries of the target in that
 686 * state.
 687 */
 688#define ALUA_TRANSITION_REPREP_DELAY	1000
 689
 690/* Helper for scsi_io_completion() when special action required. */
 691static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
 692{
 693	struct request *req = scsi_cmd_to_rq(cmd);
 694	int level = 0;
 695	enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
 696	      ACTION_RETRY, ACTION_DELAYED_RETRY} action;
 697	struct scsi_sense_hdr sshdr;
 698	bool sense_valid;
 699	bool sense_current = true;      /* false implies "deferred sense" */
 700	blk_status_t blk_stat;
 701
 702	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
 703	if (sense_valid)
 704		sense_current = !scsi_sense_is_deferred(&sshdr);
 705
 706	blk_stat = scsi_result_to_blk_status(result);
 707
 708	if (host_byte(result) == DID_RESET) {
 709		/* Third party bus reset or reset for error recovery
 710		 * reasons.  Just retry the command and see what
 711		 * happens.
 712		 */
 713		action = ACTION_RETRY;
 714	} else if (sense_valid && sense_current) {
 715		switch (sshdr.sense_key) {
 716		case UNIT_ATTENTION:
 717			if (cmd->device->removable) {
 718				/* Detected disc change.  Set a bit
 719				 * and quietly refuse further access.
 720				 */
 721				cmd->device->changed = 1;
 722				action = ACTION_FAIL;
 723			} else {
 724				/* Must have been a power glitch, or a
 725				 * bus reset.  Could not have been a
 726				 * media change, so we just retry the
 727				 * command and see what happens.
 728				 */
 729				action = ACTION_RETRY;
 730			}
 731			break;
 732		case ILLEGAL_REQUEST:
 733			/* If we had an ILLEGAL REQUEST returned, then
 734			 * we may have performed an unsupported
 735			 * command.  The only thing this should be
 736			 * would be a ten byte read where only a six
 737			 * byte read was supported.  Also, on a system
 738			 * where READ CAPACITY failed, we may have
 739			 * read past the end of the disk.
 740			 */
 741			if ((cmd->device->use_10_for_rw &&
 742			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
 743			    (cmd->cmnd[0] == READ_10 ||
 744			     cmd->cmnd[0] == WRITE_10)) {
 745				/* This will issue a new 6-byte command. */
 746				cmd->device->use_10_for_rw = 0;
 747				action = ACTION_REPREP;
 748			} else if (sshdr.asc == 0x10) /* DIX */ {
 749				action = ACTION_FAIL;
 750				blk_stat = BLK_STS_PROTECTION;
 751			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
 752			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
 753				action = ACTION_FAIL;
 754				blk_stat = BLK_STS_TARGET;
 755			} else
 756				action = ACTION_FAIL;
 757			break;
 758		case ABORTED_COMMAND:
 759			action = ACTION_FAIL;
 760			if (sshdr.asc == 0x10) /* DIF */
 761				blk_stat = BLK_STS_PROTECTION;
 762			break;
 763		case NOT_READY:
 764			/* If the device is in the process of becoming
 765			 * ready, or has a temporary blockage, retry.
 766			 */
 767			if (sshdr.asc == 0x04) {
 768				switch (sshdr.ascq) {
 769				case 0x01: /* becoming ready */
 770				case 0x04: /* format in progress */
 771				case 0x05: /* rebuild in progress */
 772				case 0x06: /* recalculation in progress */
 773				case 0x07: /* operation in progress */
 774				case 0x08: /* Long write in progress */
 775				case 0x09: /* self test in progress */
 776				case 0x11: /* notify (enable spinup) required */
 777				case 0x14: /* space allocation in progress */
 778				case 0x1a: /* start stop unit in progress */
 779				case 0x1b: /* sanitize in progress */
 780				case 0x1d: /* configuration in progress */
 781				case 0x24: /* depopulation in progress */
 782					action = ACTION_DELAYED_RETRY;
 783					break;
 784				case 0x0a: /* ALUA state transition */
 785					action = ACTION_DELAYED_REPREP;
 786					break;
 787				default:
 788					action = ACTION_FAIL;
 789					break;
 790				}
 791			} else
 792				action = ACTION_FAIL;
 793			break;
 794		case VOLUME_OVERFLOW:
 795			/* See SSC3rXX or current. */
 796			action = ACTION_FAIL;
 797			break;
 798		case DATA_PROTECT:
 799			action = ACTION_FAIL;
 800			if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) ||
 801			    (sshdr.asc == 0x55 &&
 802			     (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) {
 803				/* Insufficient zone resources */
 804				blk_stat = BLK_STS_ZONE_OPEN_RESOURCE;
 805			}
 806			break;
 807		default:
 808			action = ACTION_FAIL;
 809			break;
 810		}
 811	} else
 812		action = ACTION_FAIL;
 813
 814	if (action != ACTION_FAIL && scsi_cmd_runtime_exceeced(cmd))
 815		action = ACTION_FAIL;
 816
 817	switch (action) {
 818	case ACTION_FAIL:
 819		/* Give up and fail the remainder of the request */
 820		if (!(req->rq_flags & RQF_QUIET)) {
 821			static DEFINE_RATELIMIT_STATE(_rs,
 822					DEFAULT_RATELIMIT_INTERVAL,
 823					DEFAULT_RATELIMIT_BURST);
 824
 825			if (unlikely(scsi_logging_level))
 826				level =
 827				     SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
 828						    SCSI_LOG_MLCOMPLETE_BITS);
 829
 830			/*
 831			 * if logging is enabled the failure will be printed
 832			 * in scsi_log_completion(), so avoid duplicate messages
 833			 */
 834			if (!level && __ratelimit(&_rs)) {
 835				scsi_print_result(cmd, NULL, FAILED);
 836				if (sense_valid)
 837					scsi_print_sense(cmd);
 838				scsi_print_command(cmd);
 839			}
 840		}
 841		if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
 842			return;
 843		fallthrough;
 844	case ACTION_REPREP:
 845		scsi_mq_requeue_cmd(cmd, 0);
 846		break;
 847	case ACTION_DELAYED_REPREP:
 848		scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
 849		break;
 850	case ACTION_RETRY:
 851		/* Retry the same command immediately */
 852		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
 853		break;
 854	case ACTION_DELAYED_RETRY:
 855		/* Retry the same command after a delay */
 856		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
 857		break;
 858	}
 859}
 860
 861/*
 862 * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a
 863 * new result that may suppress further error checking. Also modifies
 864 * *blk_statp in some cases.
 865 */
 866static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
 867					blk_status_t *blk_statp)
 868{
 869	bool sense_valid;
 870	bool sense_current = true;	/* false implies "deferred sense" */
 871	struct request *req = scsi_cmd_to_rq(cmd);
 872	struct scsi_sense_hdr sshdr;
 873
 874	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
 875	if (sense_valid)
 876		sense_current = !scsi_sense_is_deferred(&sshdr);
 877
 878	if (blk_rq_is_passthrough(req)) {
 879		if (sense_valid) {
 880			/*
 881			 * SG_IO wants current and deferred errors
 882			 */
 883			cmd->sense_len = min(8 + cmd->sense_buffer[7],
 884					     SCSI_SENSE_BUFFERSIZE);
 885		}
 886		if (sense_current)
 887			*blk_statp = scsi_result_to_blk_status(result);
 888	} else if (blk_rq_bytes(req) == 0 && sense_current) {
 889		/*
 890		 * Flush commands do not transfer any data, and thus cannot use
 891		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
 892		 * This sets *blk_statp explicitly for the problem case.
 893		 */
 894		*blk_statp = scsi_result_to_blk_status(result);
 895	}
 896	/*
 897	 * Recovered errors need reporting, but they're always treated as
 898	 * success, so fiddle the result code here.  For passthrough requests
 899	 * we already took a copy of the original into sreq->result which
 900	 * is what gets returned to the user
 901	 */
 902	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
 903		bool do_print = true;
 904		/*
 905		 * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d]
 906		 * skip print since caller wants ATA registers. Only occurs
 907		 * on SCSI ATA PASS_THROUGH commands when CK_COND=1
 908		 */
 909		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
 910			do_print = false;
 911		else if (req->rq_flags & RQF_QUIET)
 912			do_print = false;
 913		if (do_print)
 914			scsi_print_sense(cmd);
 915		result = 0;
 916		/* for passthrough, *blk_statp may be set */
 917		*blk_statp = BLK_STS_OK;
 918	}
 919	/*
 920	 * Another corner case: the SCSI status byte is non-zero but 'good'.
 921	 * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
 922	 * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
 923	 * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
 924	 * intermediate statuses (both obsolete in SAM-4) as good.
 925	 */
 926	if ((result & 0xff) && scsi_status_is_good(result)) {
 927		result = 0;
 928		*blk_statp = BLK_STS_OK;
 929	}
 930	return result;
 931}
 932
 933/**
 934 * scsi_io_completion - Completion processing for SCSI commands.
 935 * @cmd:	command that is finished.
 936 * @good_bytes:	number of processed bytes.
 937 *
 938 * We will finish off the specified number of sectors. If we are done, the
 939 * command block will be released and the queue function will be goosed. If we
 940 * are not done then we have to figure out what to do next:
 941 *
 942 *   a) We can call scsi_mq_requeue_cmd().  The request will be
 943 *	unprepared and put back on the queue.  Then a new command will
 944 *	be created for it.  This should be used if we made forward
 945 *	progress, or if we want to switch from READ(10) to READ(6) for
 946 *	example.
 947 *
 948 *   b) We can call scsi_io_completion_action().  The request will be
 949 *	put back on the queue and retried using the same command as
 950 *	before, possibly after a delay.
 951 *
 952 *   c) We can call scsi_end_request() with blk_stat other than
 953 *	BLK_STS_OK, to fail the remainder of the request.
 954 */
 955void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 956{
 957	int result = cmd->result;
 958	struct request *req = scsi_cmd_to_rq(cmd);
 959	blk_status_t blk_stat = BLK_STS_OK;
 960
 961	if (unlikely(result))	/* a nz result may or may not be an error */
 962		result = scsi_io_completion_nz_result(cmd, result, &blk_stat);
 963
 964	/*
 965	 * Next deal with any sectors which we were able to correctly
 966	 * handle.
 967	 */
 968	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
 969		"%u sectors total, %d bytes done.\n",
 970		blk_rq_sectors(req), good_bytes));
 971
 972	/*
 973	 * Failed, zero length commands always need to drop down
 974	 * to retry code. Fast path should return in this block.
 975	 */
 976	if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
 977		if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
 978			return; /* no bytes remaining */
 979	}
 980
 981	/* Kill remainder if no retries. */
 982	if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
 983		if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
 984			WARN_ONCE(true,
 985			    "Bytes remaining after failed, no-retry command");
 986		return;
 987	}
 988
 989	/*
 990	 * If there had been no error, but we have leftover bytes in the
 991	 * request just queue the command up again.
 992	 */
 993	if (likely(result == 0))
 994		scsi_mq_requeue_cmd(cmd, 0);
 995	else
 996		scsi_io_completion_action(cmd, result);
 997}
 998
 999static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
1000		struct request *rq)
1001{
1002	return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
1003	       !op_is_write(req_op(rq)) &&
1004	       sdev->host->hostt->dma_need_drain(rq);
1005}
1006
1007/**
1008 * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists
1009 * @cmd: SCSI command data structure to initialize.
1010 *
1011 * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled
1012 * for @cmd.
1013 *
1014 * Returns:
1015 * * BLK_STS_OK       - on success
1016 * * BLK_STS_RESOURCE - if the failure is retryable
1017 * * BLK_STS_IOERR    - if the failure is fatal
1018 */
1019blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
1020{
1021	struct scsi_device *sdev = cmd->device;
1022	struct request *rq = scsi_cmd_to_rq(cmd);
1023	unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
1024	struct scatterlist *last_sg = NULL;
1025	blk_status_t ret;
1026	bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
1027	int count;
1028
1029	if (WARN_ON_ONCE(!nr_segs))
1030		return BLK_STS_IOERR;
1031
1032	/*
1033	 * Make sure there is space for the drain.  The driver must adjust
1034	 * max_hw_segments to be prepared for this.
1035	 */
1036	if (need_drain)
1037		nr_segs++;
1038
1039	/*
1040	 * If sg table allocation fails, requeue request later.
1041	 */
1042	if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
1043			cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT)))
1044		return BLK_STS_RESOURCE;
1045
1046	/*
1047	 * Next, walk the list, and fill in the addresses and sizes of
1048	 * each segment.
1049	 */
1050	count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
1051
1052	if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
1053		unsigned int pad_len =
1054			(rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
1055
1056		last_sg->length += pad_len;
1057		cmd->extra_len += pad_len;
1058	}
1059
1060	if (need_drain) {
1061		sg_unmark_end(last_sg);
1062		last_sg = sg_next(last_sg);
1063		sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
1064		sg_mark_end(last_sg);
1065
1066		cmd->extra_len += sdev->dma_drain_len;
1067		count++;
1068	}
1069
1070	BUG_ON(count > cmd->sdb.table.nents);
1071	cmd->sdb.table.nents = count;
1072	cmd->sdb.length = blk_rq_payload_bytes(rq);
1073
1074	if (blk_integrity_rq(rq)) {
1075		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
1076		int ivecs;
1077
1078		if (WARN_ON_ONCE(!prot_sdb)) {
1079			/*
1080			 * This can happen if someone (e.g. multipath)
1081			 * queues a command to a device on an adapter
1082			 * that does not support DIX.
1083			 */
1084			ret = BLK_STS_IOERR;
1085			goto out_free_sgtables;
1086		}
1087
1088		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
1089
1090		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
1091				prot_sdb->table.sgl,
1092				SCSI_INLINE_PROT_SG_CNT)) {
1093			ret = BLK_STS_RESOURCE;
1094			goto out_free_sgtables;
1095		}
1096
1097		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
1098						prot_sdb->table.sgl);
1099		BUG_ON(count > ivecs);
1100		BUG_ON(count > queue_max_integrity_segments(rq->q));
1101
1102		cmd->prot_sdb = prot_sdb;
1103		cmd->prot_sdb->table.nents = count;
1104	}
1105
1106	return BLK_STS_OK;
1107out_free_sgtables:
1108	scsi_free_sgtables(cmd);
1109	return ret;
1110}
1111EXPORT_SYMBOL(scsi_alloc_sgtables);
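
/*
 * Usage sketch (illustrative, not taken from an in-tree driver): an upper
 * level driver's ->init_command() typically maps the request before building
 * the CDB, roughly
 *
 *	static blk_status_t my_init_command(struct scsi_cmnd *cmd)
 *	{
 *		blk_status_t ret = scsi_alloc_sgtables(cmd);
 *
 *		if (ret != BLK_STS_OK)
 *			return ret;
 *		... fill in cmd->cmnd, cmd->cmd_len, cmd->allowed ...
 *		return BLK_STS_OK;
 *	}
 *
 * where my_init_command is a made-up name; if command setup fails later on,
 * the tables are released again with scsi_free_sgtables(cmd).
 */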
1112
1113/**
1114 * scsi_initialize_rq - initialize struct scsi_cmnd partially
1115 * @rq: Request associated with the SCSI command to be initialized.
1116 *
1117 * This function initializes the members of struct scsi_cmnd that must be
1118 * initialized before request processing starts and that won't be
1119 * reinitialized if a SCSI command is requeued.
1120 */
1121static void scsi_initialize_rq(struct request *rq)
1122{
1123	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1124
1125	memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
1126	cmd->cmd_len = MAX_COMMAND_SIZE;
1127	cmd->sense_len = 0;
1128	init_rcu_head(&cmd->rcu);
1129	cmd->jiffies_at_alloc = jiffies;
1130	cmd->retries = 0;
1131}
1132
1133struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
1134				   blk_mq_req_flags_t flags)
1135{
1136	struct request *rq;
1137
1138	rq = blk_mq_alloc_request(q, opf, flags);
1139	if (!IS_ERR(rq))
1140		scsi_initialize_rq(rq);
1141	return rq;
1142}
1143EXPORT_SYMBOL_GPL(scsi_alloc_request);
1144
1145/*
1146 * Only called when the request isn't completed by SCSI, and not freed by
1147 * SCSI
1148 */
1149static void scsi_cleanup_rq(struct request *rq)
1150{
1151	if (rq->rq_flags & RQF_DONTPREP) {
1152		scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
1153		rq->rq_flags &= ~RQF_DONTPREP;
1154	}
1155}
1156
1157/* Called before a request is prepared. See also scsi_mq_prep_fn(). */
1158void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
1159{
1160	struct request *rq = scsi_cmd_to_rq(cmd);
1161
1162	if (!blk_rq_is_passthrough(rq) && !(cmd->flags & SCMD_INITIALIZED)) {
1163		cmd->flags |= SCMD_INITIALIZED;
1164		scsi_initialize_rq(rq);
1165	}
1166
1167	cmd->device = dev;
1168	INIT_LIST_HEAD(&cmd->eh_entry);
1169	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
1170}
1171
1172static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
1173		struct request *req)
1174{
1175	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1176
1177	/*
 1178	 * Passthrough requests may transfer data, in which case they must have
1179	 * a bio attached to them.  Or they might contain a SCSI command
1180	 * that does not transfer data, in which case they may optionally
1181	 * submit a request without an attached bio.
1182	 */
1183	if (req->bio) {
1184		blk_status_t ret = scsi_alloc_sgtables(cmd);
1185		if (unlikely(ret != BLK_STS_OK))
1186			return ret;
1187	} else {
1188		BUG_ON(blk_rq_bytes(req));
1189
1190		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1191	}
1192
1193	cmd->transfersize = blk_rq_bytes(req);
1194	return BLK_STS_OK;
1195}
1196
1197static blk_status_t
1198scsi_device_state_check(struct scsi_device *sdev, struct request *req)
1199{
1200	switch (sdev->sdev_state) {
1201	case SDEV_CREATED:
1202		return BLK_STS_OK;
1203	case SDEV_OFFLINE:
1204	case SDEV_TRANSPORT_OFFLINE:
1205		/*
1206		 * If the device is offline we refuse to process any
1207		 * commands.  The device must be brought online
1208		 * before trying any recovery commands.
1209		 */
1210		if (!sdev->offline_already) {
1211			sdev->offline_already = true;
1212			sdev_printk(KERN_ERR, sdev,
1213				    "rejecting I/O to offline device\n");
1214		}
1215		return BLK_STS_IOERR;
1216	case SDEV_DEL:
1217		/*
1218		 * If the device is fully deleted, we refuse to
1219		 * process any commands as well.
1220		 */
1221		sdev_printk(KERN_ERR, sdev,
1222			    "rejecting I/O to dead device\n");
1223		return BLK_STS_IOERR;
1224	case SDEV_BLOCK:
1225	case SDEV_CREATED_BLOCK:
1226		return BLK_STS_RESOURCE;
1227	case SDEV_QUIESCE:
1228		/*
 1229		 * While the device is quiesced we only accept power management
1230		 * commands.
1231		 */
1232		if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
1233			return BLK_STS_RESOURCE;
1234		return BLK_STS_OK;
1235	default:
1236		/*
1237		 * For any other not fully online state we only allow
1238		 * power management commands.
1239		 */
1240		if (req && !(req->rq_flags & RQF_PM))
1241			return BLK_STS_OFFLINE;
1242		return BLK_STS_OK;
1243	}
1244}
1245
1246/*
1247 * scsi_dev_queue_ready: if we can send requests to sdev, assign one token
 1248 * and return it, otherwise return -1.
1249 */
1250static inline int scsi_dev_queue_ready(struct request_queue *q,
1251				  struct scsi_device *sdev)
1252{
1253	int token;
1254
1255	token = sbitmap_get(&sdev->budget_map);
1256	if (atomic_read(&sdev->device_blocked)) {
1257		if (token < 0)
1258			goto out;
1259
1260		if (scsi_device_busy(sdev) > 1)
1261			goto out_dec;
1262
1263		/*
1264		 * unblock after device_blocked iterates to zero
1265		 */
1266		if (atomic_dec_return(&sdev->device_blocked) > 0)
1267			goto out_dec;
1268		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
1269				   "unblocking device at zero depth\n"));
1270	}
1271
1272	return token;
1273out_dec:
1274	if (token >= 0)
1275		sbitmap_put(&sdev->budget_map, token);
1276out:
1277	return -1;
1278}
1279
1280/*
 1281 * scsi_target_queue_ready: checks whether we can send commands to the target
1282 * @sdev: scsi device on starget to check.
1283 */
1284static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1285					   struct scsi_device *sdev)
1286{
1287	struct scsi_target *starget = scsi_target(sdev);
1288	unsigned int busy;
1289
1290	if (starget->single_lun) {
1291		spin_lock_irq(shost->host_lock);
1292		if (starget->starget_sdev_user &&
1293		    starget->starget_sdev_user != sdev) {
1294			spin_unlock_irq(shost->host_lock);
1295			return 0;
1296		}
1297		starget->starget_sdev_user = sdev;
1298		spin_unlock_irq(shost->host_lock);
1299	}
1300
1301	if (starget->can_queue <= 0)
1302		return 1;
1303
1304	busy = atomic_inc_return(&starget->target_busy) - 1;
1305	if (atomic_read(&starget->target_blocked) > 0) {
1306		if (busy)
1307			goto starved;
1308
1309		/*
1310		 * unblock after target_blocked iterates to zero
1311		 */
1312		if (atomic_dec_return(&starget->target_blocked) > 0)
1313			goto out_dec;
1314
1315		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1316				 "unblocking target at zero depth\n"));
1317	}
1318
1319	if (busy >= starget->can_queue)
1320		goto starved;
1321
1322	return 1;
1323
1324starved:
1325	spin_lock_irq(shost->host_lock);
1326	list_move_tail(&sdev->starved_entry, &shost->starved_list);
1327	spin_unlock_irq(shost->host_lock);
1328out_dec:
1329	if (starget->can_queue > 0)
1330		atomic_dec(&starget->target_busy);
1331	return 0;
1332}
1333
1334/*
1335 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1336 * return 0. We must end up running the queue again whenever 0 is
1337 * returned, else IO can hang.
1338 */
1339static inline int scsi_host_queue_ready(struct request_queue *q,
1340				   struct Scsi_Host *shost,
1341				   struct scsi_device *sdev,
1342				   struct scsi_cmnd *cmd)
1343{
1344	if (atomic_read(&shost->host_blocked) > 0) {
1345		if (scsi_host_busy(shost) > 0)
1346			goto starved;
1347
1348		/*
1349		 * unblock after host_blocked iterates to zero
1350		 */
1351		if (atomic_dec_return(&shost->host_blocked) > 0)
1352			goto out_dec;
1353
1354		SCSI_LOG_MLQUEUE(3,
1355			shost_printk(KERN_INFO, shost,
1356				     "unblocking host at zero depth\n"));
1357	}
1358
1359	if (shost->host_self_blocked)
1360		goto starved;
1361
1362	/* We're OK to process the command, so we can't be starved */
1363	if (!list_empty(&sdev->starved_entry)) {
1364		spin_lock_irq(shost->host_lock);
1365		if (!list_empty(&sdev->starved_entry))
1366			list_del_init(&sdev->starved_entry);
1367		spin_unlock_irq(shost->host_lock);
1368	}
1369
1370	__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
1371
1372	return 1;
1373
1374starved:
1375	spin_lock_irq(shost->host_lock);
1376	if (list_empty(&sdev->starved_entry))
1377		list_add_tail(&sdev->starved_entry, &shost->starved_list);
1378	spin_unlock_irq(shost->host_lock);
1379out_dec:
1380	scsi_dec_host_busy(shost, cmd);
1381	return 0;
1382}
1383
1384/*
1385 * Busy state exporting function for request stacking drivers.
1386 *
1387 * For efficiency, no lock is taken to check the busy state of
1388 * shost/starget/sdev, since the returned value is not guaranteed and
1389 * may be changed after request stacking drivers call the function,
1390 * regardless of taking lock or not.
1391 *
 1392 * When SCSI can't dispatch I/Os any more and needs to kill them, it
1393 * needs to return 'not busy'. Otherwise, request stacking drivers
1394 * may hold requests forever.
1395 */
1396static bool scsi_mq_lld_busy(struct request_queue *q)
1397{
1398	struct scsi_device *sdev = q->queuedata;
1399	struct Scsi_Host *shost;
1400
1401	if (blk_queue_dying(q))
1402		return false;
1403
1404	shost = sdev->host;
1405
1406	/*
1407	 * Ignore host/starget busy state.
 1408	 * Since the block layer does not have a concept of fairness across
 1409	 * multiple queues, congestion of the host/starget needs to be handled
 1410	 * at the SCSI layer.
1411	 */
1412	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1413		return true;
1414
1415	return false;
1416}
1417
1418/*
1419 * Block layer request completion callback. May be called from interrupt
1420 * context.
1421 */
1422static void scsi_complete(struct request *rq)
1423{
1424	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1425	enum scsi_disposition disposition;
1426
1427	INIT_LIST_HEAD(&cmd->eh_entry);
1428
1429	atomic_inc(&cmd->device->iodone_cnt);
1430	if (cmd->result)
1431		atomic_inc(&cmd->device->ioerr_cnt);
1432
1433	disposition = scsi_decide_disposition(cmd);
1434	if (disposition != SUCCESS && scsi_cmd_runtime_exceeced(cmd))
1435		disposition = SUCCESS;
1436
1437	scsi_log_completion(cmd, disposition);
1438
1439	switch (disposition) {
1440	case SUCCESS:
1441		scsi_finish_command(cmd);
1442		break;
1443	case NEEDS_RETRY:
1444		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1445		break;
1446	case ADD_TO_MLQUEUE:
1447		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1448		break;
1449	default:
1450		scsi_eh_scmd_add(cmd);
1451		break;
1452	}
1453}
1454
1455/**
1456 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
1457 * @cmd: command block we are dispatching.
1458 *
 1459 * Return: nonzero if the request was rejected and the device's queue needs to be
1460 * plugged.
1461 */
1462static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
1463{
1464	struct Scsi_Host *host = cmd->device->host;
1465	int rtn = 0;
1466
1467	/* check if the device is still usable */
1468	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
1469		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
1470		 * returns an immediate error upwards, and signals
1471		 * that the device is no longer present */
1472		cmd->result = DID_NO_CONNECT << 16;
1473		goto done;
1474	}
1475
1476	/* Check to see if the scsi lld made this device blocked. */
1477	if (unlikely(scsi_device_blocked(cmd->device))) {
1478		/*
1479		 * in blocked state, the command is just put back on
1480		 * the device queue.  The suspend state has already
1481		 * blocked the queue so future requests should not
1482		 * occur until the device transitions out of the
1483		 * suspend state.
1484		 */
1485		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1486			"queuecommand : device blocked\n"));
1487		return SCSI_MLQUEUE_DEVICE_BUSY;
1488	}
1489
1490	/* Store the LUN value in cmnd, if needed. */
1491	if (cmd->device->lun_in_cdb)
1492		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
1493			       (cmd->device->lun << 5 & 0xe0);
1494
1495	scsi_log_send(cmd);
1496
1497	/*
1498	 * Before we queue this command, check if the command
1499	 * length exceeds what the host adapter can handle.
1500	 */
1501	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
1502		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1503			       "queuecommand : command too long. "
1504			       "cdb_size=%d host->max_cmd_len=%d\n",
1505			       cmd->cmd_len, cmd->device->host->max_cmd_len));
1506		cmd->result = (DID_ABORT << 16);
1507		goto done;
1508	}
1509
1510	if (unlikely(host->shost_state == SHOST_DEL)) {
1511		cmd->result = (DID_NO_CONNECT << 16);
1512		goto done;
1513
1514	}
1515
1516	trace_scsi_dispatch_cmd_start(cmd);
1517	rtn = host->hostt->queuecommand(host, cmd);
1518	if (rtn) {
1519		trace_scsi_dispatch_cmd_error(cmd, rtn);
1520		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
1521		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
1522			rtn = SCSI_MLQUEUE_HOST_BUSY;
1523
1524		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1525			"queuecommand : request rejected\n"));
1526	}
1527
1528	return rtn;
1529 done:
1530	scsi_done(cmd);
1531	return 0;
1532}
1533
1534/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
1535static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
1536{
1537	return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
1538		sizeof(struct scatterlist);
1539}
1540
1541static blk_status_t scsi_prepare_cmd(struct request *req)
1542{
1543	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1544	struct scsi_device *sdev = req->q->queuedata;
1545	struct Scsi_Host *shost = sdev->host;
1546	bool in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
1547	struct scatterlist *sg;
1548
1549	scsi_init_command(sdev, cmd);
1550
1551	cmd->eh_eflags = 0;
1552	cmd->prot_type = 0;
1553	cmd->prot_flags = 0;
1554	cmd->submitter = 0;
1555	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1556	cmd->underflow = 0;
1557	cmd->transfersize = 0;
1558	cmd->host_scribble = NULL;
1559	cmd->result = 0;
1560	cmd->extra_len = 0;
1561	cmd->state = 0;
1562	if (in_flight)
1563		__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
1564
1565	/*
1566	 * Only clear the driver-private command data if the LLD does not supply
1567	 * a function to initialize that data.
1568	 */
1569	if (!shost->hostt->init_cmd_priv)
1570		memset(cmd + 1, 0, shost->hostt->cmd_size);
1571
1572	cmd->prot_op = SCSI_PROT_NORMAL;
1573	if (blk_rq_bytes(req))
1574		cmd->sc_data_direction = rq_dma_dir(req);
1575	else
1576		cmd->sc_data_direction = DMA_NONE;
1577
1578	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
1579	cmd->sdb.table.sgl = sg;
1580
1581	if (scsi_host_get_prot(shost)) {
1582		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
1583
1584		cmd->prot_sdb->table.sgl =
1585			(struct scatterlist *)(cmd->prot_sdb + 1);
1586	}
1587
1588	/*
1589	 * Special handling for passthrough commands, which don't go to the ULP
1590	 * at all:
1591	 */
1592	if (blk_rq_is_passthrough(req))
1593		return scsi_setup_scsi_cmnd(sdev, req);
1594
1595	if (sdev->handler && sdev->handler->prep_fn) {
1596		blk_status_t ret = sdev->handler->prep_fn(sdev, req);
1597
1598		if (ret != BLK_STS_OK)
1599			return ret;
1600	}
1601
1602	/* Usually overridden by the ULP */
1603	cmd->allowed = 0;
1604	memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
1605	return scsi_cmd_to_driver(cmd)->init_command(cmd);
1606}
1607
1608static void scsi_done_internal(struct scsi_cmnd *cmd, bool complete_directly)
1609{
1610	struct request *req = scsi_cmd_to_rq(cmd);
1611
1612	switch (cmd->submitter) {
1613	case SUBMITTED_BY_BLOCK_LAYER:
1614		break;
1615	case SUBMITTED_BY_SCSI_ERROR_HANDLER:
1616		return scsi_eh_done(cmd);
1617	case SUBMITTED_BY_SCSI_RESET_IOCTL:
1618		return;
1619	}
1620
1621	if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q)))
1622		return;
1623	if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
1624		return;
1625	trace_scsi_dispatch_cmd_done(cmd);
1626
1627	if (complete_directly)
1628		blk_mq_complete_request_direct(req, scsi_complete);
1629	else
1630		blk_mq_complete_request(req);
1631}
1632
1633void scsi_done(struct scsi_cmnd *cmd)
1634{
1635	scsi_done_internal(cmd, false);
1636}
1637EXPORT_SYMBOL(scsi_done);
1638
1639void scsi_done_direct(struct scsi_cmnd *cmd)
1640{
1641	scsi_done_internal(cmd, true);
1642}
1643EXPORT_SYMBOL(scsi_done_direct);
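
/*
 * Illustrative note: a low-level driver completes a command by filling in
 * cmd->result (and sense data, if any) and then calling scsi_done(cmd),
 * typically from its interrupt handler.  scsi_done_direct() is the variant
 * for callers that want the block layer completion to run on the calling CPU
 * instead of being redirected to the submission CPU.
 */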
1644
1645static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
1646{
1647	struct scsi_device *sdev = q->queuedata;
1648
1649	sbitmap_put(&sdev->budget_map, budget_token);
1650}
1651
1658
1659static int scsi_mq_get_budget(struct request_queue *q)
1660{
1661	struct scsi_device *sdev = q->queuedata;
1662	int token = scsi_dev_queue_ready(q, sdev);
1663
1664	if (token >= 0)
1665		return token;
1666
1667	atomic_inc(&sdev->restarts);
1668
1669	/*
1670	 * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy).
1671	 * .restarts must be incremented before .device_busy is read because the
1672	 * code in scsi_run_queue_async() depends on the order of these operations.
1673	 */
1674	smp_mb__after_atomic();
1675
1676	/*
1677	 * If all in-flight requests originated from this LUN are completed
1678	 * before reading .device_busy, sdev->device_busy will be observed as
1679	 * zero, then blk_mq_delay_run_hw_queues() will dispatch this request
1680	 * soon. Otherwise, completion of one of these requests will observe
1681	 * the .restarts flag, and the request queue will be run for handling
1682	 * this request, see scsi_end_request().
1683	 */
1684	if (unlikely(scsi_device_busy(sdev) == 0 &&
1685				!scsi_device_blocked(sdev)))
1686		blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
1687	return -1;
1688}
1689
1690static void scsi_mq_set_rq_budget_token(struct request *req, int token)
1691{
1692	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1693
1694	cmd->budget_token = token;
1695}
1696
1697static int scsi_mq_get_rq_budget_token(struct request *req)
1698{
1699	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1700
1701	return cmd->budget_token;
1702}
1703
1704static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
1705			 const struct blk_mq_queue_data *bd)
1706{
1707	struct request *req = bd->rq;
1708	struct request_queue *q = req->q;
1709	struct scsi_device *sdev = q->queuedata;
1710	struct Scsi_Host *shost = sdev->host;
1711	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1712	blk_status_t ret;
1713	int reason;
1714
1715	WARN_ON_ONCE(cmd->budget_token < 0);
1716
1717	/*
1718	 * If the device is not in running state we will reject some or all
1719	 * commands.
1720	 */
1721	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1722		ret = scsi_device_state_check(sdev, req);
1723		if (ret != BLK_STS_OK)
1724			goto out_put_budget;
1725	}
1726
1727	ret = BLK_STS_RESOURCE;
1728	if (!scsi_target_queue_ready(shost, sdev))
1729		goto out_put_budget;
1730	if (unlikely(scsi_host_in_recovery(shost))) {
1731		if (cmd->flags & SCMD_FAIL_IF_RECOVERING)
1732			ret = BLK_STS_OFFLINE;
1733		goto out_dec_target_busy;
1734	}
1735	if (!scsi_host_queue_ready(q, shost, sdev, cmd))
1736		goto out_dec_target_busy;
1737
1738	if (!(req->rq_flags & RQF_DONTPREP)) {
1739		ret = scsi_prepare_cmd(req);
1740		if (ret != BLK_STS_OK)
1741			goto out_dec_host_busy;
1742		req->rq_flags |= RQF_DONTPREP;
1743	} else {
1744		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
1745	}
1746
1747	cmd->flags &= SCMD_PRESERVED_FLAGS;
1748	if (sdev->simple_tags)
1749		cmd->flags |= SCMD_TAGGED;
1750	if (bd->last)
1751		cmd->flags |= SCMD_LAST;
1752
1753	scsi_set_resid(cmd, 0);
1754	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1755	cmd->submitter = SUBMITTED_BY_BLOCK_LAYER;
1756
1757	blk_mq_start_request(req);
1758	reason = scsi_dispatch_cmd(cmd);
1759	if (reason) {
1760		scsi_set_blocked(cmd, reason);
1761		ret = BLK_STS_RESOURCE;
1762		goto out_dec_host_busy;
1763	}
1764
1765	atomic_inc(&cmd->device->iorequest_cnt);
1766	return BLK_STS_OK;
1767
1768out_dec_host_busy:
1769	scsi_dec_host_busy(shost, cmd);
1770out_dec_target_busy:
1771	if (scsi_target(sdev)->can_queue > 0)
1772		atomic_dec(&scsi_target(sdev)->target_busy);
1773out_put_budget:
1774	scsi_mq_put_budget(q, cmd->budget_token);
1775	cmd->budget_token = -1;
1776	switch (ret) {
1777	case BLK_STS_OK:
1778		break;
1779	case BLK_STS_RESOURCE:
1780	case BLK_STS_ZONE_RESOURCE:
1781		if (scsi_device_blocked(sdev))
1782			ret = BLK_STS_DEV_RESOURCE;
1783		break;
1784	case BLK_STS_AGAIN:
1785		cmd->result = DID_BUS_BUSY << 16;
1786		if (req->rq_flags & RQF_DONTPREP)
1787			scsi_mq_uninit_cmd(cmd);
1788		break;
1789	default:
1790		if (unlikely(!scsi_device_online(sdev)))
1791			cmd->result = DID_NO_CONNECT << 16;
1792		else
1793			cmd->result = DID_ERROR << 16;
1794		/*
1795		 * Make sure to release all allocated resources when
1796		 * we hit an error, as we will never see this command
1797		 * again.
1798		 */
1799		if (req->rq_flags & RQF_DONTPREP)
1800			scsi_mq_uninit_cmd(cmd);
1801		scsi_run_queue_async(sdev);
1802		break;
1803	}
1804	return ret;
1805}
1806
1807static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
1808				unsigned int hctx_idx, unsigned int numa_node)
1809{
1810	struct Scsi_Host *shost = set->driver_data;
1811	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1812	struct scatterlist *sg;
1813	int ret = 0;
1814
1815	cmd->sense_buffer =
1816		kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
1817	if (!cmd->sense_buffer)
1818		return -ENOMEM;
1819
1820	if (scsi_host_get_prot(shost)) {
1821		sg = (void *)cmd + sizeof(struct scsi_cmnd) +
1822			shost->hostt->cmd_size;
1823		cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
1824	}
1825
1826	if (shost->hostt->init_cmd_priv) {
1827		ret = shost->hostt->init_cmd_priv(shost, cmd);
1828		if (ret < 0)
1829			kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
1830	}
1831
1832	return ret;
1833}
1834
1835static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1836				 unsigned int hctx_idx)
1837{
1838	struct Scsi_Host *shost = set->driver_data;
1839	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1840
1841	if (shost->hostt->exit_cmd_priv)
1842		shost->hostt->exit_cmd_priv(shost, cmd);
1843	kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
1844}
1845
1846
1847static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
1848{
1849	struct Scsi_Host *shost = hctx->driver_data;
1850
1851	if (shost->hostt->mq_poll)
1852		return shost->hostt->mq_poll(shost, hctx->queue_num);
1853
1854	return 0;
1855}
1856
1857static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1858			  unsigned int hctx_idx)
1859{
1860	struct Scsi_Host *shost = data;
1861
1862	hctx->driver_data = shost;
1863	return 0;
1864}
1865
1866static void scsi_map_queues(struct blk_mq_tag_set *set)
1867{
1868	struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
1869
1870	if (shost->hostt->map_queues)
1871		return shost->hostt->map_queues(shost);
1872	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
1873}
1874
1875void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
1876{
1877	struct device *dev = shost->dma_dev;
1878
1879	/*
1880	 * this limit is imposed by hardware restrictions
1881	 */
1882	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1883					SG_MAX_SEGMENTS));
1884
1885	if (scsi_host_prot_dma(shost)) {
1886		shost->sg_prot_tablesize =
1887			min_not_zero(shost->sg_prot_tablesize,
1888				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
1889		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1890		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1891	}
1892
1893	blk_queue_max_hw_sectors(q, shost->max_sectors);
1894	blk_queue_segment_boundary(q, shost->dma_boundary);
1895	dma_set_seg_boundary(dev, shost->dma_boundary);
1896
1897	blk_queue_max_segment_size(q, shost->max_segment_size);
1898	blk_queue_virt_boundary(q, shost->virt_boundary_mask);
1899	dma_set_max_seg_size(dev, queue_max_segment_size(q));
1900
1901	/*
 1902	 * Set a reasonable default alignment:  The larger of 4-byte (dword),
1903	 * which is a common minimum for HBAs, and the minimum DMA alignment,
1904	 * which is set by the platform.
1905	 *
1906	 * Devices that require a bigger alignment can increase it later.
1907	 */
1908	blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
1909}
1910EXPORT_SYMBOL_GPL(__scsi_init_queue);
1911
1912static const struct blk_mq_ops scsi_mq_ops_no_commit = {
1913	.get_budget	= scsi_mq_get_budget,
1914	.put_budget	= scsi_mq_put_budget,
1915	.queue_rq	= scsi_queue_rq,
1916	.complete	= scsi_complete,
1917	.timeout	= scsi_timeout,
1918#ifdef CONFIG_BLK_DEBUG_FS
1919	.show_rq	= scsi_show_rq,
1920#endif
1921	.init_request	= scsi_mq_init_request,
1922	.exit_request	= scsi_mq_exit_request,
1923	.cleanup_rq	= scsi_cleanup_rq,
1924	.busy		= scsi_mq_lld_busy,
1925	.map_queues	= scsi_map_queues,
1926	.init_hctx	= scsi_init_hctx,
1927	.poll		= scsi_mq_poll,
1928	.set_rq_budget_token = scsi_mq_set_rq_budget_token,
1929	.get_rq_budget_token = scsi_mq_get_rq_budget_token,
1930};
1931
1932
1933static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
1934{
1935	struct Scsi_Host *shost = hctx->driver_data;
1936
1937	shost->hostt->commit_rqs(shost, hctx->queue_num);
1938}
1939
1940static const struct blk_mq_ops scsi_mq_ops = {
1941	.get_budget	= scsi_mq_get_budget,
1942	.put_budget	= scsi_mq_put_budget,
1943	.queue_rq	= scsi_queue_rq,
1944	.commit_rqs	= scsi_commit_rqs,
1945	.complete	= scsi_complete,
1946	.timeout	= scsi_timeout,
1947#ifdef CONFIG_BLK_DEBUG_FS
1948	.show_rq	= scsi_show_rq,
1949#endif
1950	.init_request	= scsi_mq_init_request,
1951	.exit_request	= scsi_mq_exit_request,
1952	.cleanup_rq	= scsi_cleanup_rq,
1953	.busy		= scsi_mq_lld_busy,
1954	.map_queues	= scsi_map_queues,
1955	.init_hctx	= scsi_init_hctx,
1956	.poll		= scsi_mq_poll,
1957	.set_rq_budget_token = scsi_mq_set_rq_budget_token,
1958	.get_rq_budget_token = scsi_mq_get_rq_budget_token,
1959};
1960
1961int scsi_mq_setup_tags(struct Scsi_Host *shost)
1962{
1963	unsigned int cmd_size, sgl_size;
1964	struct blk_mq_tag_set *tag_set = &shost->tag_set;
1965
1966	sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
1967				scsi_mq_inline_sgl_size(shost));
1968	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
1969	if (scsi_host_get_prot(shost))
1970		cmd_size += sizeof(struct scsi_data_buffer) +
1971			sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
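
	/*
	 * The resulting per-command payload is laid out as (see also
	 * scsi_mq_init_request() and scsi_prepare_cmd()):
	 *
	 *	struct scsi_cmnd | LLD private data (hostt->cmd_size)
	 *	| inline data sg list | [struct scsi_data_buffer
	 *	| inline protection sg list]
	 *
	 * where the bracketed tail is only reserved when the host supports
	 * protection information.
	 */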
1972
1973	memset(tag_set, 0, sizeof(*tag_set));
1974	if (shost->hostt->commit_rqs)
1975		tag_set->ops = &scsi_mq_ops;
1976	else
1977		tag_set->ops = &scsi_mq_ops_no_commit;
1978	tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
1979	tag_set->nr_maps = shost->nr_maps ? : 1;
1980	tag_set->queue_depth = shost->can_queue;
1981	tag_set->cmd_size = cmd_size;
1982	tag_set->numa_node = dev_to_node(shost->dma_dev);
1983	tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
1984	tag_set->flags |=
1985		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
1986	tag_set->driver_data = shost;
1987	if (shost->host_tagset)
1988		tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
1989
1990	return blk_mq_alloc_tag_set(tag_set);
1991}
1992
1993void scsi_mq_free_tags(struct kref *kref)
1994{
1995	struct Scsi_Host *shost = container_of(kref, typeof(*shost),
1996					       tagset_refcnt);
1997
1998	blk_mq_free_tag_set(&shost->tag_set);
1999	complete(&shost->tagset_freed);
2000}
2001
2002/**
2003 * scsi_device_from_queue - return sdev associated with a request_queue
2004 * @q: The request queue to return the sdev from
2005 *
2006 * Return the sdev associated with a request queue or NULL if the
2007 * request_queue does not reference a SCSI device.
2008 */
2009struct scsi_device *scsi_device_from_queue(struct request_queue *q)
2010{
2011	struct scsi_device *sdev = NULL;
2012
2013	if (q->mq_ops == &scsi_mq_ops_no_commit ||
2014	    q->mq_ops == &scsi_mq_ops)
2015		sdev = q->queuedata;
2016	if (!sdev || !get_device(&sdev->sdev_gendev))
2017		sdev = NULL;
2018
2019	return sdev;
2020}
2021/*
2022 * pktcdvd should have been integrated into the SCSI layers, but for historical
2023 * reasons like the old IDE driver it isn't.  This export allows it to safely
2024 * probe if a given device is a SCSI one and only attach to that.
2025 */
2026#ifdef CONFIG_CDROM_PKTCDVD_MODULE
2027EXPORT_SYMBOL_GPL(scsi_device_from_queue);
2028#endif
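
/*
 * Usage sketch (illustrative only): a caller that merely wants to know
 * whether a request queue belongs to a SCSI device does roughly
 *
 *	struct scsi_device *sdev = scsi_device_from_queue(q);
 *
 *	if (sdev) {
 *		... use sdev ...
 *		put_device(&sdev->sdev_gendev);
 *	}
 *
 * where the put_device() balances the reference taken by a successful
 * lookup.
 */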
2029
2030/**
2031 * scsi_block_requests - Utility function used by low-level drivers to prevent
2032 * further commands from being queued to the device.
2033 * @shost:  host in question
2034 *
2035 * There is no timer nor any other means by which the requests get unblocked
2036 * other than the low-level driver calling scsi_unblock_requests().
2037 */
2038void scsi_block_requests(struct Scsi_Host *shost)
2039{
2040	shost->host_self_blocked = 1;
2041}
2042EXPORT_SYMBOL(scsi_block_requests);
2043
2044/**
2045 * scsi_unblock_requests - Utility function used by low-level drivers to allow
2046 * further commands to be queued to the device.
2047 * @shost:  host in question
2048 *
2049 * There is no timer nor any other means by which the requests get unblocked
2050 * other than the low-level driver calling scsi_unblock_requests(). This is done
2051 * as an API function so that changes to the internals of the scsi mid-layer
2052 * won't require wholesale changes to drivers that use this feature.
2053 */
2054void scsi_unblock_requests(struct Scsi_Host *shost)
2055{
2056	shost->host_self_blocked = 0;
2057	scsi_run_host_queues(shost);
2058}
2059EXPORT_SYMBOL(scsi_unblock_requests);
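
/*
 * Illustrative pairing (not taken from a specific driver): a low-level
 * driver that has to quiesce its hardware, for example around a firmware
 * reset, brackets the operation with
 *
 *	scsi_block_requests(shost);
 *	... reinitialise the adapter ...
 *	scsi_unblock_requests(shost);
 *
 * since nothing else will clear host_self_blocked in the meantime.
 */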
2060
2061void scsi_exit_queue(void)
2062{
2063	kmem_cache_destroy(scsi_sense_cache);
2064}
2065
2066/**
2067 *	scsi_mode_select - issue a mode select
2068 *	@sdev:	SCSI device to be queried
2069 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
2070 *	@sp:	Save page bit (0 == don't save, 1 == save)
2071 *	@buffer: request buffer (may not be smaller than eight bytes)
2072 *	@len:	length of request buffer.
2073 *	@timeout: command timeout
2074 *	@retries: number of retries before failing
2075 *	@data: returns a structure abstracting the mode header data
2076 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
2077 *		must be SCSI_SENSE_BUFFERSIZE big.
2078 *
2079 *	Returns zero if successful; negative error number or scsi
2080 *	status on error
2081 *
2082 */
2083int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
2084		     unsigned char *buffer, int len, int timeout, int retries,
2085		     struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2086{
2087	unsigned char cmd[10];
2088	unsigned char *real_buffer;
2089	int ret;
2090
2091	memset(cmd, 0, sizeof(cmd));
2092	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
2093
2094	/*
2095	 * Use MODE SELECT(10) if the device asked for it or if the mode page
 2096	 * and the mode select header cannot fit within the maximum 255 bytes
2097	 * of the MODE SELECT(6) command.
2098	 */
2099	if (sdev->use_10_for_ms ||
2100	    len + 4 > 255 ||
2101	    data->block_descriptor_length > 255) {
2102		if (len > 65535 - 8)
2103			return -EINVAL;
2104		real_buffer = kmalloc(8 + len, GFP_KERNEL);
2105		if (!real_buffer)
2106			return -ENOMEM;
2107		memcpy(real_buffer + 8, buffer, len);
2108		len += 8;
2109		real_buffer[0] = 0;
2110		real_buffer[1] = 0;
2111		real_buffer[2] = data->medium_type;
2112		real_buffer[3] = data->device_specific;
2113		real_buffer[4] = data->longlba ? 0x01 : 0;
2114		real_buffer[5] = 0;
2115		put_unaligned_be16(data->block_descriptor_length,
2116				   &real_buffer[6]);
2117
2118		cmd[0] = MODE_SELECT_10;
2119		put_unaligned_be16(len, &cmd[7]);
2120	} else {
2121		if (data->longlba)
2122			return -EINVAL;
2123
2124		real_buffer = kmalloc(4 + len, GFP_KERNEL);
2125		if (!real_buffer)
2126			return -ENOMEM;
2127		memcpy(real_buffer + 4, buffer, len);
2128		len += 4;
2129		real_buffer[0] = 0;
2130		real_buffer[1] = data->medium_type;
2131		real_buffer[2] = data->device_specific;
2132		real_buffer[3] = data->block_descriptor_length;
2133
2134		cmd[0] = MODE_SELECT;
2135		cmd[4] = len;
2136	}
2137
2138	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
2139			       sshdr, timeout, retries, NULL);
2140	kfree(real_buffer);
2141	return ret;
2142}
2143EXPORT_SYMBOL_GPL(scsi_mode_select);
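
/*
 * Usage sketch (all values illustrative): the helper builds the mode
 * parameter header itself from @data, so @buffer starts with the block
 * descriptors and mode page bytes.  A hypothetical caller clearing one bit
 * in a previously fetched page could therefore do roughly
 *
 *	page[2] &= ~0x04;
 *	ret = scsi_mode_select(sdev, 1, 0, page, page_len,
 *			       30 * HZ, 3, &data, &sshdr);
 *
 * where page, page_len, data and sshdr come from an earlier
 * scsi_mode_sense() call.
 */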
2144
2145/**
2146 *	scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
2147 *	@sdev:	SCSI device to be queried
2148 *	@dbd:	set to prevent mode sense from returning block descriptors
2149 *	@modepage: mode page being requested
2150 *	@buffer: request buffer (may not be smaller than eight bytes)
2151 *	@len:	length of request buffer.
2152 *	@timeout: command timeout
2153 *	@retries: number of retries before failing
2154 *	@data: returns a structure abstracting the mode header data
2155 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
2156 *		must be SCSI_SENSE_BUFFERSIZE big.
2157 *
2158 *	Returns zero if successful, or a negative error number on failure
2159 */
2160int
2161scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
2162		  unsigned char *buffer, int len, int timeout, int retries,
2163		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2164{
2165	unsigned char cmd[12];
2166	int use_10_for_ms;
2167	int header_length;
2168	int result, retry_count = retries;
2169	struct scsi_sense_hdr my_sshdr;
2170
2171	memset(data, 0, sizeof(*data));
2172	memset(&cmd[0], 0, 12);
2173
2174	dbd = sdev->set_dbd_for_ms ? 8 : dbd;
2175	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
2176	cmd[2] = modepage;
2177
2178	/* caller might not be interested in sense, but we need it */
2179	if (!sshdr)
2180		sshdr = &my_sshdr;
2181
2182 retry:
2183	use_10_for_ms = sdev->use_10_for_ms || len > 255;
2184
2185	if (use_10_for_ms) {
2186		if (len < 8 || len > 65535)
2187			return -EINVAL;
2188
2189		cmd[0] = MODE_SENSE_10;
2190		put_unaligned_be16(len, &cmd[7]);
2191		header_length = 8;
2192	} else {
2193		if (len < 4)
2194			return -EINVAL;
2195
2196		cmd[0] = MODE_SENSE;
2197		cmd[4] = len;
2198		header_length = 4;
2199	}
2200
2201	memset(buffer, 0, len);
2202
2203	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
2204				  sshdr, timeout, retries, NULL);
2205	if (result < 0)
2206		return result;
2207
2208	/* This code looks awful: what it's doing is making sure an
2209	 * ILLEGAL REQUEST sense return identifies the actual command
2210	 * byte as the problem.  MODE_SENSE commands can return
 2211	 * ILLEGAL REQUEST if the mode page isn't supported */
2212
2213	if (!scsi_status_is_good(result)) {
2214		if (scsi_sense_valid(sshdr)) {
2215			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
2216			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
2217				/*
2218				 * Invalid command operation code: retry using
2219				 * MODE SENSE(6) if this was a MODE SENSE(10)
2220				 * request, except if the request mode page is
2221				 * too large for MODE SENSE single byte
2222				 * allocation length field.
2223				 */
2224				if (use_10_for_ms) {
2225					if (len > 255)
2226						return -EIO;
2227					sdev->use_10_for_ms = 0;
2228					goto retry;
2229				}
2230			}
2231			if (scsi_status_is_check_condition(result) &&
2232			    sshdr->sense_key == UNIT_ATTENTION &&
2233			    retry_count) {
2234				retry_count--;
2235				goto retry;
2236			}
2237		}
2238		return -EIO;
2239	}
2240	if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2241		     (modepage == 6 || modepage == 8))) {
2242		/* Initio breakage? */
2243		header_length = 0;
2244		data->length = 13;
2245		data->medium_type = 0;
2246		data->device_specific = 0;
2247		data->longlba = 0;
2248		data->block_descriptor_length = 0;
2249	} else if (use_10_for_ms) {
2250		data->length = get_unaligned_be16(&buffer[0]) + 2;
2251		data->medium_type = buffer[2];
2252		data->device_specific = buffer[3];
2253		data->longlba = buffer[4] & 0x01;
2254		data->block_descriptor_length = get_unaligned_be16(&buffer[6]);
2255	} else {
2256		data->length = buffer[0] + 1;
2257		data->medium_type = buffer[1];
2258		data->device_specific = buffer[2];
2259		data->block_descriptor_length = buffer[3];
2260	}
2261	data->header_length = header_length;
2262
2263	return 0;
2264}
2265EXPORT_SYMBOL(scsi_mode_sense);
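
/*
 * Usage sketch (editorial addition, not part of the original source): a
 * caller such as an upper-level driver could read a mode page roughly as
 * below.  The page number (0x08, Caching), buffer size, timeout and retry
 * count are illustrative assumptions only.
 *
 *	unsigned char buf[512];
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	int err;
 *
 *	err = scsi_mode_sense(sdev, 0, 0x08, buf, sizeof(buf),
 *			      30 * HZ, 3, &data, &sshdr);
 *
 * On success (err == 0) the mode page payload starts at
 * buf + data.header_length + data.block_descriptor_length.
 */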
2266
2267/**
2268 *	scsi_test_unit_ready - test if unit is ready
2269 *	@sdev:	scsi device to test.
2270 *	@timeout: command timeout
2271 *	@retries: number of retries before failing
2272 *	@sshdr: output pointer for decoded sense information.
2273 *
2274 *	Returns zero if successful or an error if TUR failed.  For
2275 *	removable media, a UNIT ATTENTION sets the ->changed flag.
2276 **/
2277int
2278scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2279		     struct scsi_sense_hdr *sshdr)
2280{
2281	char cmd[] = {
2282		TEST_UNIT_READY, 0, 0, 0, 0, 0,
2283	};
2284	int result;
2285
2286	/* try to eat the UNIT_ATTENTION if there are enough retries */
2287	do {
2288		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2289					  timeout, 1, NULL);
2290		if (sdev->removable && scsi_sense_valid(sshdr) &&
2291		    sshdr->sense_key == UNIT_ATTENTION)
2292			sdev->changed = 1;
2293	} while (scsi_sense_valid(sshdr) &&
2294		 sshdr->sense_key == UNIT_ATTENTION && --retries);
2295
2296	return result;
2297}
2298EXPORT_SYMBOL(scsi_test_unit_ready);
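
/*
 * Usage sketch (editorial addition): poll whether the unit is ready,
 * retrying a few times so that pending unit attentions are eaten.  The
 * timeout and retry values below are assumptions.
 *
 *	struct scsi_sense_hdr sshdr;
 *
 *	if (scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0)
 *		the device reported READY
 *	else if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY)
 *		inspect sshdr.asc / sshdr.ascq for the reason
 */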
2299
2300/**
2301 *	scsi_device_set_state - Take the given device through the device state model.
2302 *	@sdev:	scsi device to change the state of.
2303 *	@state:	state to change to.
2304 *
2305 *	Returns zero if successful or an error if the requested
2306 *	transition is illegal.
2307 */
2308int
2309scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2310{
2311	enum scsi_device_state oldstate = sdev->sdev_state;
2312
2313	if (state == oldstate)
2314		return 0;
2315
2316	switch (state) {
2317	case SDEV_CREATED:
2318		switch (oldstate) {
2319		case SDEV_CREATED_BLOCK:
2320			break;
2321		default:
2322			goto illegal;
2323		}
2324		break;
2325
2326	case SDEV_RUNNING:
2327		switch (oldstate) {
2328		case SDEV_CREATED:
2329		case SDEV_OFFLINE:
2330		case SDEV_TRANSPORT_OFFLINE:
2331		case SDEV_QUIESCE:
2332		case SDEV_BLOCK:
2333			break;
2334		default:
2335			goto illegal;
2336		}
2337		break;
2338
2339	case SDEV_QUIESCE:
2340		switch (oldstate) {
2341		case SDEV_RUNNING:
2342		case SDEV_OFFLINE:
2343		case SDEV_TRANSPORT_OFFLINE:
2344			break;
2345		default:
2346			goto illegal;
2347		}
2348		break;
2349
2350	case SDEV_OFFLINE:
2351	case SDEV_TRANSPORT_OFFLINE:
2352		switch (oldstate) {
2353		case SDEV_CREATED:
2354		case SDEV_RUNNING:
2355		case SDEV_QUIESCE:
2356		case SDEV_BLOCK:
2357			break;
2358		default:
2359			goto illegal;
2360		}
2361		break;
2362
2363	case SDEV_BLOCK:
2364		switch (oldstate) {
2365		case SDEV_RUNNING:
2366		case SDEV_CREATED_BLOCK:
2367		case SDEV_QUIESCE:
2368		case SDEV_OFFLINE:
2369			break;
2370		default:
2371			goto illegal;
2372		}
2373		break;
2374
2375	case SDEV_CREATED_BLOCK:
2376		switch (oldstate) {
2377		case SDEV_CREATED:
2378			break;
2379		default:
2380			goto illegal;
2381		}
2382		break;
2383
2384	case SDEV_CANCEL:
2385		switch (oldstate) {
2386		case SDEV_CREATED:
2387		case SDEV_RUNNING:
2388		case SDEV_QUIESCE:
2389		case SDEV_OFFLINE:
2390		case SDEV_TRANSPORT_OFFLINE:
2391			break;
2392		default:
2393			goto illegal;
2394		}
2395		break;
2396
2397	case SDEV_DEL:
2398		switch (oldstate) {
2399		case SDEV_CREATED:
2400		case SDEV_RUNNING:
2401		case SDEV_OFFLINE:
2402		case SDEV_TRANSPORT_OFFLINE:
2403		case SDEV_CANCEL:
2404		case SDEV_BLOCK:
2405		case SDEV_CREATED_BLOCK:
2406			break;
2407		default:
2408			goto illegal;
2409		}
2410		break;
2411
2412	}
2413	sdev->offline_already = false;
2414	sdev->sdev_state = state;
2415	return 0;
2416
2417 illegal:
2418	SCSI_LOG_ERROR_RECOVERY(1,
2419				sdev_printk(KERN_ERR, sdev,
2420					    "Illegal state transition %s->%s",
2421					    scsi_device_state_name(oldstate),
2422					    scsi_device_state_name(state))
2423				);
2424	return -EINVAL;
2425}
2426EXPORT_SYMBOL(scsi_device_set_state);
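
/*
 * Usage sketch (editorial addition): callers change the state while holding
 * sdev->state_mutex, as the other helpers in this file do.  The target state
 * here is just one example of a transition the table above permits.
 *
 *	mutex_lock(&sdev->state_mutex);
 *	ret = scsi_device_set_state(sdev, SDEV_OFFLINE);
 *	mutex_unlock(&sdev->state_mutex);
 */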
2427
2428/**
2429 *	scsi_evt_emit - emit a single SCSI device uevent
2430 *	@sdev: associated SCSI device
2431 *	@evt: event to emit
2432 *
2433 *	Send a single uevent (scsi_event) to the associated scsi_device.
2434 */
2435static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2436{
2437	int idx = 0;
2438	char *envp[3];
2439
2440	switch (evt->evt_type) {
2441	case SDEV_EVT_MEDIA_CHANGE:
2442		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2443		break;
2444	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2445		scsi_rescan_device(&sdev->sdev_gendev);
2446		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
2447		break;
2448	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2449		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
2450		break;
2451	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2452	       envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
2453		break;
2454	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2455		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
2456		break;
2457	case SDEV_EVT_LUN_CHANGE_REPORTED:
2458		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
2459		break;
2460	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
2461		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
2462		break;
2463	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
2464		envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
2465		break;
2466	default:
2467		/* do nothing */
2468		break;
2469	}
2470
2471	envp[idx++] = NULL;
2472
2473	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2474}
2475
2476/**
2477 *	scsi_evt_thread - send a uevent for each scsi event
2478 *	@work: work struct for scsi_device
2479 *
2480 *	Dispatch queued events to their associated scsi_device kobjects
2481 *	as uevents.
2482 */
2483void scsi_evt_thread(struct work_struct *work)
2484{
2485	struct scsi_device *sdev;
2486	enum scsi_device_event evt_type;
2487	LIST_HEAD(event_list);
2488
2489	sdev = container_of(work, struct scsi_device, event_work);
2490
2491	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
2492		if (test_and_clear_bit(evt_type, sdev->pending_events))
2493			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
2494
2495	while (1) {
2496		struct scsi_event *evt;
2497		struct list_head *this, *tmp;
2498		unsigned long flags;
2499
2500		spin_lock_irqsave(&sdev->list_lock, flags);
2501		list_splice_init(&sdev->event_list, &event_list);
2502		spin_unlock_irqrestore(&sdev->list_lock, flags);
2503
2504		if (list_empty(&event_list))
2505			break;
2506
2507		list_for_each_safe(this, tmp, &event_list) {
2508			evt = list_entry(this, struct scsi_event, node);
2509			list_del(&evt->node);
2510			scsi_evt_emit(sdev, evt);
2511			kfree(evt);
2512		}
2513	}
2514}
2515
2516/**
2517 * 	sdev_evt_send - send asserted event to uevent thread
2518 *	@sdev: scsi_device event occurred on
2519 *	@evt: event to send
2520 *
2521 *	Assert scsi device event asynchronously.
2522 */
2523void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2524{
2525	unsigned long flags;
2526
2527#if 0
2528	/* FIXME: currently this check eliminates all media change events
2529	 * for polled devices.  Need to update to discriminate between AN
2530	 * and polled events */
2531	if (!test_bit(evt->evt_type, sdev->supported_events)) {
2532		kfree(evt);
2533		return;
2534	}
2535#endif
2536
2537	spin_lock_irqsave(&sdev->list_lock, flags);
2538	list_add_tail(&evt->node, &sdev->event_list);
2539	schedule_work(&sdev->event_work);
2540	spin_unlock_irqrestore(&sdev->list_lock, flags);
2541}
2542EXPORT_SYMBOL_GPL(sdev_evt_send);
2543
2544/**
2545 * 	sdev_evt_alloc - allocate a new scsi event
2546 *	@evt_type: type of event to allocate
2547 *	@gfpflags: GFP flags for allocation
2548 *
2549 *	Allocates and returns a new scsi_event.
2550 */
2551struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2552				  gfp_t gfpflags)
2553{
2554	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2555	if (!evt)
2556		return NULL;
2557
2558	evt->evt_type = evt_type;
2559	INIT_LIST_HEAD(&evt->node);
2560
2561	/* evt_type-specific initialization, if any */
2562	switch (evt_type) {
2563	case SDEV_EVT_MEDIA_CHANGE:
2564	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2565	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2566	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2567	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2568	case SDEV_EVT_LUN_CHANGE_REPORTED:
2569	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
2570	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
2571	default:
2572		/* do nothing */
2573		break;
2574	}
2575
2576	return evt;
2577}
2578EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2579
2580/**
2581 * 	sdev_evt_send_simple - send asserted event to uevent thread
2582 *	@sdev: scsi_device event occurred on
2583 *	@evt_type: type of event to send
2584 *	@gfpflags: GFP flags for allocation
2585 *
2586 *	Assert scsi device event asynchronously, given an event type.
2587 */
2588void sdev_evt_send_simple(struct scsi_device *sdev,
2589			  enum scsi_device_event evt_type, gfp_t gfpflags)
2590{
2591	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2592	if (!evt) {
2593		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2594			    evt_type);
2595		return;
2596	}
2597
2598	sdev_evt_send(sdev, evt);
2599}
2600EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
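
/*
 * Usage sketch (editorial addition): a driver that notices new media can
 * assert the corresponding event with a one-liner; allocation and queuing to
 * the event worker happen internally.
 *
 *	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
 *
 * When the event must be built up separately (for example from atomic
 * context), sdev_evt_alloc() plus sdev_evt_send() can be used instead:
 *
 *	struct scsi_event *evt = sdev_evt_alloc(SDEV_EVT_MEDIA_CHANGE,
 *						GFP_ATOMIC);
 *	if (evt)
 *		sdev_evt_send(sdev, evt);
 */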
2601
2602/**
2603 *	scsi_device_quiesce - Block all commands except power management.
2604 *	@sdev:	scsi device to quiesce.
2605 *
2606 *	This works by trying to transition to the SDEV_QUIESCE state
2607 *	(which must be a legal transition).  When the device is in this
2608 *	state, only power management requests will be accepted; all others will
2609 *	be deferred.
2610 *
2611 *	Must be called with user context, may sleep.
2612 *
2613 *	Returns zero if successful or an error if not.
2614 */
2615int
2616scsi_device_quiesce(struct scsi_device *sdev)
2617{
2618	struct request_queue *q = sdev->request_queue;
2619	int err;
2620
2621	/*
2622	 * It is allowed to call scsi_device_quiesce() multiple times from
2623	 * the same context but concurrent scsi_device_quiesce() calls are
2624	 * not allowed.
2625	 */
2626	WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
2627
2628	if (sdev->quiesced_by == current)
2629		return 0;
2630
2631	blk_set_pm_only(q);
2632
2633	blk_mq_freeze_queue(q);
2634	/*
2635	 * Ensure that the effect of blk_set_pm_only() will be visible
2636	 * for percpu_ref_tryget() callers that occur after the queue
2637	 * unfreeze even if the queue was already frozen before this function
2638	 * was called. See also https://lwn.net/Articles/573497/.
2639	 */
2640	synchronize_rcu();
2641	blk_mq_unfreeze_queue(q);
2642
2643	mutex_lock(&sdev->state_mutex);
2644	err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2645	if (err == 0)
2646		sdev->quiesced_by = current;
2647	else
2648		blk_clear_pm_only(q);
2649	mutex_unlock(&sdev->state_mutex);
2650
2651	return err;
2652}
2653EXPORT_SYMBOL(scsi_device_quiesce);
2654
2655/**
2656 *	scsi_device_resume - Restart user issued commands to a quiesced device.
2657 *	@sdev:	scsi device to resume.
2658 *
2659 *	Moves the device from quiesced back to running and restarts the
2660 *	queues.
2661 *
2662 *	Must be called with user context, may sleep.
2663 */
2664void scsi_device_resume(struct scsi_device *sdev)
2665{
2666	/* check if the device state was mutated prior to resume, and if
2667	 * so assume the state is being managed elsewhere (for example
2668	 * device deleted during suspend)
2669	 */
2670	mutex_lock(&sdev->state_mutex);
2671	if (sdev->sdev_state == SDEV_QUIESCE)
2672		scsi_device_set_state(sdev, SDEV_RUNNING);
2673	if (sdev->quiesced_by) {
2674		sdev->quiesced_by = NULL;
2675		blk_clear_pm_only(sdev->request_queue);
2676	}
2677	mutex_unlock(&sdev->state_mutex);
2678}
2679EXPORT_SYMBOL(scsi_device_resume);
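
/*
 * Usage sketch (editorial addition): the expected pairing for power
 * management style callers, who quiesce the device, issue their PM requests
 * and then resume.  Error handling beyond the quiesce return code is
 * omitted.
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		issue power-management requests here
 *		scsi_device_resume(sdev);
 *	}
 */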
2680
2681static void
2682device_quiesce_fn(struct scsi_device *sdev, void *data)
2683{
2684	scsi_device_quiesce(sdev);
2685}
2686
2687void
2688scsi_target_quiesce(struct scsi_target *starget)
2689{
2690	starget_for_each_device(starget, NULL, device_quiesce_fn);
2691}
2692EXPORT_SYMBOL(scsi_target_quiesce);
2693
2694static void
2695device_resume_fn(struct scsi_device *sdev, void *data)
2696{
2697	scsi_device_resume(sdev);
2698}
2699
2700void
2701scsi_target_resume(struct scsi_target *starget)
2702{
2703	starget_for_each_device(starget, NULL, device_resume_fn);
2704}
2705EXPORT_SYMBOL(scsi_target_resume);
2706
2707static int __scsi_internal_device_block_nowait(struct scsi_device *sdev)
2708{
2709	if (scsi_device_set_state(sdev, SDEV_BLOCK))
2710		return scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2711
2712	return 0;
2713}
2714
2715void scsi_start_queue(struct scsi_device *sdev)
2716{
2717	if (cmpxchg(&sdev->queue_stopped, 1, 0))
2718		blk_mq_unquiesce_queue(sdev->request_queue);
2719}
2720
2721static void scsi_stop_queue(struct scsi_device *sdev, bool nowait)
2722{
2723	/*
2724	 * The atomic variable ->queue_stopped ensures that calls to
2725	 * blk_mq_quiesce_queue*() are balanced with blk_mq_unquiesce_queue().
2726	 *
2727	 * However, we still need to wait until the quiesce is done in case
2728	 * the queue has already been stopped.
2729	 */
2730	if (!cmpxchg(&sdev->queue_stopped, 0, 1)) {
2731		if (nowait)
2732			blk_mq_quiesce_queue_nowait(sdev->request_queue);
2733		else
2734			blk_mq_quiesce_queue(sdev->request_queue);
2735	} else {
2736		if (!nowait)
2737			blk_mq_wait_quiesce_done(sdev->request_queue->tag_set);
2738	}
2739}
2740
2741/**
2742 * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
2743 * @sdev: device to block
2744 *
2745 * Pause SCSI command processing on the specified device. Does not sleep.
2746 *
2747 * Returns zero if successful or a negative error code upon failure.
2748 *
2749 * Notes:
2750 * This routine transitions the device to the SDEV_BLOCK state (which must be
2751 * a legal transition). When the device is in this state, command processing
2752 * is paused until the device leaves the SDEV_BLOCK state. See also
2753 * scsi_internal_device_unblock_nowait().
2754 */
2755int scsi_internal_device_block_nowait(struct scsi_device *sdev)
2756{
2757	int ret = __scsi_internal_device_block_nowait(sdev);
2758
2759	/*
2760	 * The device has transitioned to SDEV_BLOCK.  Stop the
2761	 * block layer from calling the midlayer with this device's
2762	 * request queue.
2763	 */
2764	if (!ret)
2765		scsi_stop_queue(sdev, true);
2766	return ret;
2767}
2768EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
2769
2770/**
2771 * scsi_internal_device_block - try to transition to the SDEV_BLOCK state
2772 * @sdev: device to block
2773 *
2774 * Pause SCSI command processing on the specified device and wait until all
2775 * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep.
2776 *
2777 * Returns zero if successful or a negative error code upon failure.
2778 *
2779 * Note:
2780 * This routine transitions the device to the SDEV_BLOCK state (which must be
2781 * a legal transition). When the device is in this state, command processing
2782 * is paused until the device leaves the SDEV_BLOCK state. See also
2783 * scsi_internal_device_unblock().
2784 */
2785static int scsi_internal_device_block(struct scsi_device *sdev)
2786{
2787	int err;
2788
2789	mutex_lock(&sdev->state_mutex);
2790	err = __scsi_internal_device_block_nowait(sdev);
2791	if (err == 0)
2792		scsi_stop_queue(sdev, false);
2793	mutex_unlock(&sdev->state_mutex);
2794
2795	return err;
2796}
2797
2798/**
2799 * scsi_internal_device_unblock_nowait - resume a device after a block request
2800 * @sdev:	device to resume
2801 * @new_state:	state to set the device to after unblocking
2802 *
2803 * Restart the device queue for a previously suspended SCSI device. Does not
2804 * sleep.
2805 *
2806 * Returns zero if successful or a negative error code upon failure.
2807 *
2808 * Notes:
2809 * This routine transitions the device to the SDEV_RUNNING state or to one of
2810 * the offline states (which must be a legal transition) allowing the midlayer
2811 * to goose the queue for this device.
2812 */
2813int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
2814					enum scsi_device_state new_state)
2815{
2816	switch (new_state) {
2817	case SDEV_RUNNING:
2818	case SDEV_TRANSPORT_OFFLINE:
2819		break;
2820	default:
2821		return -EINVAL;
2822	}
2823
2824	/*
2825	 * Try to transition the scsi device to SDEV_RUNNING or one of the
2826	 * offlined states and goose the device queue if successful.
2827	 */
2828	switch (sdev->sdev_state) {
2829	case SDEV_BLOCK:
2830	case SDEV_TRANSPORT_OFFLINE:
2831		sdev->sdev_state = new_state;
2832		break;
2833	case SDEV_CREATED_BLOCK:
2834		if (new_state == SDEV_TRANSPORT_OFFLINE ||
2835		    new_state == SDEV_OFFLINE)
2836			sdev->sdev_state = new_state;
2837		else
2838			sdev->sdev_state = SDEV_CREATED;
2839		break;
2840	case SDEV_CANCEL:
2841	case SDEV_OFFLINE:
2842		break;
2843	default:
2844		return -EINVAL;
2845	}
2846	scsi_start_queue(sdev);
2847
2848	return 0;
2849}
2850EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);
2851
2852/**
2853 * scsi_internal_device_unblock - resume a device after a block request
2854 * @sdev:	device to resume
2855 * @new_state:	state to set the device to after unblocking
2856 *
2857 * Restart the device queue for a previously suspended SCSI device. May sleep.
2858 *
2859 * Returns zero if successful or a negative error code upon failure.
2860 *
2861 * Notes:
2862 * This routine transitions the device to the SDEV_RUNNING state or to one of
2863 * the offline states (which must be a legal transition) allowing the midlayer
2864 * to goose the queue for this device.
2865 */
2866static int scsi_internal_device_unblock(struct scsi_device *sdev,
2867					enum scsi_device_state new_state)
2868{
2869	int ret;
2870
2871	mutex_lock(&sdev->state_mutex);
2872	ret = scsi_internal_device_unblock_nowait(sdev, new_state);
2873	mutex_unlock(&sdev->state_mutex);
2874
2875	return ret;
2876}
2877
2878static void
2879device_block(struct scsi_device *sdev, void *data)
2880{
2881	int ret;
2882
2883	ret = scsi_internal_device_block(sdev);
2884
2885	WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n",
2886		  dev_name(&sdev->sdev_gendev), ret);
2887}
2888
2889static int
2890target_block(struct device *dev, void *data)
2891{
2892	if (scsi_is_target_device(dev))
2893		starget_for_each_device(to_scsi_target(dev), NULL,
2894					device_block);
2895	return 0;
2896}
2897
2898void
2899scsi_target_block(struct device *dev)
2900{
2901	if (scsi_is_target_device(dev))
2902		starget_for_each_device(to_scsi_target(dev), NULL,
2903					device_block);
2904	else
2905		device_for_each_child(dev, NULL, target_block);
2906}
2907EXPORT_SYMBOL_GPL(scsi_target_block);
2908
2909static void
2910device_unblock(struct scsi_device *sdev, void *data)
2911{
2912	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
2913}
2914
2915static int
2916target_unblock(struct device *dev, void *data)
2917{
2918	if (scsi_is_target_device(dev))
2919		starget_for_each_device(to_scsi_target(dev), data,
2920					device_unblock);
2921	return 0;
2922}
2923
2924void
2925scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
2926{
2927	if (scsi_is_target_device(dev))
2928		starget_for_each_device(to_scsi_target(dev), &new_state,
2929					device_unblock);
2930	else
2931		device_for_each_child(dev, &new_state, target_unblock);
2932}
2933EXPORT_SYMBOL_GPL(scsi_target_unblock);
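
/*
 * Usage sketch (editorial addition): a transport class that detects a link
 * outage typically blocks the target and unblocks it once the link returns;
 * the new_state chosen below is an assumption.
 *
 *	scsi_target_block(&starget->dev);
 *	...
 *	scsi_target_unblock(&starget->dev, SDEV_RUNNING);
 */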
2934
2935int
2936scsi_host_block(struct Scsi_Host *shost)
2937{
2938	struct scsi_device *sdev;
2939	int ret = 0;
2940
2941	/*
2942	 * Call scsi_internal_device_block_nowait so we can avoid
2943	 * calling synchronize_rcu() for each LUN.
2944	 */
2945	shost_for_each_device(sdev, shost) {
2946		mutex_lock(&sdev->state_mutex);
2947		ret = scsi_internal_device_block_nowait(sdev);
2948		mutex_unlock(&sdev->state_mutex);
2949		if (ret) {
2950			scsi_device_put(sdev);
2951			break;
2952		}
2953	}
2954
2955	/*
2956	 * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag so
2957	 * calling synchronize_rcu() once is enough.
2958	 */
2959	WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING);
2960
2961	if (!ret)
2962		synchronize_rcu();
2963
2964	return ret;
2965}
2966EXPORT_SYMBOL_GPL(scsi_host_block);
2967
2968int
2969scsi_host_unblock(struct Scsi_Host *shost, int new_state)
2970{
2971	struct scsi_device *sdev;
2972	int ret = 0;
2973
2974	shost_for_each_device(sdev, shost) {
2975		ret = scsi_internal_device_unblock(sdev, new_state);
2976		if (ret) {
2977			scsi_device_put(sdev);
2978			break;
2979		}
2980	}
2981	return ret;
2982}
2983EXPORT_SYMBOL_GPL(scsi_host_unblock);
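
/*
 * Usage sketch (editorial addition): block every device on a host around an
 * operation that must not see new commands, then unblock back to running.
 *
 *	if (scsi_host_block(shost) == 0) {
 *		perform the host-wide operation here
 *		scsi_host_unblock(shost, SDEV_RUNNING);
 *	}
 */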
2984
2985/**
2986 * scsi_kmap_atomic_sg - find and atomically map an sg-element
2987 * @sgl:	scatter-gather list
2988 * @sg_count:	number of segments in sg
2989 * @offset:	offset in bytes into sg, on return offset into the mapped area
2990 * @len:	bytes to map, on return number of bytes mapped
2991 *
2992 * Returns virtual address of the start of the mapped page
2993 */
2994void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2995			  size_t *offset, size_t *len)
2996{
2997	int i;
2998	size_t sg_len = 0, len_complete = 0;
2999	struct scatterlist *sg;
3000	struct page *page;
3001
3002	WARN_ON(!irqs_disabled());
3003
3004	for_each_sg(sgl, sg, sg_count, i) {
3005		len_complete = sg_len; /* Complete sg-entries */
3006		sg_len += sg->length;
3007		if (sg_len > *offset)
3008			break;
3009	}
3010
3011	if (unlikely(i == sg_count)) {
3012		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
3013			"elements %d\n",
3014		       __func__, sg_len, *offset, sg_count);
3015		WARN_ON(1);
3016		return NULL;
3017	}
3018
3019	/* Offset starting from the beginning of first page in this sg-entry */
3020	*offset = *offset - len_complete + sg->offset;
3021
3022	/* Assumption: contiguous pages can be accessed as "page + i" */
3023	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
3024	*offset &= ~PAGE_MASK;
3025
3026	/* Bytes in this sg-entry from *offset to the end of the page */
3027	sg_len = PAGE_SIZE - *offset;
3028	if (*len > sg_len)
3029		*len = sg_len;
3030
3031	return kmap_atomic(page);
3032}
3033EXPORT_SYMBOL(scsi_kmap_atomic_sg);
3034
3035/**
3036 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
3037 * @virt:	virtual address to be unmapped
3038 */
3039void scsi_kunmap_atomic_sg(void *virt)
3040{
3041	kunmap_atomic(virt);
3042}
3043EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
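
/*
 * Usage sketch (editorial addition): copy a small amount of data out of a
 * command's scatter-gather list.  Must be called with interrupts disabled
 * (see the WARN_ON above); only the bytes that fit in the mapped page are
 * copied, as *len is clamped by scsi_kmap_atomic_sg().  The local "hdr" is a
 * hypothetical destination.
 *
 *	size_t off = 0, len = sizeof(hdr);
 *	void *p = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
 *				      &off, &len);
 *	if (p) {
 *		memcpy(&hdr, p + off, len);
 *		scsi_kunmap_atomic_sg(p);
 *	}
 */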
3044
3045void sdev_disable_disk_events(struct scsi_device *sdev)
3046{
3047	atomic_inc(&sdev->disk_events_disable_depth);
3048}
3049EXPORT_SYMBOL(sdev_disable_disk_events);
3050
3051void sdev_enable_disk_events(struct scsi_device *sdev)
3052{
3053	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
3054		return;
3055	atomic_dec(&sdev->disk_events_disable_depth);
3056}
3057EXPORT_SYMBOL(sdev_enable_disk_events);
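
/*
 * Usage sketch (editorial addition): temporarily suppress disk events (for
 * example media change polling) around an operation, then re-enable them.
 * Calls nest via the disable depth counter.
 *
 *	sdev_disable_disk_events(sdev);
 *	... section during which events must not be generated ...
 *	sdev_enable_disk_events(sdev);
 */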
3058
3059static unsigned char designator_prio(const unsigned char *d)
3060{
3061	if (d[1] & 0x30)
3062		/* not associated with LUN */
3063		return 0;
3064
3065	if (d[3] == 0)
3066		/* invalid length */
3067		return 0;
3068
3069	/*
3070	 * Order of preference for lun descriptor:
3071	 * - SCSI name string
3072	 * - NAA IEEE Registered Extended
3073	 * - EUI-64 based 16-byte
3074	 * - EUI-64 based 12-byte
3075	 * - NAA IEEE Registered
3076	 * - NAA IEEE Extended
3077	 * - EUI-64 based 8-byte
3078	 * - SCSI name string (truncated)
3079	 * - T10 Vendor ID
3080	 * as longer descriptors reduce the likelihood
3081	 * of identification clashes.
3082	 */
3083
3084	switch (d[1] & 0xf) {
3085	case 8:
3086		/* SCSI name string, variable-length UTF-8 */
3087		return 9;
3088	case 3:
3089		switch (d[4] >> 4) {
3090		case 6:
3091			/* NAA registered extended */
3092			return 8;
3093		case 5:
3094			/* NAA registered */
3095			return 5;
3096		case 4:
3097			/* NAA extended */
3098			return 4;
3099		case 3:
3100			/* NAA locally assigned */
3101			return 1;
3102		default:
3103			break;
3104		}
3105		break;
3106	case 2:
3107		switch (d[3]) {
3108		case 16:
3109			/* EUI64-based, 16 byte */
3110			return 7;
3111		case 12:
3112			/* EUI64-based, 12 byte */
3113			return 6;
3114		case 8:
3115			/* EUI64-based, 8 byte */
3116			return 3;
3117		default:
3118			break;
3119		}
3120		break;
3121	case 1:
3122		/* T10 vendor ID */
3123		return 1;
3124	default:
3125		break;
3126	}
3127
3128	return 0;
3129}
3130
3131/**
3132 * scsi_vpd_lun_id - return a unique device identification
3133 * @sdev: SCSI device
3134 * @id:   buffer for the identification
3135 * @id_len:  length of the buffer
3136 *
3137 * Copies a unique device identification into @id based
3138 * on the information in the VPD page 0x83 of the device.
3139 * The string will be formatted as a SCSI name string.
3140 *
3141 * Returns the length of the identification or error on failure.
3142 * If the identifier is longer than the supplied buffer the actual
3143 * identifier length is returned and the buffer is not zero-padded.
3144 */
3145int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
3146{
3147	u8 cur_id_prio = 0;
3148	u8 cur_id_size = 0;
3149	const unsigned char *d, *cur_id_str;
3150	const struct scsi_vpd *vpd_pg83;
3151	int id_size = -EINVAL;
3152
3153	rcu_read_lock();
3154	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
3155	if (!vpd_pg83) {
3156		rcu_read_unlock();
3157		return -ENXIO;
3158	}
3159
3160	/* The id string must be at least 20 bytes + terminating NULL byte */
3161	if (id_len < 21) {
3162		rcu_read_unlock();
3163		return -EINVAL;
3164	}
3165
3166	memset(id, 0, id_len);
3167	for (d = vpd_pg83->data + 4;
3168	     d < vpd_pg83->data + vpd_pg83->len;
3169	     d += d[3] + 4) {
3170		u8 prio = designator_prio(d);
3171
3172		if (prio == 0 || cur_id_prio > prio)
3173			continue;
3174
3175		switch (d[1] & 0xf) {
3176		case 0x1:
3177			/* T10 Vendor ID */
3178			if (cur_id_size > d[3])
3179				break;
3180			cur_id_prio = prio;
3181			cur_id_size = d[3];
3182			if (cur_id_size + 4 > id_len)
3183				cur_id_size = id_len - 4;
3184			cur_id_str = d + 4;
3185			id_size = snprintf(id, id_len, "t10.%*pE",
3186					   cur_id_size, cur_id_str);
3187			break;
3188		case 0x2:
3189			/* EUI-64 */
3190			cur_id_prio = prio;
3191			cur_id_size = d[3];
3192			cur_id_str = d + 4;
3193			switch (cur_id_size) {
3194			case 8:
3195				id_size = snprintf(id, id_len,
3196						   "eui.%8phN",
3197						   cur_id_str);
3198				break;
3199			case 12:
3200				id_size = snprintf(id, id_len,
3201						   "eui.%12phN",
3202						   cur_id_str);
3203				break;
3204			case 16:
3205				id_size = snprintf(id, id_len,
3206						   "eui.%16phN",
3207						   cur_id_str);
3208				break;
3209			default:
3210				break;
3211			}
3212			break;
3213		case 0x3:
3214			/* NAA */
3215			cur_id_prio = prio;
3216			cur_id_size = d[3];
3217			cur_id_str = d + 4;
3218			switch (cur_id_size) {
3219			case 8:
3220				id_size = snprintf(id, id_len,
3221						   "naa.%8phN",
3222						   cur_id_str);
3223				break;
3224			case 16:
3225				id_size = snprintf(id, id_len,
3226						   "naa.%16phN",
3227						   cur_id_str);
3228				break;
3229			default:
3230				break;
3231			}
3232			break;
3233		case 0x8:
3234			/* SCSI name string */
3235			if (cur_id_size > d[3])
3236				break;
3237			/* Prefer others for truncated descriptor */
3238			if (d[3] > id_len) {
3239				prio = 2;
3240				if (cur_id_prio > prio)
3241					break;
3242			}
3243			cur_id_prio = prio;
3244			cur_id_size = id_size = d[3];
3245			cur_id_str = d + 4;
3246			if (cur_id_size >= id_len)
3247				cur_id_size = id_len - 1;
3248			memcpy(id, cur_id_str, cur_id_size);
3249			break;
3250		default:
3251			break;
3252		}
3253	}
3254	rcu_read_unlock();
3255
3256	return id_size;
3257}
3258EXPORT_SYMBOL(scsi_vpd_lun_id);
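
/*
 * Usage sketch (editorial addition): retrieve a stable identifier for a LUN.
 * The buffer size is an assumption; it only has to satisfy the minimum
 * length check above (21 bytes).
 *
 *	char id[64];
 *	int len = scsi_vpd_lun_id(sdev, id, sizeof(id));
 *
 *	if (len > 0)
 *		id now holds something like "naa.5000c500a1b2c3d4"
 */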
3259
3260/*
3261 * scsi_vpd_tpg_id - return a target port group identifier
3262 * @sdev: SCSI device
3263 *
3264 * Returns the Target Port Group identifier from the information
3265 * in VPD page 0x83 of the device.
3266 *
3267 * Returns the identifier or error on failure.
3268 */
3269int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
3270{
3271	const unsigned char *d;
3272	const struct scsi_vpd *vpd_pg83;
3273	int group_id = -EAGAIN, rel_port = -1;
3274
3275	rcu_read_lock();
3276	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
3277	if (!vpd_pg83) {
3278		rcu_read_unlock();
3279		return -ENXIO;
3280	}
3281
3282	d = vpd_pg83->data + 4;
3283	while (d < vpd_pg83->data + vpd_pg83->len) {
3284		switch (d[1] & 0xf) {
3285		case 0x4:
3286			/* Relative target port */
3287			rel_port = get_unaligned_be16(&d[6]);
3288			break;
3289		case 0x5:
3290			/* Target port group */
3291			group_id = get_unaligned_be16(&d[6]);
3292			break;
3293		default:
3294			break;
3295		}
3296		d += d[3] + 4;
3297	}
3298	rcu_read_unlock();
3299
3300	if (group_id >= 0 && rel_id && rel_port != -1)
3301		*rel_id = rel_port;
3302
3303	return group_id;
3304}
3305EXPORT_SYMBOL(scsi_vpd_tpg_id);
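
/*
 * Usage sketch (editorial addition): query the target port group, and
 * optionally the relative target port, for ALUA-style path handling.
 *
 *	int rel_port = -1;
 *	int group = scsi_vpd_tpg_id(sdev, &rel_port);
 *
 *	if (group >= 0)
 *		group identifies the port group, rel_port the relative port
 */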
3306
3307/**
3308 * scsi_build_sense - build sense data for a command
3309 * @scmd:	scsi command for which the sense should be formatted
3310 * @desc:	Sense format (non-zero == descriptor format,
3311 *              0 == fixed format)
3312 * @key:	Sense key
3313 * @asc:	Additional sense code
3314 * @ascq:	Additional sense code qualifier
3315 *
3316 **/
3317void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq)
3318{
3319	scsi_build_sense_buffer(desc, scmd->sense_buffer, key, asc, ascq);
3320	scmd->result = SAM_STAT_CHECK_CONDITION;
3321}
3322EXPORT_SYMBOL_GPL(scsi_build_sense);
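
/*
 * Usage sketch (editorial addition): an LLD that wants to fail a command
 * with ILLEGAL REQUEST / INVALID FIELD IN CDB (0x24/0x00) could do so as
 * below before completing the command; completion via scmd->scsi_done() is
 * an assumption about the caller's context in this kernel version.
 *
 *	scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0x0);
 *	scmd->scsi_done(scmd);
 */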