// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h> /* __scsi_init_queue() */
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_debugfs.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

/*
 * Size of integrity metadata is usually small, 1 inline sg should
 * cover normal cases.
 */
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  SCSI_INLINE_PROT_SG_CNT  0
#define  SCSI_INLINE_SG_CNT  0
#else
#define  SCSI_INLINE_PROT_SG_CNT  1
#define  SCSI_INLINE_SG_CNT  2
#endif
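
/*
 * Note: without scatterlist chaining an inline first chunk cannot be
 * chained to the rest of a larger table, which is why the inline
 * counts drop to zero when CONFIG_ARCH_NO_SG_CHAIN is set.
 */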

static struct kmem_cache *scsi_sense_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);

int scsi_init_sense_cache(struct Scsi_Host *shost)
{
	int ret = 0;

	mutex_lock(&scsi_sense_cache_mutex);
	if (!scsi_sense_cache) {
		scsi_sense_cache =
			kmem_cache_create_usercopy("scsi_sense_cache",
				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
				0, SCSI_SENSE_BUFFERSIZE, NULL);
		if (!scsi_sense_cache)
			ret = -ENOMEM;
	}
	mutex_unlock(&scsi_sense_cache_mutex);
	return ret;
}

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}

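/*
 * Unprepare @cmd (the request must have been prepared, i.e. RQF_DONTPREP
 * set, otherwise this warns once) and put its request back on the requeue
 * list; the list is kicked after @msecs unless the host is in recovery.
 */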
static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
{
	struct request *rq = scsi_cmd_to_rq(cmd);

	if (rq->rq_flags & RQF_DONTPREP) {
		rq->rq_flags &= ~RQF_DONTPREP;
		scsi_mq_uninit_cmd(cmd);
	} else {
		WARN_ON_ONCE(true);
	}

	blk_mq_requeue_request(rq, false);
	if (!scsi_host_in_recovery(cmd->device->host))
		blk_mq_delay_kick_requeue_list(rq->q, msecs);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason:  The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
	struct scsi_device *device = cmd->device;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device, cmd);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue. Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_mq_destroy_queue() finishes.
	 */
	cmd->result = 0;

	blk_mq_requeue_request(scsi_cmd_to_rq(cmd),
			       !scsi_host_in_recovery(cmd->device->host));
}

/**
 * scsi_queue_insert - Reinsert a command in the queue.
 * @cmd:    command that we are adding to queue.
 * @reason: why we are inserting command to queue.
 *
 * We do this for one of two cases. Either the host is busy and it cannot accept
 * any more commands for the time being, or the device returned QUEUE_FULL and
 * can accept no more commands.
 *
 * Context: This could be called either from an interrupt context or a normal
 * process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, true);
}

/**
 * scsi_execute_cmd - insert request and wait for the result
 * @sdev:	scsi_device
 * @cmd:	scsi command
 * @opf:	block layer request cmd_flags
 * @buffer:	data buffer
 * @bufflen:	length of @buffer in bytes
 * @timeout:	request timeout in HZ
 * @retries:	number of times to retry request
 * @args:	Optional args. See struct definition for field descriptions
 *
 * Returns the scsi_cmnd result field if a command was executed, or a negative
 * Linux error code if we didn't get that far.
 */
int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
		     blk_opf_t opf, void *buffer, unsigned int bufflen,
		     int timeout, int retries,
		     const struct scsi_exec_args *args)
{
	static const struct scsi_exec_args default_args;
	struct request *req;
	struct scsi_cmnd *scmd;
	int ret;

	if (!args)
		args = &default_args;
	else if (WARN_ON_ONCE(args->sense &&
			      args->sense_len != SCSI_SENSE_BUFFERSIZE))
		return -EINVAL;

	req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (bufflen) {
		ret = blk_rq_map_kern(sdev->request_queue, req,
				      buffer, bufflen, GFP_NOIO);
		if (ret)
			goto out;
	}
	scmd = blk_mq_rq_to_pdu(req);
	scmd->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(scmd->cmnd, cmd, scmd->cmd_len);
	scmd->allowed = retries;
	scmd->flags |= args->scmd_flags;
	req->timeout = timeout;
	req->rq_flags |= RQF_QUIET;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req, true);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen))
		memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len);

	if (args->resid)
		*args->resid = scmd->resid_len;
	if (args->sense)
		memcpy(args->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
	if (args->sshdr)
		scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
				     args->sshdr);

	ret = scmd->result;
 out:
	blk_mq_free_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute_cmd);
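
/*
 * Example (hypothetical caller, not part of this file): a driver might
 * issue a TEST UNIT READY with sense decoding roughly as follows:
 *
 *	struct scsi_sense_hdr sshdr;
 *	const struct scsi_exec_args exec_args = { .sshdr = &sshdr };
 *	static const unsigned char cdb[6] = { TEST_UNIT_READY };
 *	int ret;
 *
 *	ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, NULL, 0,
 *			       30 * HZ, 3, &exec_args);
 *
 * A negative value means the command was never dispatched; otherwise the
 * returned result and the normalized sense in sshdr can be examined.
 */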

/*
 * Wake up the error handler if necessary. To avoid a missed wakeup when
 * the number of host in-flight requests equals shost->host_failed,
 * scsi_eh_scmd_add() uses call_rcu() in combination with an RCU read
 * lock in this function. This ensures that this function in its
 * entirety either finishes before scsi_eh_scmd_add() increases the
 * host_failed counter or that it notices the shost state change made by
 * scsi_eh_scmd_add().
 */
static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long flags;

	rcu_read_lock();
	__clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
	if (unlikely(scsi_host_in_recovery(shost))) {
		unsigned int busy = scsi_host_busy(shost);

		spin_lock_irqsave(shost->host_lock, flags);
		if (shost->host_failed || shost->host_eh_scheduled)
			scsi_eh_wakeup(shost, busy);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	rcu_read_unlock();
}

void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);

	scsi_dec_host_busy(shost, cmd);

	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	sbitmap_put(&sdev->budget_map, cmd->budget_token);
	cmd->budget_token = -1;
}

/*
 * Kick the queue of SCSI device @sdev if @sdev != current_sdev. Called with
 * interrupts disabled.
 */
static void scsi_kick_sdev_queue(struct scsi_device *sdev, void *data)
{
	struct scsi_device *current_sdev = data;

	if (sdev != current_sdev)
		blk_mq_run_hw_queues(sdev->request_queue, true);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and run the queues of all the scsi_devices on the target - including
 * current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Run the queues of all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_mq_run_hw_queues(current_sdev->request_queue,
			     shost->queuecommand_may_block);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!starget->starget_sdev_user)
		__starget_for_each_device(starget, current_sdev,
					  scsi_kick_sdev_queue);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (scsi_device_busy(sdev) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_mq_destroy_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_mq_run_hw_queues(slq, false);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * scsi_run_queue - Select a proper request queue to serve next.
 * @q:  last request's queue
 *
 * The previous command was completely finished, start a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	/* Note: blk_mq_kick_requeue_list() runs the queue asynchronously. */
	blk_mq_kick_requeue_list(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

void scsi_free_sgtables(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table,
				SCSI_INLINE_SG_CNT);
	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table,
				SCSI_INLINE_PROT_SG_CNT);
}
EXPORT_SYMBOL_GPL(scsi_free_sgtables);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	scsi_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);
}

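/*
 * Run the queue(s) after command completion. Pairs with the budget
 * contention logic in scsi_mq_get_budget(): a submitter that failed to
 * get budget increments sdev->restarts, and the completion path below
 * clears ->restarts and reruns the hw queues so that submitter is not
 * left waiting without a queue run.
 */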
static void scsi_run_queue_async(struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(sdev->host))
		return;

	if (scsi_target(sdev)->single_lun ||
	    !list_empty(&sdev->host->starved_list)) {
		kblockd_schedule_work(&sdev->requeue_work);
	} else {
		/*
		 * smp_mb() present in sbitmap_queue_clear() or implied in
		 * .end_io is for ordering writing .device_busy in
		 * scsi_device_unbusy() and reading sdev->restarts.
		 */
		int old = atomic_read(&sdev->restarts);

		/*
		 * ->restarts has to be kept as non-zero if new budget
		 *  contention occurs.
		 *
		 *  No need to run queue when either another re-run
		 *  queue wins in updating ->restarts or a new budget
		 *  contention occurs.
		 */
		if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
			blk_mq_run_hw_queues(sdev->request_queue, true);
	}
}

/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
		unsigned int bytes)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	// XXX:
	if (blk_queue_add_random(q))
		add_disk_randomness(req->q->disk);

	if (!blk_rq_is_passthrough(req)) {
		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
		cmd->flags &= ~SCMD_INITIALIZED;
	}

	/*
	 * Calling rcu_barrier() is not necessary here because the
	 * SCSI error handler guarantees that the function called by
	 * call_rcu() has been called before scsi_end_request() is
	 * called.
	 */
	destroy_rcu_head(&cmd->rcu);

	/*
	 * In the MQ case the command gets freed by __blk_mq_end_request,
	 * so we have to do all cleanup that depends on it earlier.
	 *
	 * We also can't kick the queues from irq context, so we
	 * will have to defer it to a workqueue.
	 */
	scsi_mq_uninit_cmd(cmd);

	/*
	 * queue is still alive, so grab the ref for preventing it
	 * from being cleaned up during running queue.
	 */
	percpu_ref_get(&q->q_usage_counter);

	__blk_mq_end_request(req, error);

	scsi_run_queue_async(sdev);

	percpu_ref_put(&q->q_usage_counter);
	return false;
}

/**
 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 * @result:	scsi error code
 *
 * Translate a SCSI result code into a blk_status_t value.
 */
static blk_status_t scsi_result_to_blk_status(int result)
{
	/*
	 * Check the scsi-ml byte first in case we converted a host or status
	 * byte.
	 */
	switch (scsi_ml_byte(result)) {
	case SCSIML_STAT_OK:
		break;
	case SCSIML_STAT_RESV_CONFLICT:
		return BLK_STS_RESV_CONFLICT;
	case SCSIML_STAT_NOSPC:
		return BLK_STS_NOSPC;
	case SCSIML_STAT_MED_ERROR:
		return BLK_STS_MEDIUM;
	case SCSIML_STAT_TGT_FAILURE:
		return BLK_STS_TARGET;
	case SCSIML_STAT_DL_TIMEOUT:
		return BLK_STS_DURATION_LIMIT;
	}

	switch (host_byte(result)) {
	case DID_OK:
		if (scsi_status_is_good(result))
			return BLK_STS_OK;
		return BLK_STS_IOERR;
	case DID_TRANSPORT_FAILFAST:
	case DID_TRANSPORT_MARGINAL:
		return BLK_STS_TRANSPORT;
	default:
		return BLK_STS_IOERR;
	}
}

/**
 * scsi_rq_err_bytes - determine the number of bytes until the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be a merge of bios which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into an area which needs to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
static unsigned int scsi_rq_err_bytes(const struct request *rq)
{
	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different failfast types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}

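/*
 * Check whether the total time budget of a command has been exceeded:
 * (cmd->allowed + 1) attempts times the per-attempt request timeout,
 * measured from the time of allocation.
 */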
static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{
	struct request *req = scsi_cmd_to_rq(cmd);
	unsigned long wait_for;

	if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
		return false;

	wait_for = (cmd->allowed + 1) * req->timeout;
	if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n",
			    wait_for/HZ);
		return true;
	}
	return false;
}

/*
 * When ALUA transition state is returned, reprep the cmd to
 * use the ALUA handler's transition timeout. Delay the reprep
 * 1 sec to avoid aggressive retries of the target in that
 * state.
 */
#define ALUA_TRANSITION_REPREP_DELAY	1000

/* Helper for scsi_io_completion() when special action is required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
	struct request *req = scsi_cmd_to_rq(cmd);
	int level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
	      ACTION_RETRY, ACTION_DELAYED_RETRY} action;
	struct scsi_sense_hdr sshdr;
	bool sense_valid;
	bool sense_current = true;      /* false implies "deferred sense" */
	blk_status_t blk_stat;

	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
	if (sense_valid)
		sense_current = !scsi_sense_is_deferred(&sshdr);

	blk_stat = scsi_result_to_blk_status(result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && sense_current) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				action = ACTION_FAIL;
				blk_stat = BLK_STS_PROTECTION;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				blk_stat = BLK_STS_TARGET;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				blk_stat = BLK_STS_PROTECTION;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x11: /* notify (enable spinup) required */
				case 0x14: /* space allocation in progress */
				case 0x1a: /* start stop unit in progress */
				case 0x1b: /* sanitize in progress */
				case 0x1d: /* configuration in progress */
				case 0x24: /* depopulation in progress */
				case 0x25: /* depopulation restore in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				case 0x0a: /* ALUA state transition */
					action = ACTION_DELAYED_REPREP;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		case DATA_PROTECT:
			action = ACTION_FAIL;
			if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) ||
			    (sshdr.asc == 0x55 &&
			     (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) {
				/* Insufficient zone resources */
				blk_stat = BLK_STS_ZONE_OPEN_RESOURCE;
			}
			break;
		case COMPLETED:
			fallthrough;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL && scsi_cmd_runtime_exceeced(cmd))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->rq_flags & RQF_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level =
				     SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						    SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (sense_valid)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
			return;
		fallthrough;
	case ACTION_REPREP:
		scsi_mq_requeue_cmd(cmd, 0);
		break;
	case ACTION_DELAYED_REPREP:
		scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
		break;
	}
}

/*
 * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a
 * new result that may suppress further error checking. Also modifies
 * *blk_statp in some cases.
 */
static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
					blk_status_t *blk_statp)
{
	bool sense_valid;
	bool sense_current = true;	/* false implies "deferred sense" */
	struct request *req = scsi_cmd_to_rq(cmd);
	struct scsi_sense_hdr sshdr;

	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
	if (sense_valid)
		sense_current = !scsi_sense_is_deferred(&sshdr);

	if (blk_rq_is_passthrough(req)) {
		if (sense_valid) {
			/*
			 * SG_IO wants current and deferred errors
			 */
			cmd->sense_len = min(8 + cmd->sense_buffer[7],
					     SCSI_SENSE_BUFFERSIZE);
		}
		if (sense_current)
			*blk_statp = scsi_result_to_blk_status(result);
	} else if (blk_rq_bytes(req) == 0 && sense_current) {
		/*
		 * Flush commands do not transfer any data, and thus cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets *blk_statp explicitly for the problem case.
		 */
		*blk_statp = scsi_result_to_blk_status(result);
	}
	/*
	 * Recovered errors need reporting, but they're always treated as
	 * success, so fiddle the result code here.  For passthrough requests
	 * we already took a copy of the original into sreq->result which
	 * is what gets returned to the user
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		bool do_print = true;
		/*
		 * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d]
		 * skip print since caller wants ATA registers. Only occurs
		 * on SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			do_print = false;
		else if (req->rq_flags & RQF_QUIET)
			do_print = false;
		if (do_print)
			scsi_print_sense(cmd);
		result = 0;
		/* for passthrough, *blk_statp may be set */
		*blk_statp = BLK_STS_OK;
	}
	/*
	 * Another corner case: the SCSI status byte is non-zero but 'good'.
	 * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
	 * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
	 * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
	 * intermediate statuses (both obsolete in SAM-4) as good.
	 */
	if ((result & 0xff) && scsi_status_is_good(result)) {
		result = 0;
		*blk_statp = BLK_STS_OK;
	}
	return result;
}

/**
 * scsi_io_completion - Completion processing for SCSI commands.
 * @cmd:	command that is finished.
 * @good_bytes:	number of processed bytes.
 *
 * We will finish off the specified number of sectors. If we are done, the
 * command block will be released and the queue function will be goosed. If we
 * are not done then we have to figure out what to do next:
 *
 *   a) We can call scsi_mq_requeue_cmd().  The request will be
 *	unprepared and put back on the queue.  Then a new command will
 *	be created for it.  This should be used if we made forward
 *	progress, or if we want to switch from READ(10) to READ(6) for
 *	example.
 *
 *   b) We can call scsi_io_completion_action().  The request will be
 *	put back on the queue and retried using the same command as
 *	before, possibly after a delay.
 *
 *   c) We can call scsi_end_request() with blk_stat other than
 *	BLK_STS_OK, to fail the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request *req = scsi_cmd_to_rq(cmd);
	blk_status_t blk_stat = BLK_STS_OK;

	if (unlikely(result))	/* a nz result may or may not be an error */
		result = scsi_io_completion_nz_result(cmd, result, &blk_stat);

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Failed, zero length commands always need to drop down
	 * to retry code. Fast path should return in this block.
	 */
	if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
		if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
			return; /* no bytes remaining */
	}

	/* Kill remainder if no retries. */
	if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
		if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
			WARN_ONCE(true,
			    "Bytes remaining after failed, no-retry command");
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request just queue the command up again.
	 */
	if (likely(result == 0))
		scsi_mq_requeue_cmd(cmd, 0);
	else
		scsi_io_completion_action(cmd, result);
}

static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
		struct request *rq)
{
	return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
	       !op_is_write(req_op(rq)) &&
	       sdev->host->hostt->dma_need_drain(rq);
}

/**
 * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists
 * @cmd: SCSI command data structure to initialize.
 *
 * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled
 * for @cmd.
 *
 * Returns:
 * * BLK_STS_OK       - on success
 * * BLK_STS_RESOURCE - if the failure is retryable
 * * BLK_STS_IOERR    - if the failure is fatal
 */
blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
	struct scatterlist *last_sg = NULL;
	blk_status_t ret;
	bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
	int count;

	if (WARN_ON_ONCE(!nr_segs))
		return BLK_STS_IOERR;

	/*
	 * Make sure there is space for the drain.  The driver must adjust
	 * max_hw_segments to be prepared for this.
	 */
	if (need_drain)
		nr_segs++;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
			cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT)))
		return BLK_STS_RESOURCE;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);

	if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
		unsigned int pad_len =
			(rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		last_sg->length += pad_len;
		cmd->extra_len += pad_len;
	}

	if (need_drain) {
		sg_unmark_end(last_sg);
		last_sg = sg_next(last_sg);
		sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
		sg_mark_end(last_sg);

		cmd->extra_len += sdev->dma_drain_len;
		count++;
	}

	BUG_ON(count > cmd->sdb.table.nents);
	cmd->sdb.table.nents = count;
	cmd->sdb.length = blk_rq_payload_bytes(rq);

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs;

		if (WARN_ON_ONCE(!prot_sdb)) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			ret = BLK_STS_IOERR;
			goto out_free_sgtables;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
				prot_sdb->table.sgl,
				SCSI_INLINE_PROT_SG_CNT)) {
			ret = BLK_STS_RESOURCE;
			goto out_free_sgtables;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(count > ivecs);
		BUG_ON(count > queue_max_integrity_segments(rq->q));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLK_STS_OK;
out_free_sgtables:
	scsi_free_sgtables(cmd);
	return ret;
}
EXPORT_SYMBOL(scsi_alloc_sgtables);

/**
 * scsi_initialize_rq - initialize struct scsi_cmnd partially
 * @rq: Request associated with the SCSI command to be initialized.
 *
 * This function initializes the members of struct scsi_cmnd that must be
 * initialized before request processing starts and that won't be
 * reinitialized if a SCSI command is requeued.
 */
static void scsi_initialize_rq(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
	cmd->cmd_len = MAX_COMMAND_SIZE;
	cmd->sense_len = 0;
	init_rcu_head(&cmd->rcu);
	cmd->jiffies_at_alloc = jiffies;
	cmd->retries = 0;
}

struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
				   blk_mq_req_flags_t flags)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, opf, flags);
	if (!IS_ERR(rq))
		scsi_initialize_rq(rq);
	return rq;
}
EXPORT_SYMBOL_GPL(scsi_alloc_request);
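
/*
 * See scsi_execute_cmd() above for a typical caller: allocate the request,
 * fill in the CDB of the attached struct scsi_cmnd (obtained with
 * blk_mq_rq_to_pdu()), then issue it with blk_execute_rq().
 */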

/*
 * Only called when the request isn't completed by SCSI, and not freed by
 * SCSI
 */
static void scsi_cleanup_rq(struct request *rq)
{
	if (rq->rq_flags & RQF_DONTPREP) {
		scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
		rq->rq_flags &= ~RQF_DONTPREP;
	}
}

/* Called before a request is prepared. See also scsi_mq_prep_fn(). */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);

	if (!blk_rq_is_passthrough(rq) && !(cmd->flags & SCMD_INITIALIZED)) {
		cmd->flags |= SCMD_INITIALIZED;
		scsi_initialize_rq(rq);
	}

	cmd->device = dev;
	INIT_LIST_HEAD(&cmd->eh_entry);
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
}

static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	/*
	 * Passthrough requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		blk_status_t ret = scsi_alloc_sgtables(cmd);
		if (unlikely(ret != BLK_STS_OK))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->transfersize = blk_rq_bytes(req);
	return BLK_STS_OK;
}

static blk_status_t
scsi_device_state_check(struct scsi_device *sdev, struct request *req)
{
	switch (sdev->sdev_state) {
	case SDEV_CREATED:
		return BLK_STS_OK;
	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		/*
		 * If the device is offline we refuse to process any
		 * commands.  The device must be brought online
		 * before trying any recovery commands.
		 */
		if (!sdev->offline_already) {
			sdev->offline_already = true;
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
		}
		return BLK_STS_IOERR;
	case SDEV_DEL:
		/*
		 * If the device is fully deleted, we refuse to
		 * process any commands as well.
		 */
		sdev_printk(KERN_ERR, sdev,
			    "rejecting I/O to dead device\n");
		return BLK_STS_IOERR;
	case SDEV_BLOCK:
	case SDEV_CREATED_BLOCK:
		return BLK_STS_RESOURCE;
	case SDEV_QUIESCE:
		/*
		 * If the device is quiesced we only accept power management
		 * commands.
		 */
		if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
			return BLK_STS_RESOURCE;
		return BLK_STS_OK;
	default:
		/*
		 * For any other not fully online state we only allow
		 * power management commands.
		 */
		if (req && !(req->rq_flags & RQF_PM))
			return BLK_STS_OFFLINE;
		return BLK_STS_OK;
	}
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, assign one token
 * and return the token; else return -1.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	int token;

	token = sbitmap_get(&sdev->budget_map);
	if (token < 0)
		return -1;

	if (!atomic_read(&sdev->device_blocked))
		return token;

	/*
	 * Only unblock if no other commands are pending and
	 * if device_blocked has decreased to zero
	 */
	if (scsi_device_busy(sdev) > 1 ||
	    atomic_dec_return(&sdev->device_blocked) > 0) {
		sbitmap_put(&sdev->budget_map, token);
		return -1;
	}

	SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
			 "unblocking device at zero depth\n"));

	return token;
}
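
/*
 * The token returned by scsi_dev_queue_ready() is stored in
 * cmd->budget_token via scsi_mq_set_rq_budget_token() and returned to
 * sdev->budget_map by scsi_device_unbusy() or scsi_mq_put_budget().
 */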

/*
 * scsi_target_queue_ready: checks whether we can send commands to the target
 * @sdev: scsi device on starget to check.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev,
				   struct scsi_cmnd *cmd)
{
	if (atomic_read(&shost->host_blocked) > 0) {
		if (scsi_host_busy(shost) > 0)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	scsi_dec_host_busy(shost, cmd);
	return 0;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
 * needs to return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */
static bool scsi_mq_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dying(q))
		return false;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in SCSI layer.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return true;

	return false;
}

/*
 * Block layer request completion callback. May be called from interrupt
 * context.
 */
static void scsi_complete(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	enum scsi_disposition disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS && scsi_cmd_runtime_exceeced(cmd))
		disposition = SUCCESS;

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		scsi_eh_scmd_add(cmd);
		break;
	}
}

/**
 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero if the request was rejected and the device's queue needs
 * to be plugged.
 */
static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		atomic_dec(&cmd->device->iorequest_cnt);
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;

	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		atomic_dec(&cmd->device->iorequest_cnt);
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	scsi_done(cmd);
	return 0;
}

/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
{
	return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
		sizeof(struct scatterlist);
}
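
/*
 * Layout of the command-private request payload (see also
 * scsi_mq_init_request()): struct scsi_cmnd, followed by
 * shost->hostt->cmd_size bytes of LLD private data, followed by the
 * inline data scatterlist, followed by the integrity scsi_data_buffer
 * and its inline scatterlist (the last two only when the host supports
 * protection information). The sense buffer is allocated separately
 * from scsi_sense_cache.
 */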

static blk_status_t scsi_prepare_cmd(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	bool in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
	struct scatterlist *sg;

	scsi_init_command(sdev, cmd);

	cmd->eh_eflags = 0;
	cmd->prot_type = 0;
	cmd->prot_flags = 0;
	cmd->submitter = 0;
	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	cmd->underflow = 0;
	cmd->transfersize = 0;
	cmd->host_scribble = NULL;
	cmd->result = 0;
	cmd->extra_len = 0;
	cmd->state = 0;
	if (in_flight)
		__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);

	/*
	 * Only clear the driver-private command data if the LLD does not supply
	 * a function to initialize that data.
	 */
	if (!shost->hostt->init_cmd_priv)
		memset(cmd + 1, 0, shost->hostt->cmd_size);

	cmd->prot_op = SCSI_PROT_NORMAL;
	if (blk_rq_bytes(req))
		cmd->sc_data_direction = rq_dma_dir(req);
	else
		cmd->sc_data_direction = DMA_NONE;

	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	cmd->sdb.table.sgl = sg;

	if (scsi_host_get_prot(shost)) {
		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));

		cmd->prot_sdb->table.sgl =
			(struct scatterlist *)(cmd->prot_sdb + 1);
	}

	/*
	 * Special handling for passthrough commands, which don't go to the ULP
	 * at all:
	 */
	if (blk_rq_is_passthrough(req))
		return scsi_setup_scsi_cmnd(sdev, req);

	if (sdev->handler && sdev->handler->prep_fn) {
		blk_status_t ret = sdev->handler->prep_fn(sdev, req);

		if (ret != BLK_STS_OK)
			return ret;
	}

	/* Usually overridden by the ULP */
	cmd->allowed = 0;
	memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}

static void scsi_done_internal(struct scsi_cmnd *cmd, bool complete_directly)
{
	struct request *req = scsi_cmd_to_rq(cmd);

	switch (cmd->submitter) {
	case SUBMITTED_BY_BLOCK_LAYER:
		break;
	case SUBMITTED_BY_SCSI_ERROR_HANDLER:
		return scsi_eh_done(cmd);
	case SUBMITTED_BY_SCSI_RESET_IOCTL:
		return;
	}

	if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q)))
		return;
	if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
		return;
	trace_scsi_dispatch_cmd_done(cmd);

	if (complete_directly)
		blk_mq_complete_request_direct(req, scsi_complete);
	else
		blk_mq_complete_request(req);
}

void scsi_done(struct scsi_cmnd *cmd)
{
	scsi_done_internal(cmd, false);
}
EXPORT_SYMBOL(scsi_done);

void scsi_done_direct(struct scsi_cmnd *cmd)
{
	scsi_done_internal(cmd, true);
}
EXPORT_SYMBOL(scsi_done_direct);

static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
{
	struct scsi_device *sdev = q->queuedata;

	sbitmap_put(&sdev->budget_map, budget_token);
}

/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs so as
 * not to change behaviour from the previous unplug mechanism; experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY 3

static int scsi_mq_get_budget(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	int token = scsi_dev_queue_ready(q, sdev);

	if (token >= 0)
		return token;

	atomic_inc(&sdev->restarts);

	/*
	 * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy).
	 * .restarts must be incremented before .device_busy is read because the
	 * code in scsi_run_queue_async() depends on the order of these operations.
	 */
	smp_mb__after_atomic();

	/*
	 * If all in-flight requests originated from this LUN are completed
	 * before reading .device_busy, sdev->device_busy will be observed as
	 * zero, then blk_mq_delay_run_hw_queues() will dispatch this request
	 * soon. Otherwise, completion of one of these requests will observe
	 * the .restarts flag, and the request queue will be run for handling
	 * this request, see scsi_end_request().
	 */
	if (unlikely(scsi_device_busy(sdev) == 0 &&
				!scsi_device_blocked(sdev)))
		blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
	return -1;
}

static void scsi_mq_set_rq_budget_token(struct request *req, int token)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	cmd->budget_token = token;
}

static int scsi_mq_get_rq_budget_token(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	return cmd->budget_token;
}

static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	blk_status_t ret;
	int reason;

	WARN_ON_ONCE(cmd->budget_token < 0);

	/*
	 * If the device is not in running state we will reject some or all
	 * commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		ret = scsi_device_state_check(sdev, req);
		if (ret != BLK_STS_OK)
			goto out_put_budget;
	}

	ret = BLK_STS_RESOURCE;
	if (!scsi_target_queue_ready(shost, sdev))
		goto out_put_budget;
	if (unlikely(scsi_host_in_recovery(shost))) {
		if (cmd->flags & SCMD_FAIL_IF_RECOVERING)
			ret = BLK_STS_OFFLINE;
		goto out_dec_target_busy;
	}
	if (!scsi_host_queue_ready(q, shost, sdev, cmd))
		goto out_dec_target_busy;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		ret = scsi_prepare_cmd(req);
		if (ret != BLK_STS_OK)
			goto out_dec_host_busy;
		req->rq_flags |= RQF_DONTPREP;
	} else {
		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
	}

	cmd->flags &= SCMD_PRESERVED_FLAGS;
	if (sdev->simple_tags)
		cmd->flags |= SCMD_TAGGED;
	if (bd->last)
		cmd->flags |= SCMD_LAST;

	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	cmd->submitter = SUBMITTED_BY_BLOCK_LAYER;

	blk_mq_start_request(req);
	reason = scsi_dispatch_cmd(cmd);
	if (reason) {
		scsi_set_blocked(cmd, reason);
		ret = BLK_STS_RESOURCE;
		goto out_dec_host_busy;
	}

	return BLK_STS_OK;

out_dec_host_busy:
	scsi_dec_host_busy(shost, cmd);
out_dec_target_busy:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
out_put_budget:
	scsi_mq_put_budget(q, cmd->budget_token);
	cmd->budget_token = -1;
	switch (ret) {
	case BLK_STS_OK:
		break;
	case BLK_STS_RESOURCE:
	case BLK_STS_ZONE_RESOURCE:
		if (scsi_device_blocked(sdev))
			ret = BLK_STS_DEV_RESOURCE;
		break;
	case BLK_STS_AGAIN:
		cmd->result = DID_BUS_BUSY << 16;
		if (req->rq_flags & RQF_DONTPREP)
			scsi_mq_uninit_cmd(cmd);
		break;
	default:
		if (unlikely(!scsi_device_online(sdev)))
			cmd->result = DID_NO_CONNECT << 16;
		else
			cmd->result = DID_ERROR << 16;
		/*
		 * Make sure to release all allocated resources when
		 * we hit an error, as we will never see this command
		 * again.
		 */
		if (req->rq_flags & RQF_DONTPREP)
			scsi_mq_uninit_cmd(cmd);
		scsi_run_queue_async(sdev);
		break;
	}
	return ret;
}

static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
				unsigned int hctx_idx, unsigned int numa_node)
{
	struct Scsi_Host *shost = set->driver_data;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct scatterlist *sg;
	int ret = 0;

	cmd->sense_buffer =
		kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
	if (!cmd->sense_buffer)
		return -ENOMEM;

	if (scsi_host_get_prot(shost)) {
		sg = (void *)cmd + sizeof(struct scsi_cmnd) +
			shost->hostt->cmd_size;
		cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
	}

	if (shost->hostt->init_cmd_priv) {
		ret = shost->hostt->init_cmd_priv(shost, cmd);
		if (ret < 0)
			kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
	}

	return ret;
}

static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
				 unsigned int hctx_idx)
{
	struct Scsi_Host *shost = set->driver_data;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	if (shost->hostt->exit_cmd_priv)
		shost->hostt->exit_cmd_priv(shost, cmd);
	kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
}


static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct Scsi_Host *shost = hctx->driver_data;

	if (shost->hostt->mq_poll)
		return shost->hostt->mq_poll(shost, hctx->queue_num);

	return 0;
}

static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct Scsi_Host *shost = data;

	hctx->driver_data = shost;
	return 0;
}

static void scsi_map_queues(struct blk_mq_tag_set *set)
{
	struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);

	if (shost->hostt->map_queues)
		return shost->hostt->map_queues(shost);
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}

void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
	struct device *dev = shost->dma_dev;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SG_MAX_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);
1897
1898	blk_queue_max_segment_size(q, shost->max_segment_size);
1899	blk_queue_virt_boundary(q, shost->virt_boundary_mask);
1900	dma_set_max_seg_size(dev, queue_max_segment_size(q));
1901
1902	/*
1903	 * Set a reasonable default alignment:  The larger of 32-bit (dword),
1904	 * which is a common minimum for HBAs, and the minimum DMA alignment,
1905	 * which is set by the platform.
1906	 *
1907	 * Devices that require a bigger alignment can increase it later.
1908	 */
1909	blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
1910}
1911EXPORT_SYMBOL_GPL(__scsi_init_queue);
1912
1913static const struct blk_mq_ops scsi_mq_ops_no_commit = {
1914	.get_budget	= scsi_mq_get_budget,
1915	.put_budget	= scsi_mq_put_budget,
1916	.queue_rq	= scsi_queue_rq,
1917	.complete	= scsi_complete,
1918	.timeout	= scsi_timeout,
1919#ifdef CONFIG_BLK_DEBUG_FS
1920	.show_rq	= scsi_show_rq,
1921#endif
1922	.init_request	= scsi_mq_init_request,
1923	.exit_request	= scsi_mq_exit_request,
1924	.cleanup_rq	= scsi_cleanup_rq,
1925	.busy		= scsi_mq_lld_busy,
1926	.map_queues	= scsi_map_queues,
1927	.init_hctx	= scsi_init_hctx,
1928	.poll		= scsi_mq_poll,
1929	.set_rq_budget_token = scsi_mq_set_rq_budget_token,
1930	.get_rq_budget_token = scsi_mq_get_rq_budget_token,
1931};
1932
1933
1934static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
1935{
1936	struct Scsi_Host *shost = hctx->driver_data;
1937
1938	shost->hostt->commit_rqs(shost, hctx->queue_num);
1939}
1940
1941static const struct blk_mq_ops scsi_mq_ops = {
1942	.get_budget	= scsi_mq_get_budget,
1943	.put_budget	= scsi_mq_put_budget,
1944	.queue_rq	= scsi_queue_rq,
1945	.commit_rqs	= scsi_commit_rqs,
1946	.complete	= scsi_complete,
1947	.timeout	= scsi_timeout,
1948#ifdef CONFIG_BLK_DEBUG_FS
1949	.show_rq	= scsi_show_rq,
1950#endif
1951	.init_request	= scsi_mq_init_request,
1952	.exit_request	= scsi_mq_exit_request,
1953	.cleanup_rq	= scsi_cleanup_rq,
1954	.busy		= scsi_mq_lld_busy,
1955	.map_queues	= scsi_map_queues,
1956	.init_hctx	= scsi_init_hctx,
1957	.poll		= scsi_mq_poll,
1958	.set_rq_budget_token = scsi_mq_set_rq_budget_token,
1959	.get_rq_budget_token = scsi_mq_get_rq_budget_token,
1960};
1961
1962int scsi_mq_setup_tags(struct Scsi_Host *shost)
1963{
1964	unsigned int cmd_size, sgl_size;
1965	struct blk_mq_tag_set *tag_set = &shost->tag_set;
1966
1967	sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
1968				scsi_mq_inline_sgl_size(shost));
1969	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
1970	if (scsi_host_get_prot(shost))
1971		cmd_size += sizeof(struct scsi_data_buffer) +
1972			sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
1973
1974	memset(tag_set, 0, sizeof(*tag_set));
1975	if (shost->hostt->commit_rqs)
1976		tag_set->ops = &scsi_mq_ops;
1977	else
1978		tag_set->ops = &scsi_mq_ops_no_commit;
1979	tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
1980	tag_set->nr_maps = shost->nr_maps ? : 1;
1981	tag_set->queue_depth = shost->can_queue;
1982	tag_set->cmd_size = cmd_size;
1983	tag_set->numa_node = dev_to_node(shost->dma_dev);
1984	tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
1985	tag_set->flags |=
1986		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
1987	if (shost->queuecommand_may_block)
1988		tag_set->flags |= BLK_MQ_F_BLOCKING;
1989	tag_set->driver_data = shost;
1990	if (shost->host_tagset)
1991		tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
1992
1993	return blk_mq_alloc_tag_set(tag_set);
1994}
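
/*
 * Illustrative sketch (not part of scsi_lib.c): the host template fields
 * that feed the tag set built above. can_queue becomes the blk-mq queue
 * depth, cmd_size is appended to every scsi_cmnd as driver-private data,
 * and host_tagset makes all hardware queues share one tag space. All
 * values are made up, and mandatory callbacks such as queuecommand are
 * omitted.
 */
static const struct scsi_host_template example_template = {
	.name		= "example",
	.can_queue	= 64,
	.cmd_size	= 128,
	.sg_tablesize	= SG_ALL,
	.host_tagset	= 1,
};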
1995
1996void scsi_mq_free_tags(struct kref *kref)
1997{
1998	struct Scsi_Host *shost = container_of(kref, typeof(*shost),
1999					       tagset_refcnt);
2000
2001	blk_mq_free_tag_set(&shost->tag_set);
2002	complete(&shost->tagset_freed);
2003}
2004
2005/**
2006 * scsi_device_from_queue - return sdev associated with a request_queue
2007 * @q: The request queue to return the sdev from
2008 *
2009 * Return the sdev associated with a request queue or NULL if the
2010 * request_queue does not reference a SCSI device.
2011 */
2012struct scsi_device *scsi_device_from_queue(struct request_queue *q)
2013{
2014	struct scsi_device *sdev = NULL;
2015
2016	if (q->mq_ops == &scsi_mq_ops_no_commit ||
2017	    q->mq_ops == &scsi_mq_ops)
2018		sdev = q->queuedata;
2019	if (!sdev || !get_device(&sdev->sdev_gendev))
2020		sdev = NULL;
2021
2022	return sdev;
2023}
2024/*
2025 * pktcdvd should have been integrated into the SCSI layers, but for historical
2026 * reasons like the old IDE driver it isn't.  This export allows it to safely
2027 * probe if a given device is a SCSI one and only attach to that.
2028 */
2029#ifdef CONFIG_CDROM_PKTCDVD_MODULE
2030EXPORT_SYMBOL_GPL(scsi_device_from_queue);
2031#endif
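
/*
 * Illustrative usage sketch (not part of scsi_lib.c): how a caller such as
 * pktcdvd can probe whether a request queue belongs to a SCSI device. The
 * sdev is returned with a reference held, so it must be dropped again with
 * put_device() once the caller is done with it.
 */
static bool example_queue_is_scsi(struct request_queue *q)
{
	struct scsi_device *sdev = scsi_device_from_queue(q);

	if (!sdev)
		return false;
	put_device(&sdev->sdev_gendev);
	return true;
}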
2032
2033/**
2034 * scsi_block_requests - Utility function used by low-level drivers to prevent
2035 * further commands from being queued to the device.
2036 * @shost:  host in question
2037 *
2038 * There is no timer nor any other means by which the requests get unblocked
2039 * other than the low-level driver calling scsi_unblock_requests().
2040 */
2041void scsi_block_requests(struct Scsi_Host *shost)
2042{
2043	shost->host_self_blocked = 1;
2044}
2045EXPORT_SYMBOL(scsi_block_requests);
2046
2047/**
2048 * scsi_unblock_requests - Utility function used by low-level drivers to allow
2049 * further commands to be queued to the device.
2050 * @shost:  host in question
2051 *
2052 * There is no timer nor any other means by which the requests get unblocked
2053 * other than the low-level driver calling scsi_unblock_requests(). This is done
2054 * as an API function so that changes to the internals of the scsi mid-layer
2055 * won't require wholesale changes to drivers that use this feature.
2056 */
2057void scsi_unblock_requests(struct Scsi_Host *shost)
2058{
2059	shost->host_self_blocked = 0;
2060	scsi_run_host_queues(shost);
2061}
2062EXPORT_SYMBOL(scsi_unblock_requests);
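
/*
 * Illustrative usage sketch (not part of scsi_lib.c): a low-level driver
 * typically brackets an adapter reset with these two helpers so that no new
 * commands reach the hardware while it is being reinitialized. The reset
 * step itself is hypothetical and driver specific.
 */
static void example_reset_adapter(struct Scsi_Host *shost)
{
	scsi_block_requests(shost);
	/* ... reinitialize the HBA here ... */
	scsi_unblock_requests(shost);
}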
2063
2064void scsi_exit_queue(void)
2065{
2066	kmem_cache_destroy(scsi_sense_cache);
2067}
2068
2069/**
2070 *	scsi_mode_select - issue a mode select
2071 *	@sdev:	SCSI device to be queried
2072 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
2073 *	@sp:	Save page bit (0 == don't save, 1 == save)
2074 *	@buffer: request buffer (may not be smaller than eight bytes)
2075 *	@len:	length of request buffer.
2076 *	@timeout: command timeout
2077 *	@retries: number of retries before failing
2078 *	@data: returns a structure abstracting the mode header data
2079 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
2080 *		must be SCSI_SENSE_BUFFERSIZE big.
2081 *
2082 *	Returns zero if successful; negative error number or scsi
2083 *	status on error
2084 *
2085 */
2086int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
2087		     unsigned char *buffer, int len, int timeout, int retries,
2088		     struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2089{
2090	unsigned char cmd[10];
2091	unsigned char *real_buffer;
2092	const struct scsi_exec_args exec_args = {
2093		.sshdr = sshdr,
2094	};
2095	int ret;
2096
2097	memset(cmd, 0, sizeof(cmd));
2098	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
2099
2100	/*
2101	 * Use MODE SELECT(10) if the device asked for it or if the mode page
2102	 * and the mode select header cannot fit within the maximum 255 bytes
2103	 * of the MODE SELECT(6) command.
2104	 */
2105	if (sdev->use_10_for_ms ||
2106	    len + 4 > 255 ||
2107	    data->block_descriptor_length > 255) {
2108		if (len > 65535 - 8)
2109			return -EINVAL;
2110		real_buffer = kmalloc(8 + len, GFP_KERNEL);
2111		if (!real_buffer)
2112			return -ENOMEM;
2113		memcpy(real_buffer + 8, buffer, len);
2114		len += 8;
2115		real_buffer[0] = 0;
2116		real_buffer[1] = 0;
2117		real_buffer[2] = data->medium_type;
2118		real_buffer[3] = data->device_specific;
2119		real_buffer[4] = data->longlba ? 0x01 : 0;
2120		real_buffer[5] = 0;
2121		put_unaligned_be16(data->block_descriptor_length,
2122				   &real_buffer[6]);
2123
2124		cmd[0] = MODE_SELECT_10;
2125		put_unaligned_be16(len, &cmd[7]);
2126	} else {
2127		if (data->longlba)
2128			return -EINVAL;
2129
2130		real_buffer = kmalloc(4 + len, GFP_KERNEL);
2131		if (!real_buffer)
2132			return -ENOMEM;
2133		memcpy(real_buffer + 4, buffer, len);
2134		len += 4;
2135		real_buffer[0] = 0;
2136		real_buffer[1] = data->medium_type;
2137		real_buffer[2] = data->device_specific;
2138		real_buffer[3] = data->block_descriptor_length;
2139
2140		cmd[0] = MODE_SELECT;
2141		cmd[4] = len;
2142	}
2143
2144	ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, real_buffer, len,
2145			       timeout, retries, &exec_args);
2146	kfree(real_buffer);
2147	return ret;
2148}
2149EXPORT_SYMBOL_GPL(scsi_mode_select);
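
/*
 * Illustrative usage sketch (not part of scsi_lib.c): writing back a mode
 * page that was previously read with scsi_mode_sense(). The mode header is
 * rebuilt by scsi_mode_select() from @data, so @page points at the page
 * itself. pf=1 selects the standard page format and sp=1 asks the device to
 * save the page; the timeout and retry count are illustrative values.
 */
static int example_write_mode_page(struct scsi_device *sdev,
				   unsigned char *page, int page_len,
				   struct scsi_mode_data *data)
{
	struct scsi_sense_hdr sshdr;

	return scsi_mode_select(sdev, 1 /* pf */, 1 /* sp */, page, page_len,
				30 * HZ, 3, data, &sshdr);
}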
2150
2151/**
2152 *	scsi_mode_sense - issue a mode sense, falling back from MODE SENSE(10) to MODE SENSE(6) if necessary.
2153 *	@sdev:	SCSI device to be queried
2154 *	@dbd:	set to prevent mode sense from returning block descriptors
2155 *	@modepage: mode page being requested
2156 *	@subpage: sub-page of the mode page being requested
2157 *	@buffer: request buffer (may not be smaller than eight bytes)
2158 *	@len:	length of request buffer.
2159 *	@timeout: command timeout
2160 *	@retries: number of retries before failing
2161 *	@data: returns a structure abstracting the mode header data
2162 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
2163 *		must be SCSI_SENSE_BUFFERSIZE big.
2164 *
2165 *	Returns zero if successful, or a negative error number on failure
2166 */
2167int
2168scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage,
2169		  unsigned char *buffer, int len, int timeout, int retries,
2170		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2171{
2172	unsigned char cmd[12];
2173	int use_10_for_ms;
2174	int header_length;
2175	int result, retry_count = retries;
2176	struct scsi_sense_hdr my_sshdr;
2177	const struct scsi_exec_args exec_args = {
2178		/* caller might not be interested in sense, but we need it */
2179		.sshdr = sshdr ? : &my_sshdr,
2180	};
2181
2182	memset(data, 0, sizeof(*data));
2183	memset(&cmd[0], 0, 12);
2184
2185	dbd = sdev->set_dbd_for_ms ? 8 : dbd;
2186	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
2187	cmd[2] = modepage;
2188	cmd[3] = subpage;
2189
2190	sshdr = exec_args.sshdr;
2191
2192 retry:
2193	use_10_for_ms = sdev->use_10_for_ms || len > 255;
2194
2195	if (use_10_for_ms) {
2196		if (len < 8 || len > 65535)
2197			return -EINVAL;
2198
2199		cmd[0] = MODE_SENSE_10;
2200		put_unaligned_be16(len, &cmd[7]);
2201		header_length = 8;
2202	} else {
2203		if (len < 4)
2204			return -EINVAL;
2205
2206		cmd[0] = MODE_SENSE;
2207		cmd[4] = len;
2208		header_length = 4;
2209	}
2210
2211	memset(buffer, 0, len);
2212
2213	result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len,
2214				  timeout, retries, &exec_args);
2215	if (result < 0)
2216		return result;
2217
2218	/* This code looks awful: what it's doing is making sure an
2219	 * ILLEGAL REQUEST sense return identifies the actual command
2220	 * byte as the problem.  MODE_SENSE commands can return
2221	 * ILLEGAL REQUEST if the code page isn't supported */
2222
2223	if (!scsi_status_is_good(result)) {
2224		if (scsi_sense_valid(sshdr)) {
2225			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
2226			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
2227				/*
2228				 * Invalid command operation code: retry using
2229				 * MODE SENSE(6) if this was a MODE SENSE(10)
2230				 * request, except if the requested mode page is
2231				 * too large for the MODE SENSE(6) single-byte
2232				 * allocation length field.
2233				 */
2234				if (use_10_for_ms) {
2235					if (len > 255)
2236						return -EIO;
2237					sdev->use_10_for_ms = 0;
2238					goto retry;
2239				}
2240			}
2241			if (scsi_status_is_check_condition(result) &&
2242			    sshdr->sense_key == UNIT_ATTENTION &&
2243			    retry_count) {
2244				retry_count--;
2245				goto retry;
2246			}
2247		}
2248		return -EIO;
2249	}
2250	if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2251		     (modepage == 6 || modepage == 8))) {
2252		/* Initio breakage? */
2253		header_length = 0;
2254		data->length = 13;
2255		data->medium_type = 0;
2256		data->device_specific = 0;
2257		data->longlba = 0;
2258		data->block_descriptor_length = 0;
2259	} else if (use_10_for_ms) {
2260		data->length = get_unaligned_be16(&buffer[0]) + 2;
2261		data->medium_type = buffer[2];
2262		data->device_specific = buffer[3];
2263		data->longlba = buffer[4] & 0x01;
2264		data->block_descriptor_length = get_unaligned_be16(&buffer[6]);
2265	} else {
2266		data->length = buffer[0] + 1;
2267		data->medium_type = buffer[1];
2268		data->device_specific = buffer[2];
2269		data->block_descriptor_length = buffer[3];
2270	}
2271	data->header_length = header_length;
2272
2273	return 0;
2274}
2275EXPORT_SYMBOL(scsi_mode_sense);
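
/*
 * Illustrative usage sketch (not part of scsi_lib.c): reading the caching
 * mode page (0x08), similar to what the sd driver does. dbd=0 permits block
 * descriptors and subpage 0 selects the base page; the timeout and retry
 * count are illustrative. On success the page itself starts at
 * data.header_length + data.block_descriptor_length within @buf.
 */
static int example_read_caching_page(struct scsi_device *sdev,
				     unsigned char *buf, int buf_len)
{
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int ret;

	ret = scsi_mode_sense(sdev, 0, 0x08, 0, buf, buf_len,
			      30 * HZ, 3, &data, &sshdr);
	if (ret)
		return ret;

	return data.header_length + data.block_descriptor_length;
}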
2276
2277/**
2278 *	scsi_test_unit_ready - test if unit is ready
2279 *	@sdev:	scsi device to change the state of.
2280 *	@timeout: command timeout
2281 *	@retries: number of retries before failing
2282 *	@sshdr: output pointer for decoded sense information.
2283 *
2284 *	Returns zero if successful or an error if the TUR failed.  For
2285 *	removable media, UNIT_ATTENTION sets ->changed flag.
2286 **/
2287int
2288scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2289		     struct scsi_sense_hdr *sshdr)
2290{
2291	char cmd[] = {
2292		TEST_UNIT_READY, 0, 0, 0, 0, 0,
2293	};
2294	const struct scsi_exec_args exec_args = {
2295		.sshdr = sshdr,
2296	};
2297	int result;
2298
2299	/* try to eat the UNIT_ATTENTION if there are enough retries */
2300	do {
2301		result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
2302					  timeout, 1, &exec_args);
2303		if (sdev->removable && result > 0 && scsi_sense_valid(sshdr) &&
2304		    sshdr->sense_key == UNIT_ATTENTION)
2305			sdev->changed = 1;
2306	} while (result > 0 && scsi_sense_valid(sshdr) &&
2307		 sshdr->sense_key == UNIT_ATTENTION && --retries);
2308
2309	return result;
2310}
2311EXPORT_SYMBOL(scsi_test_unit_ready);
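
/*
 * Illustrative usage sketch (not part of scsi_lib.c): polling a device until
 * it reports ready, e.g. after new media was inserted. The retry count lets
 * the helper absorb UNIT ATTENTION conditions; both values are illustrative.
 */
static bool example_device_is_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;

	return scsi_test_unit_ready(sdev, 10 * HZ, 5, &sshdr) == 0;
}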
2312
2313/**
2314 *	scsi_device_set_state - Take the given device through the device state model.
2315 *	@sdev:	scsi device to change the state of.
2316 *	@state:	state to change to.
2317 *
2318 *	Returns zero if successful or an error if the requested
2319 *	transition is illegal.
2320 */
2321int
2322scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2323{
2324	enum scsi_device_state oldstate = sdev->sdev_state;
2325
2326	if (state == oldstate)
2327		return 0;
2328
2329	switch (state) {
2330	case SDEV_CREATED:
2331		switch (oldstate) {
2332		case SDEV_CREATED_BLOCK:
2333			break;
2334		default:
2335			goto illegal;
2336		}
2337		break;
2338
2339	case SDEV_RUNNING:
2340		switch (oldstate) {
2341		case SDEV_CREATED:
2342		case SDEV_OFFLINE:
2343		case SDEV_TRANSPORT_OFFLINE:
2344		case SDEV_QUIESCE:
2345		case SDEV_BLOCK:
2346			break;
2347		default:
2348			goto illegal;
2349		}
2350		break;
2351
2352	case SDEV_QUIESCE:
2353		switch (oldstate) {
2354		case SDEV_RUNNING:
2355		case SDEV_OFFLINE:
2356		case SDEV_TRANSPORT_OFFLINE:
2357			break;
2358		default:
2359			goto illegal;
2360		}
2361		break;
2362
2363	case SDEV_OFFLINE:
2364	case SDEV_TRANSPORT_OFFLINE:
2365		switch (oldstate) {
2366		case SDEV_CREATED:
2367		case SDEV_RUNNING:
2368		case SDEV_QUIESCE:
2369		case SDEV_BLOCK:
2370			break;
2371		default:
2372			goto illegal;
2373		}
2374		break;
2375
2376	case SDEV_BLOCK:
2377		switch (oldstate) {
2378		case SDEV_RUNNING:
2379		case SDEV_CREATED_BLOCK:
2380		case SDEV_QUIESCE:
2381		case SDEV_OFFLINE:
2382			break;
2383		default:
2384			goto illegal;
2385		}
2386		break;
2387
2388	case SDEV_CREATED_BLOCK:
2389		switch (oldstate) {
2390		case SDEV_CREATED:
2391			break;
2392		default:
2393			goto illegal;
2394		}
2395		break;
2396
2397	case SDEV_CANCEL:
2398		switch (oldstate) {
2399		case SDEV_CREATED:
2400		case SDEV_RUNNING:
2401		case SDEV_QUIESCE:
2402		case SDEV_OFFLINE:
2403		case SDEV_TRANSPORT_OFFLINE:
2404			break;
2405		default:
2406			goto illegal;
2407		}
2408		break;
2409
2410	case SDEV_DEL:
2411		switch (oldstate) {
2412		case SDEV_CREATED:
2413		case SDEV_RUNNING:
2414		case SDEV_OFFLINE:
2415		case SDEV_TRANSPORT_OFFLINE:
2416		case SDEV_CANCEL:
2417		case SDEV_BLOCK:
2418		case SDEV_CREATED_BLOCK:
2419			break;
2420		default:
2421			goto illegal;
2422		}
2423		break;
2424
2425	}
2426	sdev->offline_already = false;
2427	sdev->sdev_state = state;
2428	return 0;
2429
2430 illegal:
2431	SCSI_LOG_ERROR_RECOVERY(1,
2432				sdev_printk(KERN_ERR, sdev,
2433					    "Illegal state transition %s->%s",
2434					    scsi_device_state_name(oldstate),
2435					    scsi_device_state_name(state))
2436				);
2437	return -EINVAL;
2438}
2439EXPORT_SYMBOL(scsi_device_set_state);
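
/*
 * Illustrative usage sketch (not part of scsi_lib.c): callers such as the
 * transport classes hold sdev->state_mutex around the transition so that the
 * legality check and the state update are atomic with respect to other
 * writers of sdev_state.
 */
static int example_take_device_offline(struct scsi_device *sdev)
{
	int ret;

	mutex_lock(&sdev->state_mutex);
	ret = scsi_device_set_state(sdev, SDEV_OFFLINE);
	mutex_unlock(&sdev->state_mutex);

	return ret;
}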
2440
2441/**
2442 *	scsi_evt_emit - emit a single SCSI device uevent
2443 *	@sdev: associated SCSI device
2444 *	@evt: event to emit
2445 *
2446 *	Send a single uevent (scsi_event) to the associated scsi_device.
2447 */
2448static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2449{
2450	int idx = 0;
2451	char *envp[3];
2452
2453	switch (evt->evt_type) {
2454	case SDEV_EVT_MEDIA_CHANGE:
2455		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2456		break;
2457	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2458		scsi_rescan_device(sdev);
2459		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
2460		break;
2461	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2462		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
2463		break;
2464	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2465	       envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
2466		break;
2467	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2468		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
2469		break;
2470	case SDEV_EVT_LUN_CHANGE_REPORTED:
2471		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
2472		break;
2473	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
2474		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
2475		break;
2476	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
2477		envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
2478		break;
2479	default:
2480		/* do nothing */
2481		break;
2482	}
2483
2484	envp[idx++] = NULL;
2485
2486	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2487}
2488
2489/**
2490 *	scsi_evt_thread - send a uevent for each scsi event
2491 *	@work: work struct for scsi_device
2492 *
2493 *	Dispatch queued events to their associated scsi_device kobjects
2494 *	as uevents.
2495 */
2496void scsi_evt_thread(struct work_struct *work)
2497{
2498	struct scsi_device *sdev;
2499	enum scsi_device_event evt_type;
2500	LIST_HEAD(event_list);
2501
2502	sdev = container_of(work, struct scsi_device, event_work);
2503
2504	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
2505		if (test_and_clear_bit(evt_type, sdev->pending_events))
2506			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
2507
2508	while (1) {
2509		struct scsi_event *evt;
2510		struct list_head *this, *tmp;
2511		unsigned long flags;
2512
2513		spin_lock_irqsave(&sdev->list_lock, flags);
2514		list_splice_init(&sdev->event_list, &event_list);
2515		spin_unlock_irqrestore(&sdev->list_lock, flags);
2516
2517		if (list_empty(&event_list))
2518			break;
2519
2520		list_for_each_safe(this, tmp, &event_list) {
2521			evt = list_entry(this, struct scsi_event, node);
2522			list_del(&evt->node);
2523			scsi_evt_emit(sdev, evt);
2524			kfree(evt);
2525		}
2526	}
2527}
2528
2529/**
2530 * 	sdev_evt_send - send asserted event to uevent thread
2531 *	@sdev: scsi_device event occurred on
2532 *	@evt: event to send
2533 *
2534 *	Assert scsi device event asynchronously.
2535 */
2536void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2537{
2538	unsigned long flags;
2539
2540#if 0
2541	/* FIXME: currently this check eliminates all media change events
2542	 * for polled devices.  Need to update to discriminate between AN
2543	 * and polled events */
2544	if (!test_bit(evt->evt_type, sdev->supported_events)) {
2545		kfree(evt);
2546		return;
2547	}
2548#endif
2549
2550	spin_lock_irqsave(&sdev->list_lock, flags);
2551	list_add_tail(&evt->node, &sdev->event_list);
2552	schedule_work(&sdev->event_work);
2553	spin_unlock_irqrestore(&sdev->list_lock, flags);
2554}
2555EXPORT_SYMBOL_GPL(sdev_evt_send);
2556
2557/**
2558 * 	sdev_evt_alloc - allocate a new scsi event
2559 *	@evt_type: type of event to allocate
2560 *	@gfpflags: GFP flags for allocation
2561 *
2562 *	Allocates and returns a new scsi_event.
2563 */
2564struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2565				  gfp_t gfpflags)
2566{
2567	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2568	if (!evt)
2569		return NULL;
2570
2571	evt->evt_type = evt_type;
2572	INIT_LIST_HEAD(&evt->node);
2573
2574	/* evt_type-specific initialization, if any */
2575	switch (evt_type) {
2576	case SDEV_EVT_MEDIA_CHANGE:
2577	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2578	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2579	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2580	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2581	case SDEV_EVT_LUN_CHANGE_REPORTED:
2582	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
2583	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
2584	default:
2585		/* do nothing */
2586		break;
2587	}
2588
2589	return evt;
2590}
2591EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2592
2593/**
2594 * 	sdev_evt_send_simple - send asserted event to uevent thread
2595 *	@sdev: scsi_device event occurred on
2596 *	@evt_type: type of event to send
2597 *	@gfpflags: GFP flags for allocation
2598 *
2599 *	Assert scsi device event asynchronously, given an event type.
2600 */
2601void sdev_evt_send_simple(struct scsi_device *sdev,
2602			  enum scsi_device_event evt_type, gfp_t gfpflags)
2603{
2604	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2605	if (!evt) {
2606		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2607			    evt_type);
2608		return;
2609	}
2610
2611	sdev_evt_send(sdev, evt);
2612}
2613EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
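
/*
 * Illustrative usage sketch (not part of scsi_lib.c): a driver that detects
 * a media change, e.g. from an asynchronous notification, asserts the event
 * here; the uevent is later emitted from scsi_evt_thread().
 */
static void example_report_media_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
}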
2614
2615/**
2616 *	scsi_device_quiesce - Block all commands except power management.
2617 *	@sdev:	scsi device to quiesce.
2618 *
2619 *	This works by trying to transition to the SDEV_QUIESCE state
2620 *	(which must be a legal transition).  When the device is in this
2621 *	state, only power management requests will be accepted, all others will
2622 *	be deferred.
2623 *
2624 *	Must be called with user context, may sleep.
2625 *
2626 *	Returns zero if successful or an error if not.
2627 */
2628int
2629scsi_device_quiesce(struct scsi_device *sdev)
2630{
2631	struct request_queue *q = sdev->request_queue;
2632	int err;
2633
2634	/*
2635	 * It is allowed to call scsi_device_quiesce() multiple times from
2636	 * the same context but concurrent scsi_device_quiesce() calls are
2637	 * not allowed.
2638	 */
2639	WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
2640
2641	if (sdev->quiesced_by == current)
2642		return 0;
2643
2644	blk_set_pm_only(q);
2645
2646	blk_mq_freeze_queue(q);
2647	/*
2648	 * Ensure that the effect of blk_set_pm_only() will be visible
2649	 * for percpu_ref_tryget() callers that occur after the queue
2650	 * unfreeze even if the queue was already frozen before this function
2651	 * was called. See also https://lwn.net/Articles/573497/.
2652	 */
2653	synchronize_rcu();
2654	blk_mq_unfreeze_queue(q);
2655
2656	mutex_lock(&sdev->state_mutex);
2657	err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2658	if (err == 0)
2659		sdev->quiesced_by = current;
2660	else
2661		blk_clear_pm_only(q);
2662	mutex_unlock(&sdev->state_mutex);
2663
2664	return err;
2665}
2666EXPORT_SYMBOL(scsi_device_quiesce);
2667
2668/**
2669 *	scsi_device_resume - Restart user issued commands to a quiesced device.
2670 *	@sdev:	scsi device to resume.
2671 *
2672 *	Moves the device from quiesced back to running and restarts the
2673 *	queues.
2674 *
2675 *	Must be called with user context, may sleep.
2676 */
2677void scsi_device_resume(struct scsi_device *sdev)
2678{
2679	/* check if the device state was mutated prior to resume, and if
2680	 * so assume the state is being managed elsewhere (for example
2681	 * device deleted during suspend)
2682	 */
2683	mutex_lock(&sdev->state_mutex);
2684	if (sdev->sdev_state == SDEV_QUIESCE)
2685		scsi_device_set_state(sdev, SDEV_RUNNING);
2686	if (sdev->quiesced_by) {
2687		sdev->quiesced_by = NULL;
2688		blk_clear_pm_only(sdev->request_queue);
2689	}
2690	mutex_unlock(&sdev->state_mutex);
2691}
2692EXPORT_SYMBOL(scsi_device_resume);
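
/*
 * Illustrative usage sketch (not part of scsi_lib.c): the typical power
 * management pattern of quiescing a LUN, issuing PM-only requests (for
 * example START STOP UNIT) and resuming it. Error handling for the PM
 * commands themselves is omitted.
 */
static int example_suspend_lun(struct scsi_device *sdev)
{
	int ret = scsi_device_quiesce(sdev);

	if (ret)
		return ret;
	/* ... issue power-management commands here ... */
	scsi_device_resume(sdev);
	return 0;
}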
2693
2694static void
2695device_quiesce_fn(struct scsi_device *sdev, void *data)
2696{
2697	scsi_device_quiesce(sdev);
2698}
2699
2700void
2701scsi_target_quiesce(struct scsi_target *starget)
2702{
2703	starget_for_each_device(starget, NULL, device_quiesce_fn);
2704}
2705EXPORT_SYMBOL(scsi_target_quiesce);
2706
2707static void
2708device_resume_fn(struct scsi_device *sdev, void *data)
2709{
2710	scsi_device_resume(sdev);
2711}
2712
2713void
2714scsi_target_resume(struct scsi_target *starget)
2715{
2716	starget_for_each_device(starget, NULL, device_resume_fn);
2717}
2718EXPORT_SYMBOL(scsi_target_resume);
2719
2720static int __scsi_internal_device_block_nowait(struct scsi_device *sdev)
2721{
2722	if (scsi_device_set_state(sdev, SDEV_BLOCK))
2723		return scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2724
2725	return 0;
2726}
2727
2728void scsi_start_queue(struct scsi_device *sdev)
2729{
2730	if (cmpxchg(&sdev->queue_stopped, 1, 0))
2731		blk_mq_unquiesce_queue(sdev->request_queue);
2732}
2733
2734static void scsi_stop_queue(struct scsi_device *sdev)
2735{
2736	/*
2737	 * The atomic variable ->queue_stopped ensures that calls of
2738	 * blk_mq_quiesce_queue*() are balanced with blk_mq_unquiesce_queue().
2739	 *
2740	 * The caller needs to wait until quiesce is done.
2741	 */
2742	if (!cmpxchg(&sdev->queue_stopped, 0, 1))
2743		blk_mq_quiesce_queue_nowait(sdev->request_queue);
2744}
2745
2746/**
2747 * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
2748 * @sdev: device to block
2749 *
2750 * Pause SCSI command processing on the specified device. Does not sleep.
2751 *
2752 * Returns zero if successful or a negative error code upon failure.
2753 *
2754 * Notes:
2755 * This routine transitions the device to the SDEV_BLOCK state (which must be
2756 * a legal transition). When the device is in this state, command processing
2757 * is paused until the device leaves the SDEV_BLOCK state. See also
2758 * scsi_internal_device_unblock_nowait().
2759 */
2760int scsi_internal_device_block_nowait(struct scsi_device *sdev)
2761{
2762	int ret = __scsi_internal_device_block_nowait(sdev);
2763
2764	/*
2765	 * The device has transitioned to SDEV_BLOCK.  Stop the
2766	 * block layer from calling the midlayer with this device's
2767	 * request queue.
2768	 */
2769	if (!ret)
2770		scsi_stop_queue(sdev);
2771	return ret;
2772}
2773EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
2774
2775/**
2776 * scsi_device_block - try to transition to the SDEV_BLOCK state
2777 * @sdev: device to block
2778 * @data: dummy argument, ignored
2779 *
2780 * Pause SCSI command processing on the specified device. Callers must wait
2781 * until all ongoing scsi_queue_rq() calls have finished after this function
2782 * returns.
2783 *
2784 * Note:
2785 * This routine transitions the device to the SDEV_BLOCK state (which must be
2786 * a legal transition). When the device is in this state, command processing
2787 * is paused until the device leaves the SDEV_BLOCK state. See also
2788 * scsi_internal_device_unblock().
2789 */
2790static void scsi_device_block(struct scsi_device *sdev, void *data)
2791{
2792	int err;
2793	enum scsi_device_state state;
2794
2795	mutex_lock(&sdev->state_mutex);
2796	err = __scsi_internal_device_block_nowait(sdev);
2797	state = sdev->sdev_state;
2798	if (err == 0)
2799		/*
2800		 * scsi_stop_queue() must be called with the state_mutex
2801		 * held. Otherwise a simultaneous scsi_start_queue() call
2802		 * might unquiesce the queue before we quiesce it.
2803		 */
2804		scsi_stop_queue(sdev);
2805
2806	mutex_unlock(&sdev->state_mutex);
2807
2808	WARN_ONCE(err, "%s: failed to block %s in state %d\n",
2809		  __func__, dev_name(&sdev->sdev_gendev), state);
2810}
2811
2812/**
2813 * scsi_internal_device_unblock_nowait - resume a device after a block request
2814 * @sdev:	device to resume
2815 * @new_state:	state to set the device to after unblocking
2816 *
2817 * Restart the device queue for a previously suspended SCSI device. Does not
2818 * sleep.
2819 *
2820 * Returns zero if successful or a negative error code upon failure.
2821 *
2822 * Notes:
2823 * This routine transitions the device to the SDEV_RUNNING state or to one of
2824 * the offline states (which must be a legal transition) allowing the midlayer
2825 * to goose the queue for this device.
2826 */
2827int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
2828					enum scsi_device_state new_state)
2829{
2830	switch (new_state) {
2831	case SDEV_RUNNING:
2832	case SDEV_TRANSPORT_OFFLINE:
2833		break;
2834	default:
2835		return -EINVAL;
2836	}
2837
2838	/*
2839	 * Try to transition the scsi device to SDEV_RUNNING or one of the
2840	 * offlined states and goose the device queue if successful.
2841	 */
2842	switch (sdev->sdev_state) {
2843	case SDEV_BLOCK:
2844	case SDEV_TRANSPORT_OFFLINE:
2845		sdev->sdev_state = new_state;
2846		break;
2847	case SDEV_CREATED_BLOCK:
2848		if (new_state == SDEV_TRANSPORT_OFFLINE ||
2849		    new_state == SDEV_OFFLINE)
2850			sdev->sdev_state = new_state;
2851		else
2852			sdev->sdev_state = SDEV_CREATED;
2853		break;
2854	case SDEV_CANCEL:
2855	case SDEV_OFFLINE:
2856		break;
2857	default:
2858		return -EINVAL;
2859	}
2860	scsi_start_queue(sdev);
2861
2862	return 0;
2863}
2864EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);
2865
2866/**
2867 * scsi_internal_device_unblock - resume a device after a block request
2868 * @sdev:	device to resume
2869 * @new_state:	state to set the device to after unblocking
2870 *
2871 * Restart the device queue for a previously suspended SCSI device. May sleep.
2872 *
2873 * Returns zero if successful or a negative error code upon failure.
2874 *
2875 * Notes:
2876 * This routine transitions the device to the SDEV_RUNNING state or to one of
2877 * the offline states (which must be a legal transition) allowing the midlayer
2878 * to goose the queue for this device.
2879 */
2880static int scsi_internal_device_unblock(struct scsi_device *sdev,
2881					enum scsi_device_state new_state)
2882{
2883	int ret;
2884
2885	mutex_lock(&sdev->state_mutex);
2886	ret = scsi_internal_device_unblock_nowait(sdev, new_state);
2887	mutex_unlock(&sdev->state_mutex);
2888
2889	return ret;
2890}
2891
2892static int
2893target_block(struct device *dev, void *data)
2894{
2895	if (scsi_is_target_device(dev))
2896		starget_for_each_device(to_scsi_target(dev), NULL,
2897					scsi_device_block);
2898	return 0;
2899}
2900
2901/**
2902 * scsi_block_targets - transition all SCSI child devices to SDEV_BLOCK state
2903 * @dev: a parent device of one or more scsi_target devices
2904 * @shost: the Scsi_Host to which this device belongs
2905 *
2906 * Iterate over all children of @dev, which should be scsi_target devices,
2907 * and switch all subordinate scsi devices to SDEV_BLOCK state. Wait for
2908 * ongoing scsi_queue_rq() calls to finish. May sleep.
2909 *
2910 * Note:
2911 * @dev must not itself be a scsi_target device.
2912 */
2913void
2914scsi_block_targets(struct Scsi_Host *shost, struct device *dev)
2915{
2916	WARN_ON_ONCE(scsi_is_target_device(dev));
2917	device_for_each_child(dev, NULL, target_block);
2918	blk_mq_wait_quiesce_done(&shost->tag_set);
2919}
2920EXPORT_SYMBOL_GPL(scsi_block_targets);
2921
2922static void
2923device_unblock(struct scsi_device *sdev, void *data)
2924{
2925	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
2926}
2927
2928static int
2929target_unblock(struct device *dev, void *data)
2930{
2931	if (scsi_is_target_device(dev))
2932		starget_for_each_device(to_scsi_target(dev), data,
2933					device_unblock);
2934	return 0;
2935}
2936
2937void
2938scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
2939{
2940	if (scsi_is_target_device(dev))
2941		starget_for_each_device(to_scsi_target(dev), &new_state,
2942					device_unblock);
2943	else
2944		device_for_each_child(dev, &new_state, target_unblock);
2945}
2946EXPORT_SYMBOL_GPL(scsi_target_unblock);
2947
2948/**
2949 * scsi_host_block - Try to transition all logical units to the SDEV_BLOCK state
2950 * @shost: device to block
2951 *
2952 * Pause SCSI command processing for all logical units associated with the SCSI
2953 * host and wait until pending scsi_queue_rq() calls have finished.
2954 *
2955 * Returns zero if successful or a negative error code upon failure.
2956 */
2957int
2958scsi_host_block(struct Scsi_Host *shost)
2959{
2960	struct scsi_device *sdev;
2961	int ret;
2962
2963	/*
2964	 * Call scsi_internal_device_block_nowait so we can avoid
2965	 * calling synchronize_rcu() for each LUN.
2966	 */
2967	shost_for_each_device(sdev, shost) {
2968		mutex_lock(&sdev->state_mutex);
2969		ret = scsi_internal_device_block_nowait(sdev);
2970		mutex_unlock(&sdev->state_mutex);
2971		if (ret) {
2972			scsi_device_put(sdev);
2973			return ret;
2974		}
2975	}
2976
2977	/* Wait for ongoing scsi_queue_rq() calls to finish. */
2978	blk_mq_wait_quiesce_done(&shost->tag_set);
2979
2980	return 0;
2981}
2982EXPORT_SYMBOL_GPL(scsi_host_block);
2983
2984int
2985scsi_host_unblock(struct Scsi_Host *shost, int new_state)
2986{
2987	struct scsi_device *sdev;
2988	int ret = 0;
2989
2990	shost_for_each_device(sdev, shost) {
2991		ret = scsi_internal_device_unblock(sdev, new_state);
2992		if (ret) {
2993			scsi_device_put(sdev);
2994			break;
2995		}
2996	}
2997	return ret;
2998}
2999EXPORT_SYMBOL_GPL(scsi_host_unblock);
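
/*
 * Illustrative usage sketch (not part of scsi_lib.c): pausing all logical
 * units of a host around a controller reset and resuming them afterwards.
 * The reset step is hypothetical and driver specific.
 */
static int example_host_reset(struct Scsi_Host *shost)
{
	int ret = scsi_host_block(shost);

	if (ret)
		return ret;
	/* ... reset the controller here ... */
	return scsi_host_unblock(shost, SDEV_RUNNING);
}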
3000
3001/**
3002 * scsi_kmap_atomic_sg - find and atomically map an sg-element
3003 * @sgl:	scatter-gather list
3004 * @sg_count:	number of segments in sg
3005 * @offset:	offset in bytes into sg, on return offset into the mapped area
3006 * @len:	bytes to map, on return number of bytes mapped
3007 *
3008 * Returns virtual address of the start of the mapped page
3009 */
3010void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
3011			  size_t *offset, size_t *len)
3012{
3013	int i;
3014	size_t sg_len = 0, len_complete = 0;
3015	struct scatterlist *sg;
3016	struct page *page;
3017
3018	WARN_ON(!irqs_disabled());
3019
3020	for_each_sg(sgl, sg, sg_count, i) {
3021		len_complete = sg_len; /* Complete sg-entries */
3022		sg_len += sg->length;
3023		if (sg_len > *offset)
3024			break;
3025	}
3026
3027	if (unlikely(i == sg_count)) {
3028		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
3029			"elements %d\n",
3030		       __func__, sg_len, *offset, sg_count);
3031		WARN_ON(1);
3032		return NULL;
3033	}
3034
3035	/* Offset starting from the beginning of first page in this sg-entry */
3036	*offset = *offset - len_complete + sg->offset;
3037
3038	/* Assumption: contiguous pages can be accessed as "page + i" */
3039	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
3040	*offset &= ~PAGE_MASK;
3041
3042	/* Bytes in this sg-entry from *offset to the end of the page */
3043	sg_len = PAGE_SIZE - *offset;
3044	if (*len > sg_len)
3045		*len = sg_len;
3046
3047	return kmap_atomic(page);
3048}
3049EXPORT_SYMBOL(scsi_kmap_atomic_sg);
3050
3051/**
3052 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
3053 * @virt:	virtual address to be unmapped
3054 */
3055void scsi_kunmap_atomic_sg(void *virt)
3056{
3057	kunmap_atomic(virt);
3058}
3059EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
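
/*
 * Illustrative usage sketch (not part of scsi_lib.c): peeking at the first
 * bytes of a scatter-gather list. Must run with interrupts disabled, see the
 * WARN_ON in scsi_kmap_atomic_sg(). On return @offset and @len describe the
 * mapped page, and @len may be smaller than the number of bytes requested
 * when the range crosses a page boundary.
 */
static void example_peek_sg(struct scatterlist *sgl, int sg_count,
			    void *dst, size_t want)
{
	size_t offset = 0, len = want;
	void *vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &offset, &len);

	if (!vaddr)
		return;
	memcpy(dst, vaddr + offset, len);	/* len may be < want */
	scsi_kunmap_atomic_sg(vaddr);
}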
3060
3061void sdev_disable_disk_events(struct scsi_device *sdev)
3062{
3063	atomic_inc(&sdev->disk_events_disable_depth);
3064}
3065EXPORT_SYMBOL(sdev_disable_disk_events);
3066
3067void sdev_enable_disk_events(struct scsi_device *sdev)
3068{
3069	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
3070		return;
3071	atomic_dec(&sdev->disk_events_disable_depth);
3072}
3073EXPORT_SYMBOL(sdev_enable_disk_events);
3074
3075static unsigned char designator_prio(const unsigned char *d)
3076{
3077	if (d[1] & 0x30)
3078		/* not associated with LUN */
3079		return 0;
3080
3081	if (d[3] == 0)
3082		/* invalid length */
3083		return 0;
3084
3085	/*
3086	 * Order of preference for lun descriptor:
3087	 * - SCSI name string
3088	 * - NAA IEEE Registered Extended
3089	 * - EUI-64 based 16-byte
3090	 * - EUI-64 based 12-byte
3091	 * - NAA IEEE Registered
3092	 * - NAA IEEE Extended
3093	 * - EUI-64 based 8-byte
3094	 * - SCSI name string (truncated)
3095	 * - T10 Vendor ID
3096	 * as longer descriptors reduce the likelihood
3097	 * of identification clashes.
3098	 */
3099
3100	switch (d[1] & 0xf) {
3101	case 8:
3102		/* SCSI name string, variable-length UTF-8 */
3103		return 9;
3104	case 3:
3105		switch (d[4] >> 4) {
3106		case 6:
3107			/* NAA registered extended */
3108			return 8;
3109		case 5:
3110			/* NAA registered */
3111			return 5;
3112		case 4:
3113			/* NAA extended */
3114			return 4;
3115		case 3:
3116			/* NAA locally assigned */
3117			return 1;
3118		default:
3119			break;
3120		}
3121		break;
3122	case 2:
3123		switch (d[3]) {
3124		case 16:
3125			/* EUI64-based, 16 byte */
3126			return 7;
3127		case 12:
3128			/* EUI64-based, 12 byte */
3129			return 6;
3130		case 8:
3131			/* EUI64-based, 8 byte */
3132			return 3;
3133		default:
3134			break;
3135		}
3136		break;
3137	case 1:
3138		/* T10 vendor ID */
3139		return 1;
3140	default:
3141		break;
3142	}
3143
3144	return 0;
3145}
3146
3147/**
3148 * scsi_vpd_lun_id - return a unique device identification
3149 * @sdev: SCSI device
3150 * @id:   buffer for the identification
3151 * @id_len:  length of the buffer
3152 *
3153 * Copies a unique device identification into @id based
3154 * on the information in the VPD page 0x83 of the device.
3155 * The string will be formatted as a SCSI name string.
3156 *
3157 * Returns the length of the identification or error on failure.
3158 * If the identifier is longer than the supplied buffer the actual
3159 * identifier length is returned and the buffer is not zero-padded.
3160 */
3161int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
3162{
3163	u8 cur_id_prio = 0;
3164	u8 cur_id_size = 0;
3165	const unsigned char *d, *cur_id_str;
3166	const struct scsi_vpd *vpd_pg83;
3167	int id_size = -EINVAL;
3168
3169	rcu_read_lock();
3170	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
3171	if (!vpd_pg83) {
3172		rcu_read_unlock();
3173		return -ENXIO;
3174	}
3175
3176	/* The id string must be at least 20 bytes + terminating NULL byte */
3177	if (id_len < 21) {
3178		rcu_read_unlock();
3179		return -EINVAL;
3180	}
3181
3182	memset(id, 0, id_len);
3183	for (d = vpd_pg83->data + 4;
3184	     d < vpd_pg83->data + vpd_pg83->len;
3185	     d += d[3] + 4) {
3186		u8 prio = designator_prio(d);
3187
3188		if (prio == 0 || cur_id_prio > prio)
3189			continue;
3190
3191		switch (d[1] & 0xf) {
3192		case 0x1:
3193			/* T10 Vendor ID */
3194			if (cur_id_size > d[3])
3195				break;
3196			cur_id_prio = prio;
3197			cur_id_size = d[3];
3198			if (cur_id_size + 4 > id_len)
3199				cur_id_size = id_len - 4;
3200			cur_id_str = d + 4;
3201			id_size = snprintf(id, id_len, "t10.%*pE",
3202					   cur_id_size, cur_id_str);
3203			break;
3204		case 0x2:
3205			/* EUI-64 */
3206			cur_id_prio = prio;
3207			cur_id_size = d[3];
3208			cur_id_str = d + 4;
3209			switch (cur_id_size) {
3210			case 8:
3211				id_size = snprintf(id, id_len,
3212						   "eui.%8phN",
3213						   cur_id_str);
3214				break;
3215			case 12:
3216				id_size = snprintf(id, id_len,
3217						   "eui.%12phN",
3218						   cur_id_str);
3219				break;
3220			case 16:
3221				id_size = snprintf(id, id_len,
3222						   "eui.%16phN",
3223						   cur_id_str);
3224				break;
3225			default:
3226				break;
3227			}
3228			break;
3229		case 0x3:
3230			/* NAA */
3231			cur_id_prio = prio;
3232			cur_id_size = d[3];
3233			cur_id_str = d + 4;
3234			switch (cur_id_size) {
3235			case 8:
3236				id_size = snprintf(id, id_len,
3237						   "naa.%8phN",
3238						   cur_id_str);
3239				break;
3240			case 16:
3241				id_size = snprintf(id, id_len,
3242						   "naa.%16phN",
3243						   cur_id_str);
3244				break;
3245			default:
3246				break;
3247			}
3248			break;
3249		case 0x8:
3250			/* SCSI name string */
3251			if (cur_id_size > d[3])
3252				break;
3253			/* Prefer others for truncated descriptor */
3254			if (d[3] > id_len) {
3255				prio = 2;
3256				if (cur_id_prio > prio)
3257					break;
3258			}
3259			cur_id_prio = prio;
3260			cur_id_size = id_size = d[3];
3261			cur_id_str = d + 4;
3262			if (cur_id_size >= id_len)
3263				cur_id_size = id_len - 1;
3264			memcpy(id, cur_id_str, cur_id_size);
3265			break;
3266		default:
3267			break;
3268		}
3269	}
3270	rcu_read_unlock();
3271
3272	return id_size;
3273}
3274EXPORT_SYMBOL(scsi_vpd_lun_id);
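
/*
 * Illustrative usage sketch (not part of scsi_lib.c): fetching the LUN
 * designator, e.g. for device naming in a multipath setup. The buffer size
 * is illustrative; a return value >= sizeof(id) would indicate a truncated
 * identifier.
 */
static void example_print_lun_id(struct scsi_device *sdev)
{
	char id[64];
	int ret = scsi_vpd_lun_id(sdev, id, sizeof(id));

	if (ret > 0 && ret < (int)sizeof(id))
		sdev_printk(KERN_INFO, sdev, "lun id: %s\n", id);
}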
3275
3276/*
3277 * scsi_vpd_tpg_id - return a target port group identifier
3278 * @sdev: SCSI device
3279 *
3280 * Returns the Target Port Group identifier from the information
3281 * in VPD page 0x83 of the device.
3282 *
3283 * Returns the identifier or error on failure.
3284 */
3285int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
3286{
3287	const unsigned char *d;
3288	const struct scsi_vpd *vpd_pg83;
3289	int group_id = -EAGAIN, rel_port = -1;
3290
3291	rcu_read_lock();
3292	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
3293	if (!vpd_pg83) {
3294		rcu_read_unlock();
3295		return -ENXIO;
3296	}
3297
3298	d = vpd_pg83->data + 4;
3299	while (d < vpd_pg83->data + vpd_pg83->len) {
3300		switch (d[1] & 0xf) {
3301		case 0x4:
3302			/* Relative target port */
3303			rel_port = get_unaligned_be16(&d[6]);
3304			break;
3305		case 0x5:
3306			/* Target port group */
3307			group_id = get_unaligned_be16(&d[6]);
3308			break;
3309		default:
3310			break;
3311		}
3312		d += d[3] + 4;
3313	}
3314	rcu_read_unlock();
3315
3316	if (group_id >= 0 && rel_id && rel_port != -1)
3317		*rel_id = rel_port;
3318
3319	return group_id;
3320}
3321EXPORT_SYMBOL(scsi_vpd_tpg_id);
3322
3323/**
3324 * scsi_build_sense - build sense data for a command
3325 * @scmd:	scsi command for which the sense should be formatted
3326 * @desc:	Sense format (non-zero == descriptor format,
3327 *              0 == fixed format)
3328 * @key:	Sense key
3329 * @asc:	Additional sense code
3330 * @ascq:	Additional sense code qualifier
3331 *
3332 **/
3333void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq)
3334{
3335	scsi_build_sense_buffer(desc, scmd->sense_buffer, key, asc, ascq);
3336	scmd->result = SAM_STAT_CHECK_CONDITION;
3337}
3338EXPORT_SYMBOL_GPL(scsi_build_sense);
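
/*
 * Illustrative usage sketch (not part of scsi_lib.c): a low-level driver
 * failing a command it does not support with ILLEGAL REQUEST / "invalid
 * command operation code" (ASC 0x20) in fixed sense format, then completing
 * the command.
 */
static void example_fail_unsupported(struct scsi_cmnd *scmd)
{
	scsi_build_sense(scmd, 0 /* fixed format */, ILLEGAL_REQUEST, 0x20, 0);
	scsi_done(scmd);
}
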
v6.13.7
 114static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
 115{
 116	struct request *rq = scsi_cmd_to_rq(cmd);
 117
 118	if (rq->rq_flags & RQF_DONTPREP) {
 119		rq->rq_flags &= ~RQF_DONTPREP;
 120		scsi_mq_uninit_cmd(cmd);
 121	} else {
 122		WARN_ON_ONCE(true);
 123	}
 124
 125	blk_mq_requeue_request(rq, false);
 126	if (!scsi_host_in_recovery(cmd->device->host))
 127		blk_mq_delay_kick_requeue_list(rq->q, msecs);
 128}
 129
 130/**
 131 * __scsi_queue_insert - private queue insertion
 132 * @cmd: The SCSI command being requeued
 133 * @reason:  The reason for the requeue
 134 * @unbusy: Whether the queue should be unbusied
 135 *
 136 * This is a private queue insertion.  The public interface
 137 * scsi_queue_insert() always assumes the queue should be unbusied
 138 * because it's always called before the completion.  This function is
 139 * for a requeue after completion, which should only occur in this
 140 * file.
 141 */
 142static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
 143{
 144	struct scsi_device *device = cmd->device;
 145
 146	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
 147		"Inserting command %p into mlqueue\n", cmd));
 148
 149	scsi_set_blocked(cmd, reason);
 150
 151	/*
 152	 * Decrement the counters, since these commands are no longer
 153	 * active on the host/device.
 154	 */
 155	if (unbusy)
 156		scsi_device_unbusy(device, cmd);
 157
 158	/*
 159	 * Requeue this command.  It will go before all other commands
 160	 * that are already in the queue. Schedule requeue work under
 161	 * lock such that the kblockd_schedule_work() call happens
 162	 * before blk_mq_destroy_queue() finishes.
 163	 */
 164	cmd->result = 0;
 165
 166	blk_mq_requeue_request(scsi_cmd_to_rq(cmd),
 167			       !scsi_host_in_recovery(cmd->device->host));
 168}
 169
 170/**
 171 * scsi_queue_insert - Reinsert a command in the queue.
 172 * @cmd:    command that we are adding to queue.
 173 * @reason: why we are inserting command to queue.
 174 *
 175 * We do this for one of two cases. Either the host is busy and it cannot accept
 176 * any more commands for the time being, or the device returned QUEUE_FULL and
 177 * can accept no more commands.
 178 *
 179 * Context: This could be called either from an interrupt context or a normal
 180 * process context.
 181 */
 182void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 183{
 184	__scsi_queue_insert(cmd, reason, true);
 185}
 186
 187void scsi_failures_reset_retries(struct scsi_failures *failures)
 188{
 189	struct scsi_failure *failure;
 190
 191	failures->total_retries = 0;
 192
 193	for (failure = failures->failure_definitions; failure->result;
 194	     failure++)
 195		failure->retries = 0;
 196}
 197EXPORT_SYMBOL_GPL(scsi_failures_reset_retries);
 198
 199/**
 200 * scsi_check_passthrough - Determine if passthrough scsi_cmnd needs a retry.
 201 * @scmd: scsi_cmnd to check.
 202 * @failures: scsi_failures struct that lists failures to check for.
 203 *
 204 * Returns -EAGAIN if the caller should retry else 0.
 205 */
 206static int scsi_check_passthrough(struct scsi_cmnd *scmd,
 207				  struct scsi_failures *failures)
 208{
 209	struct scsi_failure *failure;
 210	struct scsi_sense_hdr sshdr;
 211	enum sam_status status;
 212
 213	if (!scmd->result)
 214		return 0;
 215
 216	if (!failures)
 217		return 0;
 218
 219	for (failure = failures->failure_definitions; failure->result;
 220	     failure++) {
 221		if (failure->result == SCMD_FAILURE_RESULT_ANY)
 222			goto maybe_retry;
 223
 224		if (host_byte(scmd->result) &&
 225		    host_byte(scmd->result) == host_byte(failure->result))
 226			goto maybe_retry;
 227
 228		status = status_byte(scmd->result);
 229		if (!status)
 230			continue;
 231
 232		if (failure->result == SCMD_FAILURE_STAT_ANY &&
 233		    !scsi_status_is_good(scmd->result))
 234			goto maybe_retry;
 235
 236		if (status != status_byte(failure->result))
 237			continue;
 238
 239		if (status_byte(failure->result) != SAM_STAT_CHECK_CONDITION ||
 240		    failure->sense == SCMD_FAILURE_SENSE_ANY)
 241			goto maybe_retry;
 242
 243		if (!scsi_command_normalize_sense(scmd, &sshdr))
 244			return 0;
 245
 246		if (failure->sense != sshdr.sense_key)
 247			continue;
 248
 249		if (failure->asc == SCMD_FAILURE_ASC_ANY)
 250			goto maybe_retry;
 251
 252		if (failure->asc != sshdr.asc)
 253			continue;
 254
 255		if (failure->ascq == SCMD_FAILURE_ASCQ_ANY ||
 256		    failure->ascq == sshdr.ascq)
 257			goto maybe_retry;
 258	}
 259
 260	return 0;
 261
 262maybe_retry:
 263	if (failure->allowed) {
 264		if (failure->allowed == SCMD_FAILURE_NO_LIMIT ||
 265		    ++failure->retries <= failure->allowed)
 266			return -EAGAIN;
 267	} else {
 268		if (failures->total_allowed == SCMD_FAILURE_NO_LIMIT ||
 269		    ++failures->total_retries <= failures->total_allowed)
 270			return -EAGAIN;
 271	}
 272
 273	return 0;
 274}
 275
 276/**
 277 * scsi_execute_cmd - insert request and wait for the result
 278 * @sdev:	scsi_device
 279 * @cmd:	scsi command
 280 * @opf:	block layer request cmd_flags
 281 * @buffer:	data buffer
 282 * @bufflen:	len of buffer
 283 * @timeout:	request timeout in HZ
 284 * @ml_retries:	number of times SCSI midlayer will retry request
 285 * @args:	Optional args. See struct definition for field descriptions
 286 *
 287 * Returns the scsi_cmnd result field if a command was executed, or a negative
 288 * Linux error code if we didn't get that far.
 289 */
 290int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
 291		     blk_opf_t opf, void *buffer, unsigned int bufflen,
 292		     int timeout, int ml_retries,
 293		     const struct scsi_exec_args *args)
 294{
 295	static const struct scsi_exec_args default_args;
 296	struct request *req;
 297	struct scsi_cmnd *scmd;
 298	int ret;
 299
 300	if (!args)
 301		args = &default_args;
 302	else if (WARN_ON_ONCE(args->sense &&
 303			      args->sense_len != SCSI_SENSE_BUFFERSIZE))
 304		return -EINVAL;
 305
 306retry:
 307	req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
 308	if (IS_ERR(req))
 309		return PTR_ERR(req);
 310
 311	if (bufflen) {
 312		ret = blk_rq_map_kern(sdev->request_queue, req,
 313				      buffer, bufflen, GFP_NOIO);
 314		if (ret)
 315			goto out;
 316	}
 317	scmd = blk_mq_rq_to_pdu(req);
 318	scmd->cmd_len = COMMAND_SIZE(cmd[0]);
 319	memcpy(scmd->cmnd, cmd, scmd->cmd_len);
 320	scmd->allowed = ml_retries;
 321	scmd->flags |= args->scmd_flags;
 322	req->timeout = timeout;
 323	req->rq_flags |= RQF_QUIET;
 324
 325	/*
 326	 * head injection *required* here otherwise quiesce won't work
 327	 */
 328	blk_execute_rq(req, true);
 329
 330	if (scsi_check_passthrough(scmd, args->failures) == -EAGAIN) {
 331		blk_mq_free_request(req);
 332		goto retry;
 333	}
 334
 335	/*
 336	 * Some devices (USB mass-storage in particular) may transfer
 337	 * garbage data together with a residue indicating that the data
 338	 * is invalid.  Prevent the garbage from being misinterpreted
 339	 * and prevent security leaks by zeroing out the excess data.
 340	 */
 341	if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen))
 342		memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len);
 343
 344	if (args->resid)
 345		*args->resid = scmd->resid_len;
 346	if (args->sense)
 347		memcpy(args->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
 348	if (args->sshdr)
 349		scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
 350				     args->sshdr);
 351
 352	ret = scmd->result;
 353 out:
 354	blk_mq_free_request(req);
 355
 356	return ret;
 357}
 358EXPORT_SYMBOL(scsi_execute_cmd);
 359
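/*
 * Illustrative sketch (not part of the original file): issuing a standard
 * INQUIRY through scsi_execute_cmd() and decoding sense data on a check
 * condition.  The helpers used here (scsi_status_is_check_condition(),
 * scsi_sense_valid(), scsi_print_sense_hdr()) are the generic SCSI ones,
 * not anything defined in this file.
 */
#if 0
static int example_inquiry(struct scsi_device *sdev, void *buf)
{
	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };
	struct scsi_sense_hdr sshdr;
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,
	};
	int result;

	/* 36 bytes of data in, 30 second timeout, up to 3 midlayer retries. */
	result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, 36, 30 * HZ,
				  3, &exec_args);
	if (result < 0)
		return result;	/* the request was never executed */
	if (scsi_status_is_check_condition(result) && scsi_sense_valid(&sshdr))
		scsi_print_sense_hdr(sdev, NULL, &sshdr);
	return result;
}
#endif
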
 360/*
 361 * Wake up the error handler if necessary. To avoid the situation where the
 362 * error handler is not woken up while the number of host in-flight requests
 363 * equals shost->host_failed, scsi_eh_scmd_add() uses call_rcu() and this
 364 * function takes an RCU read lock: together these guarantee that this
 365 * function either finishes in its entirety before scsi_eh_scmd_add()
 366 * increases the host_failed counter, or that it notices the shost state
 367 * change made by scsi_eh_scmd_add().
 368 */
 369static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 370{
 371	unsigned long flags;
 372
 373	rcu_read_lock();
 374	__clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
 375	if (unlikely(scsi_host_in_recovery(shost))) {
 376		unsigned int busy = scsi_host_busy(shost);
 377
 378		spin_lock_irqsave(shost->host_lock, flags);
 379		if (shost->host_failed || shost->host_eh_scheduled)
 380			scsi_eh_wakeup(shost, busy);
 381		spin_unlock_irqrestore(shost->host_lock, flags);
 382	}
 383	rcu_read_unlock();
 384}
 385
 386void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
 387{
 388	struct Scsi_Host *shost = sdev->host;
 389	struct scsi_target *starget = scsi_target(sdev);
 390
 391	scsi_dec_host_busy(shost, cmd);
 392
 393	if (starget->can_queue > 0)
 394		atomic_dec(&starget->target_busy);
 395
 396	sbitmap_put(&sdev->budget_map, cmd->budget_token);
 397	cmd->budget_token = -1;
 398}
 399
 400/*
 401 * Kick the queue of SCSI device @sdev if @sdev != current_sdev. Called with
 402 * interrupts disabled.
 403 */
 404static void scsi_kick_sdev_queue(struct scsi_device *sdev, void *data)
 405{
 406	struct scsi_device *current_sdev = data;
 407
 408	if (sdev != current_sdev)
 409		blk_mq_run_hw_queues(sdev->request_queue, true);
 410}
 411
 412/*
 413 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 414 * and call blk_run_queue for all the scsi_devices on the target -
 415 * including current_sdev first.
 416 *
 417 * Called with *no* scsi locks held.
 418 */
 419static void scsi_single_lun_run(struct scsi_device *current_sdev)
 420{
 421	struct Scsi_Host *shost = current_sdev->host;
 422	struct scsi_target *starget = scsi_target(current_sdev);
 423	unsigned long flags;
 424
 425	spin_lock_irqsave(shost->host_lock, flags);
 426	starget->starget_sdev_user = NULL;
 427	spin_unlock_irqrestore(shost->host_lock, flags);
 428
 429	/*
 430	 * Call blk_run_queue for all LUNs on the target, starting with
 431	 * current_sdev. We race with others (to set starget_sdev_user),
 432	 * but in most cases, we will be first. Ideally, each LU on the
 433	 * target would get some limited time or requests on the target.
 434	 */
 435	blk_mq_run_hw_queues(current_sdev->request_queue,
 436			     shost->queuecommand_may_block);
 437
 438	spin_lock_irqsave(shost->host_lock, flags);
 439	if (!starget->starget_sdev_user)
 440		__starget_for_each_device(starget, current_sdev,
 441					  scsi_kick_sdev_queue);
 442	spin_unlock_irqrestore(shost->host_lock, flags);
 443}
 444
 445static inline bool scsi_device_is_busy(struct scsi_device *sdev)
 446{
 447	if (scsi_device_busy(sdev) >= sdev->queue_depth)
 448		return true;
 449	if (atomic_read(&sdev->device_blocked) > 0)
 450		return true;
 451	return false;
 452}
 453
 454static inline bool scsi_target_is_busy(struct scsi_target *starget)
 455{
 456	if (starget->can_queue > 0) {
 457		if (atomic_read(&starget->target_busy) >= starget->can_queue)
 458			return true;
 459		if (atomic_read(&starget->target_blocked) > 0)
 460			return true;
 461	}
 462	return false;
 463}
 464
 465static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
 466{
 467	if (atomic_read(&shost->host_blocked) > 0)
 468		return true;
 469	if (shost->host_self_blocked)
 470		return true;
 471	return false;
 472}
 473
 474static void scsi_starved_list_run(struct Scsi_Host *shost)
 475{
 476	LIST_HEAD(starved_list);
 477	struct scsi_device *sdev;
 478	unsigned long flags;
 479
 480	spin_lock_irqsave(shost->host_lock, flags);
 481	list_splice_init(&shost->starved_list, &starved_list);
 482
 483	while (!list_empty(&starved_list)) {
 484		struct request_queue *slq;
 485
 486		/*
 487		 * As long as shost is accepting commands and we have
 488		 * starved queues, call blk_run_queue. scsi_request_fn
 489		 * drops the queue_lock and can add us back to the
 490		 * starved_list.
 491		 *
 492		 * host_lock protects the starved_list and starved_entry.
 493		 * scsi_request_fn must get the host_lock before checking
 494		 * or modifying starved_list or starved_entry.
 495		 */
 496		if (scsi_host_is_busy(shost))
 497			break;
 498
 499		sdev = list_entry(starved_list.next,
 500				  struct scsi_device, starved_entry);
 501		list_del_init(&sdev->starved_entry);
 502		if (scsi_target_is_busy(scsi_target(sdev))) {
 503			list_move_tail(&sdev->starved_entry,
 504				       &shost->starved_list);
 505			continue;
 506		}
 507
 508		/*
 509		 * Once we drop the host lock, a racing scsi_remove_device()
 510		 * call may remove the sdev from the starved list and destroy
 511		 * it and the queue.  Mitigate by taking a reference to the
 512		 * queue and never touching the sdev again after we drop the
 513		 * host lock.  Note: if __scsi_remove_device() invokes
 514		 * blk_mq_destroy_queue() before the queue is run from this
 515		 * function then blk_run_queue() will return immediately since
 516		 * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING.
 517		 */
 518		slq = sdev->request_queue;
 519		if (!blk_get_queue(slq))
 520			continue;
 521		spin_unlock_irqrestore(shost->host_lock, flags);
 522
 523		blk_mq_run_hw_queues(slq, false);
 524		blk_put_queue(slq);
 525
 526		spin_lock_irqsave(shost->host_lock, flags);
 527	}
 528	/* put any unprocessed entries back */
 529	list_splice(&starved_list, &shost->starved_list);
 530	spin_unlock_irqrestore(shost->host_lock, flags);
 531}
 532
 533/**
 534 * scsi_run_queue - Select a proper request queue to serve next.
 535 * @q:  last request's queue
 536 *
 537 * The previous command was completely finished, start a new one if possible.
 538 */
 539static void scsi_run_queue(struct request_queue *q)
 540{
 541	struct scsi_device *sdev = q->queuedata;
 542
 543	if (scsi_target(sdev)->single_lun)
 544		scsi_single_lun_run(sdev);
 545	if (!list_empty(&sdev->host->starved_list))
 546		scsi_starved_list_run(sdev->host);
 547
 548	/* Note: blk_mq_kick_requeue_list() runs the queue asynchronously. */
 549	blk_mq_kick_requeue_list(q);
 550}
 551
 552void scsi_requeue_run_queue(struct work_struct *work)
 553{
 554	struct scsi_device *sdev;
 555	struct request_queue *q;
 556
 557	sdev = container_of(work, struct scsi_device, requeue_work);
 558	q = sdev->request_queue;
 559	scsi_run_queue(q);
 560}
 561
 562void scsi_run_host_queues(struct Scsi_Host *shost)
 563{
 564	struct scsi_device *sdev;
 565
 566	shost_for_each_device(sdev, shost)
 567		scsi_run_queue(sdev->request_queue);
 568}
 569
 570static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
 571{
 572	if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
 573		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
 574
 575		if (drv->uninit_command)
 576			drv->uninit_command(cmd);
 577	}
 578}
 579
 580void scsi_free_sgtables(struct scsi_cmnd *cmd)
 581{
 582	if (cmd->sdb.table.nents)
 583		sg_free_table_chained(&cmd->sdb.table,
 584				SCSI_INLINE_SG_CNT);
 585	if (scsi_prot_sg_count(cmd))
 586		sg_free_table_chained(&cmd->prot_sdb->table,
 587				SCSI_INLINE_PROT_SG_CNT);
 588}
 589EXPORT_SYMBOL_GPL(scsi_free_sgtables);
 590
 591static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
 592{
 593	scsi_free_sgtables(cmd);
 594	scsi_uninit_cmd(cmd);
 595}
 596
 597static void scsi_run_queue_async(struct scsi_device *sdev)
 598{
 599	if (scsi_host_in_recovery(sdev->host))
 600		return;
 601
 602	if (scsi_target(sdev)->single_lun ||
 603	    !list_empty(&sdev->host->starved_list)) {
 604		kblockd_schedule_work(&sdev->requeue_work);
 605	} else {
 606		/*
 607		 * smp_mb() present in sbitmap_queue_clear() or implied in
 608		 * .end_io is for ordering writing .device_busy in
 609		 * scsi_device_unbusy() and reading sdev->restarts.
 610		 */
 611		int old = atomic_read(&sdev->restarts);
 612
 613		/*
 614		 * ->restarts has to be kept as non-zero if new budget
 615		 *  contention occurs.
 616		 *
 617		 *  No need to run queue when either another re-run
 618		 *  queue wins in updating ->restarts or a new budget
 619		 *  contention occurs.
 620		 */
 621		if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
 622			blk_mq_run_hw_queues(sdev->request_queue, true);
 623	}
 624}
 625
 626/* Returns false when no more bytes to process, true if there are more */
 627static bool scsi_end_request(struct request *req, blk_status_t error,
 628		unsigned int bytes)
 629{
 630	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 631	struct scsi_device *sdev = cmd->device;
 632	struct request_queue *q = sdev->request_queue;
 633
 634	if (blk_update_request(req, error, bytes))
 635		return true;
 636
 637	if (q->limits.features & BLK_FEAT_ADD_RANDOM)
 638		add_disk_randomness(req->q->disk);
 639
 640	WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
 641		     !(cmd->flags & SCMD_INITIALIZED));
 642	cmd->flags = 0;
 643
 644	/*
 645	 * Calling rcu_barrier() is not necessary here because the
 646	 * SCSI error handler guarantees that the function called by
 647	 * call_rcu() has been called before scsi_end_request() is
 648	 * called.
 649	 */
 650	destroy_rcu_head(&cmd->rcu);
 651
 652	/*
 653	 * In the MQ case the command gets freed by __blk_mq_end_request,
 654	 * so we have to do all cleanup that depends on it earlier.
 655	 *
 656	 * We also can't kick the queues from irq context, so we
 657	 * will have to defer it to a workqueue.
 658	 */
 659	scsi_mq_uninit_cmd(cmd);
 660
 661	/*
 662	 * queue is still alive, so grab the ref for preventing it
 663	 * from being cleaned up during running queue.
 664	 */
 665	percpu_ref_get(&q->q_usage_counter);
 666
 667	__blk_mq_end_request(req, error);
 668
 669	scsi_run_queue_async(sdev);
 670
 671	percpu_ref_put(&q->q_usage_counter);
 672	return false;
 673}
 674
 675/**
 676 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 677 * @result:	scsi error code
 678 *
 679 * Translate a SCSI result code into a blk_status_t value.
 680 */
 681static blk_status_t scsi_result_to_blk_status(int result)
 682{
 683	/*
 684	 * Check the scsi-ml byte first in case we converted a host or status
 685	 * byte.
 686	 */
 687	switch (scsi_ml_byte(result)) {
 688	case SCSIML_STAT_OK:
 689		break;
 690	case SCSIML_STAT_RESV_CONFLICT:
 691		return BLK_STS_RESV_CONFLICT;
 692	case SCSIML_STAT_NOSPC:
 693		return BLK_STS_NOSPC;
 694	case SCSIML_STAT_MED_ERROR:
 695		return BLK_STS_MEDIUM;
 696	case SCSIML_STAT_TGT_FAILURE:
 697		return BLK_STS_TARGET;
 698	case SCSIML_STAT_DL_TIMEOUT:
 699		return BLK_STS_DURATION_LIMIT;
 700	}
 701
 702	switch (host_byte(result)) {
 703	case DID_OK:
 704		if (scsi_status_is_good(result))
 705			return BLK_STS_OK;
 706		return BLK_STS_IOERR;
 707	case DID_TRANSPORT_FAILFAST:
 708	case DID_TRANSPORT_MARGINAL:
 709		return BLK_STS_TRANSPORT;
 710	default:
 711		return BLK_STS_IOERR;
 712	}
 713}
 714
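/*
 * For reference (per include/scsi/scsi_cmnd.h, summarized here): the result
 * word packs the SCSI status byte in bits 0-7, the midlayer byte in bits
 * 8-15 and the host byte in bits 16-23, which is why host errors elsewhere
 * in this file are set as e.g. DID_NO_CONNECT << 16.
 */
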
 715/**
 716 * scsi_rq_err_bytes - determine number of bytes till the next failure boundary
 717 * @rq: request to examine
 718 *
 719 * Description:
 720 *     A request could be a merge of IOs which require different failure
 721 *     handling.  This function determines the number of bytes which
 722 *     can be failed from the beginning of the request without
 723 *     crossing into an area which needs to be retried further.
 724 *
 725 * Return:
 726 *     The number of bytes to fail.
 727 */
 728static unsigned int scsi_rq_err_bytes(const struct request *rq)
 729{
 730	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
 731	unsigned int bytes = 0;
 732	struct bio *bio;
 733
 734	if (!(rq->rq_flags & RQF_MIXED_MERGE))
 735		return blk_rq_bytes(rq);
 736
 737	/*
 738	 * Currently the only 'mixing' which can happen is between
 739	 * different fastfail types.  We can safely fail portions
 740	 * which have all the failfast bits that the first one has -
 741	 * the ones which are at least as eager to fail as the first
 742	 * one.
 743	 */
 744	for (bio = rq->bio; bio; bio = bio->bi_next) {
 745		if ((bio->bi_opf & ff) != ff)
 746			break;
 747		bytes += bio->bi_iter.bi_size;
 748	}
 749
 750	/* this could lead to infinite loop */
 751	BUG_ON(blk_rq_bytes(rq) && !bytes);
 752	return bytes;
 753}
 754
 755static bool scsi_cmd_runtime_exceeded(struct scsi_cmnd *cmd)
 756{
 757	struct request *req = scsi_cmd_to_rq(cmd);
 758	unsigned long wait_for;
 759
 760	if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
 761		return false;
 762
 763	wait_for = (cmd->allowed + 1) * req->timeout;
 764	if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
 765		scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n",
 766			    wait_for/HZ);
 767		return true;
 768	}
 769	return false;
 770}
 771
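/*
 * Worked example for scsi_cmd_runtime_exceeded() above: with
 * cmd->allowed == 5 and a 30 second request timeout, wait_for is
 * (5 + 1) * 30 * HZ, so the command is reported as timed out once it has
 * existed for more than 180 seconds.
 */
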
 772/*
 773 * When ALUA transition state is returned, reprep the cmd to
 774 * use the ALUA handler's transition timeout. Delay the reprep
 775 * 1 sec to avoid aggressive retries of the target in that
 776 * state.
 777 */
 778#define ALUA_TRANSITION_REPREP_DELAY	1000
 779
 780/* Helper for scsi_io_completion() when special action required. */
 781static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
 782{
 783	struct request *req = scsi_cmd_to_rq(cmd);
 784	int level = 0;
 785	enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
 786	      ACTION_RETRY, ACTION_DELAYED_RETRY} action;
 787	struct scsi_sense_hdr sshdr;
 788	bool sense_valid;
 789	bool sense_current = true;      /* false implies "deferred sense" */
 790	blk_status_t blk_stat;
 791
 792	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
 793	if (sense_valid)
 794		sense_current = !scsi_sense_is_deferred(&sshdr);
 795
 796	blk_stat = scsi_result_to_blk_status(result);
 797
 798	if (host_byte(result) == DID_RESET) {
 799		/* Third party bus reset or reset for error recovery
 800		 * reasons.  Just retry the command and see what
 801		 * happens.
 802		 */
 803		action = ACTION_RETRY;
 804	} else if (sense_valid && sense_current) {
 805		switch (sshdr.sense_key) {
 806		case UNIT_ATTENTION:
 807			if (cmd->device->removable) {
 808				/* Detected disc change.  Set a bit
 809				 * and quietly refuse further access.
 810				 */
 811				cmd->device->changed = 1;
 812				action = ACTION_FAIL;
 813			} else {
 814				/* Must have been a power glitch, or a
 815				 * bus reset.  Could not have been a
 816				 * media change, so we just retry the
 817				 * command and see what happens.
 818				 */
 819				action = ACTION_RETRY;
 820			}
 821			break;
 822		case ILLEGAL_REQUEST:
 823			/* If we had an ILLEGAL REQUEST returned, then
 824			 * we may have performed an unsupported
 825			 * command.  The only thing this should be
 826			 * would be a ten byte read where only a six
 827			 * byte read was supported.  Also, on a system
 828			 * where READ CAPACITY failed, we may have
 829			 * read past the end of the disk.
 830			 */
 831			if ((cmd->device->use_10_for_rw &&
 832			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
 833			    (cmd->cmnd[0] == READ_10 ||
 834			     cmd->cmnd[0] == WRITE_10)) {
 835				/* This will issue a new 6-byte command. */
 836				cmd->device->use_10_for_rw = 0;
 837				action = ACTION_REPREP;
 838			} else if (sshdr.asc == 0x10) /* DIX */ {
 839				action = ACTION_FAIL;
 840				blk_stat = BLK_STS_PROTECTION;
 841			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
 842			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
 843				action = ACTION_FAIL;
 844				blk_stat = BLK_STS_TARGET;
 845			} else
 846				action = ACTION_FAIL;
 847			break;
 848		case ABORTED_COMMAND:
 849			action = ACTION_FAIL;
 850			if (sshdr.asc == 0x10) /* DIF */
 851				blk_stat = BLK_STS_PROTECTION;
 852			break;
 853		case NOT_READY:
 854			/* If the device is in the process of becoming
 855			 * ready, or has a temporary blockage, retry.
 856			 */
 857			if (sshdr.asc == 0x04) {
 858				switch (sshdr.ascq) {
 859				case 0x01: /* becoming ready */
 860				case 0x04: /* format in progress */
 861				case 0x05: /* rebuild in progress */
 862				case 0x06: /* recalculation in progress */
 863				case 0x07: /* operation in progress */
 864				case 0x08: /* Long write in progress */
 865				case 0x09: /* self test in progress */
 866				case 0x11: /* notify (enable spinup) required */
 867				case 0x14: /* space allocation in progress */
 868				case 0x1a: /* start stop unit in progress */
 869				case 0x1b: /* sanitize in progress */
 870				case 0x1d: /* configuration in progress */
 871					action = ACTION_DELAYED_RETRY;
 872					break;
 873				case 0x0a: /* ALUA state transition */
 874					action = ACTION_DELAYED_REPREP;
 875					break;
 876				/*
 877				 * Depopulation might take many hours,
 878				 * thus it is not worthwhile to retry.
 879				 */
 880				case 0x24: /* depopulation in progress */
 881				case 0x25: /* depopulation restore in progress */
 882					fallthrough;
 883				default:
 884					action = ACTION_FAIL;
 885					break;
 886				}
 887			} else
 888				action = ACTION_FAIL;
 889			break;
 890		case VOLUME_OVERFLOW:
 891			/* See SSC3rXX or current. */
 892			action = ACTION_FAIL;
 893			break;
 894		case DATA_PROTECT:
 895			action = ACTION_FAIL;
 896			if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) ||
 897			    (sshdr.asc == 0x55 &&
 898			     (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) {
 899				/* Insufficient zone resources */
 900				blk_stat = BLK_STS_ZONE_OPEN_RESOURCE;
 901			}
 902			break;
 903		case COMPLETED:
 904			fallthrough;
 905		default:
 906			action = ACTION_FAIL;
 907			break;
 908		}
 909	} else
 910		action = ACTION_FAIL;
 911
 912	if (action != ACTION_FAIL && scsi_cmd_runtime_exceeded(cmd))
 913		action = ACTION_FAIL;
 914
 915	switch (action) {
 916	case ACTION_FAIL:
 917		/* Give up and fail the remainder of the request */
 918		if (!(req->rq_flags & RQF_QUIET)) {
 919			static DEFINE_RATELIMIT_STATE(_rs,
 920					DEFAULT_RATELIMIT_INTERVAL,
 921					DEFAULT_RATELIMIT_BURST);
 922
 923			if (unlikely(scsi_logging_level))
 924				level =
 925				     SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
 926						    SCSI_LOG_MLCOMPLETE_BITS);
 927
 928			/*
 929			 * if logging is enabled the failure will be printed
 930			 * in scsi_log_completion(), so avoid duplicate messages
 931			 */
 932			if (!level && __ratelimit(&_rs)) {
 933				scsi_print_result(cmd, NULL, FAILED);
 934				if (sense_valid)
 935					scsi_print_sense(cmd);
 936				scsi_print_command(cmd);
 937			}
 938		}
 939		if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
 940			return;
 941		fallthrough;
 942	case ACTION_REPREP:
 943		scsi_mq_requeue_cmd(cmd, 0);
 944		break;
 945	case ACTION_DELAYED_REPREP:
 946		scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
 947		break;
 948	case ACTION_RETRY:
 949		/* Retry the same command immediately */
 950		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
 951		break;
 952	case ACTION_DELAYED_RETRY:
 953		/* Retry the same command after a delay */
 954		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
 955		break;
 956	}
 957}
 958
 959/*
 960 * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a
 961 * new result that may suppress further error checking. Also modifies
 962 * *blk_statp in some cases.
 963 */
 964static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
 965					blk_status_t *blk_statp)
 966{
 967	bool sense_valid;
 968	bool sense_current = true;	/* false implies "deferred sense" */
 969	struct request *req = scsi_cmd_to_rq(cmd);
 970	struct scsi_sense_hdr sshdr;
 971
 972	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
 973	if (sense_valid)
 974		sense_current = !scsi_sense_is_deferred(&sshdr);
 975
 976	if (blk_rq_is_passthrough(req)) {
 977		if (sense_valid) {
 978			/*
 979			 * SG_IO wants current and deferred errors
 980			 */
 981			cmd->sense_len = min(8 + cmd->sense_buffer[7],
 982					     SCSI_SENSE_BUFFERSIZE);
 983		}
 984		if (sense_current)
 985			*blk_statp = scsi_result_to_blk_status(result);
 986	} else if (blk_rq_bytes(req) == 0 && sense_current) {
 987		/*
 988		 * Flush commands do not transfer any data, and thus cannot use
 989		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
 990		 * This sets *blk_statp explicitly for the problem case.
 991		 */
 992		*blk_statp = scsi_result_to_blk_status(result);
 993	}
 994	/*
 995	 * Recovered errors need reporting, but they're always treated as
 996	 * success, so fiddle the result code here.  For passthrough requests
 997	 * we already took a copy of the original result, which is what
 998	 * gets returned to the user.
 999	 */
1000	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
1001		bool do_print = true;
1002		/*
1003		 * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d]
1004		 * skip print since caller wants ATA registers. Only occurs
1005		 * on SCSI ATA PASS_THROUGH commands when CK_COND=1
1006		 */
1007		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
1008			do_print = false;
1009		else if (req->rq_flags & RQF_QUIET)
1010			do_print = false;
1011		if (do_print)
1012			scsi_print_sense(cmd);
1013		result = 0;
1014		/* for passthrough, *blk_statp may be set */
1015		*blk_statp = BLK_STS_OK;
1016	}
1017	/*
1018	 * Another corner case: the SCSI status byte is non-zero but 'good'.
1019	 * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
1020	 * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
1021	 * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
1022	 * intermediate statuses (both obsolete in SAM-4) as good.
1023	 */
1024	if ((result & 0xff) && scsi_status_is_good(result)) {
1025		result = 0;
1026		*blk_statp = BLK_STS_OK;
1027	}
1028	return result;
1029}
1030
1031/**
1032 * scsi_io_completion - Completion processing for SCSI commands.
1033 * @cmd:	command that is finished.
1034 * @good_bytes:	number of processed bytes.
1035 *
1036 * We will finish off the specified number of sectors. If we are done, the
1037 * command block will be released and the queue function will be goosed. If we
1038 * are not done then we have to figure out what to do next:
1039 *
1040 *   a) We can call scsi_mq_requeue_cmd().  The request will be
1041 *	unprepared and put back on the queue.  Then a new command will
1042 *	be created for it.  This should be used if we made forward
1043 *	progress, or if we want to switch from READ(10) to READ(6) for
1044 *	example.
1045 *
1046 *   b) We can call scsi_io_completion_action().  The request will be
1047 *	put back on the queue and retried using the same command as
1048 *	before, possibly after a delay.
1049 *
1050 *   c) We can call scsi_end_request() with blk_stat other than
1051 *	BLK_STS_OK, to fail the remainder of the request.
1052 */
1053void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1054{
1055	int result = cmd->result;
1056	struct request *req = scsi_cmd_to_rq(cmd);
1057	blk_status_t blk_stat = BLK_STS_OK;
1058
1059	if (unlikely(result))	/* a nz result may or may not be an error */
1060		result = scsi_io_completion_nz_result(cmd, result, &blk_stat);
1061
1062	/*
1063	 * Next deal with any sectors which we were able to correctly
1064	 * handle.
1065	 */
1066	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
1067		"%u sectors total, %d bytes done.\n",
1068		blk_rq_sectors(req), good_bytes));
1069
1070	/*
1071	 * Failed, zero length commands always need to drop down
1072	 * to retry code. Fast path should return in this block.
1073	 */
1074	if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
1075		if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
1076			return; /* no bytes remaining */
1077	}
1078
1079	/* Kill remainder if no retries. */
1080	if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
1081		if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
1082			WARN_ONCE(true,
1083			    "Bytes remaining after failed, no-retry command");
1084		return;
1085	}
1086
1087	/*
1088	 * If there had been no error, but we have leftover bytes in the
1089	 * request just queue the command up again.
1090	 */
1091	if (likely(result == 0))
1092		scsi_mq_requeue_cmd(cmd, 0);
1093	else
1094		scsi_io_completion_action(cmd, result);
1095}
1096
1097static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
1098		struct request *rq)
1099{
1100	return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
1101	       !op_is_write(req_op(rq)) &&
1102	       sdev->host->hostt->dma_need_drain(rq);
1103}
1104
1105/**
1106 * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists
1107 * @cmd: SCSI command data structure to initialize.
1108 *
1109 * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled
1110 * for @cmd.
1111 *
1112 * Returns:
1113 * * BLK_STS_OK       - on success
1114 * * BLK_STS_RESOURCE - if the failure is retryable
1115 * * BLK_STS_IOERR    - if the failure is fatal
1116 */
1117blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
1118{
1119	struct scsi_device *sdev = cmd->device;
1120	struct request *rq = scsi_cmd_to_rq(cmd);
1121	unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
1122	struct scatterlist *last_sg = NULL;
1123	blk_status_t ret;
1124	bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
1125	int count;
1126
1127	if (WARN_ON_ONCE(!nr_segs))
1128		return BLK_STS_IOERR;
1129
1130	/*
1131	 * Make sure there is space for the drain.  The driver must adjust
1132	 * max_hw_segments to be prepared for this.
1133	 */
1134	if (need_drain)
1135		nr_segs++;
1136
1137	/*
1138	 * If sg table allocation fails, requeue request later.
1139	 */
1140	if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
1141			cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT)))
1142		return BLK_STS_RESOURCE;
1143
1144	/*
1145	 * Next, walk the list, and fill in the addresses and sizes of
1146	 * each segment.
1147	 */
1148	count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
1149
1150	if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
1151		unsigned int pad_len =
1152			(rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
1153
1154		last_sg->length += pad_len;
1155		cmd->extra_len += pad_len;
1156	}
1157
1158	if (need_drain) {
1159		sg_unmark_end(last_sg);
1160		last_sg = sg_next(last_sg);
1161		sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
1162		sg_mark_end(last_sg);
1163
1164		cmd->extra_len += sdev->dma_drain_len;
1165		count++;
1166	}
1167
1168	BUG_ON(count > cmd->sdb.table.nents);
1169	cmd->sdb.table.nents = count;
1170	cmd->sdb.length = blk_rq_payload_bytes(rq);
1171
1172	if (blk_integrity_rq(rq)) {
1173		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
1174
1175		if (WARN_ON_ONCE(!prot_sdb)) {
1176			/*
1177			 * This can happen if someone (e.g. multipath)
1178			 * queues a command to a device on an adapter
1179			 * that does not support DIX.
1180			 */
1181			ret = BLK_STS_IOERR;
1182			goto out_free_sgtables;
1183		}
1184
1185		if (sg_alloc_table_chained(&prot_sdb->table,
1186				rq->nr_integrity_segments,
1187				prot_sdb->table.sgl,
1188				SCSI_INLINE_PROT_SG_CNT)) {
1189			ret = BLK_STS_RESOURCE;
1190			goto out_free_sgtables;
1191		}
1192
1193		count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl);
1194		cmd->prot_sdb = prot_sdb;
1195		cmd->prot_sdb->table.nents = count;
1196	}
1197
1198	return BLK_STS_OK;
1199out_free_sgtables:
1200	scsi_free_sgtables(cmd);
1201	return ret;
1202}
1203EXPORT_SYMBOL(scsi_alloc_sgtables);
1204
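/*
 * Illustrative sketch (not part of the original file): the usual calling
 * pattern from a ULP ->init_command() handler, which maps the request
 * before building the CDB and simply propagates the blk_status_t on
 * failure so the midlayer can retry or fail the request.
 */
#if 0
static blk_status_t example_init_command(struct scsi_cmnd *cmd)
{
	blk_status_t ret = scsi_alloc_sgtables(cmd);

	if (ret != BLK_STS_OK)
		return ret;

	/* ... fill in cmd->cmnd, cmd->cmd_len and cmd->transfersize ... */
	return BLK_STS_OK;
}
#endif
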
1205/**
1206 * scsi_initialize_rq - initialize struct scsi_cmnd partially
1207 * @rq: Request associated with the SCSI command to be initialized.
1208 *
1209 * This function initializes the members of struct scsi_cmnd that must be
1210 * initialized before request processing starts and that won't be
1211 * reinitialized if a SCSI command is requeued.
1212 */
1213static void scsi_initialize_rq(struct request *rq)
1214{
1215	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1216
1217	memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
1218	cmd->cmd_len = MAX_COMMAND_SIZE;
1219	cmd->sense_len = 0;
1220	init_rcu_head(&cmd->rcu);
1221	cmd->jiffies_at_alloc = jiffies;
1222	cmd->retries = 0;
1223}
1224
1225struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
1226				   blk_mq_req_flags_t flags)
1227{
1228	struct request *rq;
1229
1230	rq = blk_mq_alloc_request(q, opf, flags);
1231	if (!IS_ERR(rq))
1232		scsi_initialize_rq(rq);
1233	return rq;
1234}
1235EXPORT_SYMBOL_GPL(scsi_alloc_request);
1236
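/*
 * Illustrative sketch (not part of the original file): open-coding the
 * core of scsi_execute_cmd() above - allocate a passthrough request,
 * fill in the CDB and execute it synchronously.
 */
#if 0
static int example_sync_passthrough(struct scsi_device *sdev,
				    const unsigned char *cdb)
{
	struct request *req;
	struct scsi_cmnd *scmd;
	int result;

	req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	scmd = blk_mq_rq_to_pdu(req);
	scmd->cmd_len = COMMAND_SIZE(cdb[0]);
	memcpy(scmd->cmnd, cdb, scmd->cmd_len);
	req->timeout = 30 * HZ;

	blk_execute_rq(req, true);	/* at the head, as scsi_execute_cmd() does */
	result = scmd->result;
	blk_mq_free_request(req);
	return result;
}
#endif
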
1237/*
1238 * Only called when the request isn't completed by SCSI, and not freed by
1239 * SCSI
1240 */
1241static void scsi_cleanup_rq(struct request *rq)
1242{
1243	if (rq->rq_flags & RQF_DONTPREP) {
1244		scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
1245		rq->rq_flags &= ~RQF_DONTPREP;
1246	}
1247}
1248
1249/* Called before a request is prepared. See also scsi_mq_prep_fn(). */
1250void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
1251{
1252	struct request *rq = scsi_cmd_to_rq(cmd);
1253
1254	if (!blk_rq_is_passthrough(rq) && !(cmd->flags & SCMD_INITIALIZED)) {
1255		cmd->flags |= SCMD_INITIALIZED;
1256		scsi_initialize_rq(rq);
1257	}
1258
1259	cmd->device = dev;
1260	INIT_LIST_HEAD(&cmd->eh_entry);
1261	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
1262}
1263
1264static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
1265		struct request *req)
1266{
1267	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1268
1269	/*
1270	 * Passthrough requests may transfer data, in which case they must
1271	 * have a bio attached to them.  Or they might contain a SCSI command
1272	 * that does not transfer data, in which case they may optionally
1273	 * submit a request without an attached bio.
1274	 */
1275	if (req->bio) {
1276		blk_status_t ret = scsi_alloc_sgtables(cmd);
1277		if (unlikely(ret != BLK_STS_OK))
1278			return ret;
1279	} else {
1280		BUG_ON(blk_rq_bytes(req));
1281
1282		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1283	}
1284
1285	cmd->transfersize = blk_rq_bytes(req);
1286	return BLK_STS_OK;
1287}
1288
1289static blk_status_t
1290scsi_device_state_check(struct scsi_device *sdev, struct request *req)
1291{
1292	switch (sdev->sdev_state) {
1293	case SDEV_CREATED:
1294		return BLK_STS_OK;
1295	case SDEV_OFFLINE:
1296	case SDEV_TRANSPORT_OFFLINE:
1297		/*
1298		 * If the device is offline we refuse to process any
1299		 * commands.  The device must be brought online
1300		 * before trying any recovery commands.
1301		 */
1302		if (!sdev->offline_already) {
1303			sdev->offline_already = true;
1304			sdev_printk(KERN_ERR, sdev,
1305				    "rejecting I/O to offline device\n");
1306		}
1307		return BLK_STS_IOERR;
1308	case SDEV_DEL:
1309		/*
1310		 * If the device is fully deleted, we refuse to
1311		 * process any commands as well.
1312		 */
1313		sdev_printk(KERN_ERR, sdev,
1314			    "rejecting I/O to dead device\n");
1315		return BLK_STS_IOERR;
1316	case SDEV_BLOCK:
1317	case SDEV_CREATED_BLOCK:
1318		return BLK_STS_RESOURCE;
1319	case SDEV_QUIESCE:
1320		/*
1321		 * If the device is quiesced we only accept power management
1322		 * commands.
1323		 */
1324		if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
1325			return BLK_STS_RESOURCE;
1326		return BLK_STS_OK;
1327	default:
1328		/*
1329		 * For any other not fully online state we only allow
1330		 * power management commands.
1331		 */
1332		if (req && !(req->rq_flags & RQF_PM))
1333			return BLK_STS_OFFLINE;
1334		return BLK_STS_OK;
1335	}
1336}
1337
1338/*
1339 * scsi_dev_queue_ready: if we can send requests to sdev, assign one token
1340 * and return the token, otherwise return -1.
1341 */
1342static inline int scsi_dev_queue_ready(struct request_queue *q,
1343				  struct scsi_device *sdev)
1344{
1345	int token;
1346
1347	token = sbitmap_get(&sdev->budget_map);
1348	if (token < 0)
1349		return -1;
1350
1351	if (!atomic_read(&sdev->device_blocked))
1352		return token;
1353
1354	/*
1355	 * Only unblock if no other commands are pending and
1356	 * if device_blocked has decreased to zero
1357	 */
1358	if (scsi_device_busy(sdev) > 1 ||
1359	    atomic_dec_return(&sdev->device_blocked) > 0) {
1360		sbitmap_put(&sdev->budget_map, token);
1361		return -1;
1362	}
1363
1364	SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
1365			 "unblocking device at zero depth\n"));
1366
1367	return token;
1368}
1369
1370/*
1371 * scsi_target_queue_ready: check whether we can send commands to the target
1372 * @sdev: scsi device on starget to check.
1373 */
1374static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1375					   struct scsi_device *sdev)
1376{
1377	struct scsi_target *starget = scsi_target(sdev);
1378	unsigned int busy;
1379
1380	if (starget->single_lun) {
1381		spin_lock_irq(shost->host_lock);
1382		if (starget->starget_sdev_user &&
1383		    starget->starget_sdev_user != sdev) {
1384			spin_unlock_irq(shost->host_lock);
1385			return 0;
1386		}
1387		starget->starget_sdev_user = sdev;
1388		spin_unlock_irq(shost->host_lock);
1389	}
1390
1391	if (starget->can_queue <= 0)
1392		return 1;
1393
1394	busy = atomic_inc_return(&starget->target_busy) - 1;
1395	if (atomic_read(&starget->target_blocked) > 0) {
1396		if (busy)
1397			goto starved;
1398
1399		/*
1400		 * unblock after target_blocked iterates to zero
1401		 */
1402		if (atomic_dec_return(&starget->target_blocked) > 0)
1403			goto out_dec;
1404
1405		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1406				 "unblocking target at zero depth\n"));
1407	}
1408
1409	if (busy >= starget->can_queue)
1410		goto starved;
1411
1412	return 1;
1413
1414starved:
1415	spin_lock_irq(shost->host_lock);
1416	list_move_tail(&sdev->starved_entry, &shost->starved_list);
1417	spin_unlock_irq(shost->host_lock);
1418out_dec:
1419	if (starget->can_queue > 0)
1420		atomic_dec(&starget->target_busy);
1421	return 0;
1422}
1423
1424/*
1425 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1426 * return 0. We must end up running the queue again whenever 0 is
1427 * returned, else IO can hang.
1428 */
1429static inline int scsi_host_queue_ready(struct request_queue *q,
1430				   struct Scsi_Host *shost,
1431				   struct scsi_device *sdev,
1432				   struct scsi_cmnd *cmd)
1433{
1434	if (atomic_read(&shost->host_blocked) > 0) {
1435		if (scsi_host_busy(shost) > 0)
1436			goto starved;
1437
1438		/*
1439		 * unblock after host_blocked iterates to zero
1440		 */
1441		if (atomic_dec_return(&shost->host_blocked) > 0)
1442			goto out_dec;
1443
1444		SCSI_LOG_MLQUEUE(3,
1445			shost_printk(KERN_INFO, shost,
1446				     "unblocking host at zero depth\n"));
1447	}
1448
1449	if (shost->host_self_blocked)
1450		goto starved;
1451
1452	/* We're OK to process the command, so we can't be starved */
1453	if (!list_empty(&sdev->starved_entry)) {
1454		spin_lock_irq(shost->host_lock);
1455		if (!list_empty(&sdev->starved_entry))
1456			list_del_init(&sdev->starved_entry);
1457		spin_unlock_irq(shost->host_lock);
1458	}
1459
1460	__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
1461
1462	return 1;
1463
1464starved:
1465	spin_lock_irq(shost->host_lock);
1466	if (list_empty(&sdev->starved_entry))
1467		list_add_tail(&sdev->starved_entry, &shost->starved_list);
1468	spin_unlock_irq(shost->host_lock);
1469out_dec:
1470	scsi_dec_host_busy(shost, cmd);
1471	return 0;
1472}
1473
1474/*
1475 * Busy state exporting function for request stacking drivers.
1476 *
1477 * For efficiency, no lock is taken to check the busy state of
1478 * shost/starget/sdev, since the returned value is not guaranteed and
1479 * may be changed after request stacking drivers call the function,
1480 * regardless of taking lock or not.
1481 *
1482 * When SCSI can't dispatch I/Os anymore and needs to kill them, it must
1483 * return 'not busy'. Otherwise, request stacking drivers
1484 * may hold requests forever.
1485 */
1486static bool scsi_mq_lld_busy(struct request_queue *q)
1487{
1488	struct scsi_device *sdev = q->queuedata;
1489	struct Scsi_Host *shost;
1490
1491	if (blk_queue_dying(q))
1492		return false;
1493
1494	shost = sdev->host;
1495
1496	/*
1497	 * Ignore host/starget busy state.
1498	 * Since block layer does not have a concept of fairness across
1499	 * multiple queues, congestion of host/starget needs to be handled
1500	 * in SCSI layer.
1501	 */
1502	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1503		return true;
1504
1505	return false;
1506}
1507
1508/*
1509 * Block layer request completion callback. May be called from interrupt
1510 * context.
1511 */
1512static void scsi_complete(struct request *rq)
1513{
1514	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1515	enum scsi_disposition disposition;
1516
1517	INIT_LIST_HEAD(&cmd->eh_entry);
1518
1519	atomic_inc(&cmd->device->iodone_cnt);
1520	if (cmd->result)
1521		atomic_inc(&cmd->device->ioerr_cnt);
1522
1523	disposition = scsi_decide_disposition(cmd);
1524	if (disposition != SUCCESS && scsi_cmd_runtime_exceeded(cmd))
1525		disposition = SUCCESS;
1526
1527	scsi_log_completion(cmd, disposition);
1528
1529	switch (disposition) {
1530	case SUCCESS:
1531		scsi_finish_command(cmd);
1532		break;
1533	case NEEDS_RETRY:
1534		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1535		break;
1536	case ADD_TO_MLQUEUE:
1537		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1538		break;
1539	default:
1540		scsi_eh_scmd_add(cmd);
1541		break;
1542	}
1543}
1544
1545/**
1546 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
1547 * @cmd: command block we are dispatching.
1548 *
1549 * Return: nonzero if the request was rejected and the device's queue needs
1550 * to be plugged.
1551 */
1552static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
1553{
1554	struct Scsi_Host *host = cmd->device->host;
1555	int rtn = 0;
1556
1557	atomic_inc(&cmd->device->iorequest_cnt);
1558
1559	/* check if the device is still usable */
1560	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
1561		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
1562		 * returns an immediate error upwards, and signals
1563		 * that the device is no longer present */
1564		cmd->result = DID_NO_CONNECT << 16;
1565		goto done;
1566	}
1567
1568	/* Check to see if the scsi lld made this device blocked. */
1569	if (unlikely(scsi_device_blocked(cmd->device))) {
1570		/*
1571		 * in blocked state, the command is just put back on
1572		 * the device queue.  The suspend state has already
1573		 * blocked the queue so future requests should not
1574		 * occur until the device transitions out of the
1575		 * suspend state.
1576		 */
1577		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1578			"queuecommand : device blocked\n"));
1579		atomic_dec(&cmd->device->iorequest_cnt);
1580		return SCSI_MLQUEUE_DEVICE_BUSY;
1581	}
1582
1583	/* Store the LUN value in cmnd, if needed. */
1584	if (cmd->device->lun_in_cdb)
1585		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
1586			       (cmd->device->lun << 5 & 0xe0);
1587
1588	scsi_log_send(cmd);
1589
1590	/*
1591	 * Before we queue this command, check if the command
1592	 * length exceeds what the host adapter can handle.
1593	 */
1594	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
1595		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1596			       "queuecommand : command too long. "
1597			       "cdb_size=%d host->max_cmd_len=%d\n",
1598			       cmd->cmd_len, cmd->device->host->max_cmd_len));
1599		cmd->result = (DID_ABORT << 16);
1600		goto done;
1601	}
1602
1603	if (unlikely(host->shost_state == SHOST_DEL)) {
1604		cmd->result = (DID_NO_CONNECT << 16);
1605		goto done;
1606
1607	}
1608
1609	trace_scsi_dispatch_cmd_start(cmd);
1610	rtn = host->hostt->queuecommand(host, cmd);
1611	if (rtn) {
1612		atomic_dec(&cmd->device->iorequest_cnt);
1613		trace_scsi_dispatch_cmd_error(cmd, rtn);
1614		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
1615		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
1616			rtn = SCSI_MLQUEUE_HOST_BUSY;
1617
1618		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1619			"queuecommand : request rejected\n"));
1620	}
1621
1622	return rtn;
1623 done:
1624	scsi_done(cmd);
1625	return 0;
1626}
1627
1628/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
1629static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
1630{
1631	return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
1632		sizeof(struct scatterlist);
1633}
1634
1635static blk_status_t scsi_prepare_cmd(struct request *req)
1636{
1637	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1638	struct scsi_device *sdev = req->q->queuedata;
1639	struct Scsi_Host *shost = sdev->host;
1640	bool in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
1641	struct scatterlist *sg;
1642
1643	scsi_init_command(sdev, cmd);
1644
1645	cmd->eh_eflags = 0;
1646	cmd->prot_type = 0;
1647	cmd->prot_flags = 0;
1648	cmd->submitter = 0;
1649	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1650	cmd->underflow = 0;
1651	cmd->transfersize = 0;
1652	cmd->host_scribble = NULL;
1653	cmd->result = 0;
1654	cmd->extra_len = 0;
1655	cmd->state = 0;
1656	if (in_flight)
1657		__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
1658
1659	cmd->prot_op = SCSI_PROT_NORMAL;
1660	if (blk_rq_bytes(req))
1661		cmd->sc_data_direction = rq_dma_dir(req);
1662	else
1663		cmd->sc_data_direction = DMA_NONE;
1664
1665	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
1666	cmd->sdb.table.sgl = sg;
1667
1668	if (scsi_host_get_prot(shost)) {
1669		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
1670
1671		cmd->prot_sdb->table.sgl =
1672			(struct scatterlist *)(cmd->prot_sdb + 1);
1673	}
1674
1675	/*
1676	 * Special handling for passthrough commands, which don't go to the ULP
1677	 * at all:
1678	 */
1679	if (blk_rq_is_passthrough(req))
1680		return scsi_setup_scsi_cmnd(sdev, req);
1681
1682	if (sdev->handler && sdev->handler->prep_fn) {
1683		blk_status_t ret = sdev->handler->prep_fn(sdev, req);
1684
1685		if (ret != BLK_STS_OK)
1686			return ret;
1687	}
1688
1689	/* Usually overridden by the ULP */
1690	cmd->allowed = 0;
1691	memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
1692	return scsi_cmd_to_driver(cmd)->init_command(cmd);
1693}
1694
1695static void scsi_done_internal(struct scsi_cmnd *cmd, bool complete_directly)
1696{
1697	struct request *req = scsi_cmd_to_rq(cmd);
1698
1699	switch (cmd->submitter) {
1700	case SUBMITTED_BY_BLOCK_LAYER:
1701		break;
1702	case SUBMITTED_BY_SCSI_ERROR_HANDLER:
1703		return scsi_eh_done(cmd);
1704	case SUBMITTED_BY_SCSI_RESET_IOCTL:
1705		return;
1706	}
1707
1708	if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q)))
1709		return;
1710	if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
1711		return;
1712	trace_scsi_dispatch_cmd_done(cmd);
1713
1714	if (complete_directly)
1715		blk_mq_complete_request_direct(req, scsi_complete);
1716	else
1717		blk_mq_complete_request(req);
1718}
1719
1720void scsi_done(struct scsi_cmnd *cmd)
1721{
1722	scsi_done_internal(cmd, false);
1723}
1724EXPORT_SYMBOL(scsi_done);
1725
1726void scsi_done_direct(struct scsi_cmnd *cmd)
1727{
1728	scsi_done_internal(cmd, true);
1729}
1730EXPORT_SYMBOL(scsi_done_direct);
1731
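/*
 * Illustrative sketch (not part of the original file): the typical LLD
 * completion path.  The driver's interrupt handler fills in cmd->result
 * and hands the command back to the midlayer, which completes the request
 * through scsi_complete() above.
 */
#if 0
static void example_lld_complete(struct scsi_cmnd *cmd, bool lost_device)
{
	if (lost_device)
		cmd->result = DID_NO_CONNECT << 16;
	else
		cmd->result = DID_OK << 16 | SAM_STAT_GOOD;
	scsi_done(cmd);
}
#endif
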
1732static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
1733{
1734	struct scsi_device *sdev = q->queuedata;
1735
1736	sbitmap_put(&sdev->budget_map, budget_token);
1737}
1738
1739/*
1740 * When to reinvoke queueing after a resource shortage. It's 3 msecs so as
1741 * not to change behaviour from the previous unplug mechanism; experimentation
1742 * may prove this needs changing.
1743 */
1744#define SCSI_QUEUE_DELAY 3
1745
1746static int scsi_mq_get_budget(struct request_queue *q)
1747{
1748	struct scsi_device *sdev = q->queuedata;
1749	int token = scsi_dev_queue_ready(q, sdev);
1750
1751	if (token >= 0)
1752		return token;
1753
1754	atomic_inc(&sdev->restarts);
1755
1756	/*
1757	 * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy).
1758	 * .restarts must be incremented before .device_busy is read because the
1759	 * code in scsi_run_queue_async() depends on the order of these operations.
1760	 */
1761	smp_mb__after_atomic();
1762
1763	/*
1764	 * If all in-flight requests originated from this LUN are completed
1765	 * before reading .device_busy, sdev->device_busy will be observed as
1766	 * zero, then blk_mq_delay_run_hw_queues() will dispatch this request
1767	 * soon. Otherwise, completion of one of these requests will observe
1768	 * the .restarts flag, and the request queue will be run for handling
1769	 * this request, see scsi_end_request().
1770	 */
1771	if (unlikely(scsi_device_busy(sdev) == 0 &&
1772				!scsi_device_blocked(sdev)))
1773		blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
1774	return -1;
1775}
1776
1777static void scsi_mq_set_rq_budget_token(struct request *req, int token)
1778{
1779	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1780
1781	cmd->budget_token = token;
1782}
1783
1784static int scsi_mq_get_rq_budget_token(struct request *req)
1785{
1786	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1787
1788	return cmd->budget_token;
1789}
1790
1791static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
1792			 const struct blk_mq_queue_data *bd)
1793{
1794	struct request *req = bd->rq;
1795	struct request_queue *q = req->q;
1796	struct scsi_device *sdev = q->queuedata;
1797	struct Scsi_Host *shost = sdev->host;
1798	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1799	blk_status_t ret;
1800	int reason;
1801
1802	WARN_ON_ONCE(cmd->budget_token < 0);
1803
1804	/*
1805	 * If the device is not in running state we will reject some or all
1806	 * commands.
1807	 */
1808	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1809		ret = scsi_device_state_check(sdev, req);
1810		if (ret != BLK_STS_OK)
1811			goto out_put_budget;
1812	}
1813
1814	ret = BLK_STS_RESOURCE;
1815	if (!scsi_target_queue_ready(shost, sdev))
1816		goto out_put_budget;
1817	if (unlikely(scsi_host_in_recovery(shost))) {
1818		if (cmd->flags & SCMD_FAIL_IF_RECOVERING)
1819			ret = BLK_STS_OFFLINE;
1820		goto out_dec_target_busy;
1821	}
1822	if (!scsi_host_queue_ready(q, shost, sdev, cmd))
1823		goto out_dec_target_busy;
1824
1825	/*
1826	 * Only clear the driver-private command data if the LLD does not supply
1827	 * a function to initialize that data.
1828	 */
1829	if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv)
1830		memset(cmd + 1, 0, shost->hostt->cmd_size);
1831
1832	if (!(req->rq_flags & RQF_DONTPREP)) {
1833		ret = scsi_prepare_cmd(req);
1834		if (ret != BLK_STS_OK)
1835			goto out_dec_host_busy;
1836		req->rq_flags |= RQF_DONTPREP;
1837	} else {
1838		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
1839	}
1840
1841	cmd->flags &= SCMD_PRESERVED_FLAGS;
1842	if (sdev->simple_tags)
1843		cmd->flags |= SCMD_TAGGED;
1844	if (bd->last)
1845		cmd->flags |= SCMD_LAST;
1846
1847	scsi_set_resid(cmd, 0);
1848	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1849	cmd->submitter = SUBMITTED_BY_BLOCK_LAYER;
1850
1851	blk_mq_start_request(req);
1852	reason = scsi_dispatch_cmd(cmd);
1853	if (reason) {
1854		scsi_set_blocked(cmd, reason);
1855		ret = BLK_STS_RESOURCE;
1856		goto out_dec_host_busy;
1857	}
1858
1859	return BLK_STS_OK;
1860
1861out_dec_host_busy:
1862	scsi_dec_host_busy(shost, cmd);
1863out_dec_target_busy:
1864	if (scsi_target(sdev)->can_queue > 0)
1865		atomic_dec(&scsi_target(sdev)->target_busy);
1866out_put_budget:
1867	scsi_mq_put_budget(q, cmd->budget_token);
1868	cmd->budget_token = -1;
1869	switch (ret) {
1870	case BLK_STS_OK:
1871		break;
1872	case BLK_STS_RESOURCE:
1873		if (scsi_device_blocked(sdev))
1874			ret = BLK_STS_DEV_RESOURCE;
1875		break;
1876	case BLK_STS_AGAIN:
1877		cmd->result = DID_BUS_BUSY << 16;
1878		if (req->rq_flags & RQF_DONTPREP)
1879			scsi_mq_uninit_cmd(cmd);
1880		break;
1881	default:
1882		if (unlikely(!scsi_device_online(sdev)))
1883			cmd->result = DID_NO_CONNECT << 16;
1884		else
1885			cmd->result = DID_ERROR << 16;
1886		/*
1887		 * Make sure to release all allocated resources when
1888		 * we hit an error, as we will never see this command
1889		 * again.
1890		 */
1891		if (req->rq_flags & RQF_DONTPREP)
1892			scsi_mq_uninit_cmd(cmd);
1893		scsi_run_queue_async(sdev);
1894		break;
1895	}
1896	return ret;
1897}
1898
1899static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
1900				unsigned int hctx_idx, unsigned int numa_node)
1901{
1902	struct Scsi_Host *shost = set->driver_data;
1903	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1904	struct scatterlist *sg;
1905	int ret = 0;
1906
1907	cmd->sense_buffer =
1908		kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
1909	if (!cmd->sense_buffer)
1910		return -ENOMEM;
1911
1912	if (scsi_host_get_prot(shost)) {
1913		sg = (void *)cmd + sizeof(struct scsi_cmnd) +
1914			shost->hostt->cmd_size;
1915		cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
1916	}
1917
1918	if (shost->hostt->init_cmd_priv) {
1919		ret = shost->hostt->init_cmd_priv(shost, cmd);
1920		if (ret < 0)
1921			kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
1922	}
1923
1924	return ret;
1925}
1926
1927static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1928				 unsigned int hctx_idx)
1929{
1930	struct Scsi_Host *shost = set->driver_data;
1931	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1932
1933	if (shost->hostt->exit_cmd_priv)
1934		shost->hostt->exit_cmd_priv(shost, cmd);
1935	kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
1936}
1937
1938
1939static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
1940{
1941	struct Scsi_Host *shost = hctx->driver_data;
1942
1943	if (shost->hostt->mq_poll)
1944		return shost->hostt->mq_poll(shost, hctx->queue_num);
1945
1946	return 0;
1947}
1948
1949static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1950			  unsigned int hctx_idx)
1951{
1952	struct Scsi_Host *shost = data;
1953
1954	hctx->driver_data = shost;
1955	return 0;
1956}
1957
1958static void scsi_map_queues(struct blk_mq_tag_set *set)
1959{
1960	struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
1961
1962	if (shost->hostt->map_queues)
1963		return shost->hostt->map_queues(shost);
1964	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
1965}
1966
1967void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim)
1968{
1969	struct device *dev = shost->dma_dev;
1970
1971	memset(lim, 0, sizeof(*lim));
1972	lim->max_segments =
1973		min_t(unsigned short, shost->sg_tablesize, SG_MAX_SEGMENTS);
1974
1975	if (scsi_host_prot_dma(shost)) {
1976		shost->sg_prot_tablesize =
1977			min_not_zero(shost->sg_prot_tablesize,
1978				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
1979		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1980		lim->max_integrity_segments = shost->sg_prot_tablesize;
1981	}
1982
1983	lim->max_hw_sectors = shost->max_sectors;
1984	lim->seg_boundary_mask = shost->dma_boundary;
1985	lim->max_segment_size = shost->max_segment_size;
1986	lim->virt_boundary_mask = shost->virt_boundary_mask;
1987	lim->dma_alignment = max_t(unsigned int,
1988		shost->dma_alignment, dma_get_cache_alignment() - 1);
1989
1990	if (shost->no_highmem)
1991		lim->features |= BLK_FEAT_BOUNCE_HIGH;
1992
1993	/*
1994	 * Propagate the DMA segment properties to the dma-mapping layer as
1995	 * a courtesy service to the LLDDs.  This needs to check that the buses
1996	 * actually support the DMA API first, though.
1997	 */
1998	if (dev->dma_parms) {
1999		dma_set_seg_boundary(dev, shost->dma_boundary);
2000		dma_set_max_seg_size(dev, shost->max_segment_size);
2001	}
2002}
2003EXPORT_SYMBOL_GPL(scsi_init_limits);
2004
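/*
 * Worked example for the dma_alignment computation in scsi_init_limits():
 * a host with shost->dma_alignment == 3 (a dword alignment mask) on a
 * platform whose dma_get_cache_alignment() returns 64 ends up with
 * lim->dma_alignment == 63, i.e. the stricter requirement wins.
 */
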
2005static const struct blk_mq_ops scsi_mq_ops_no_commit = {
2006	.get_budget	= scsi_mq_get_budget,
2007	.put_budget	= scsi_mq_put_budget,
2008	.queue_rq	= scsi_queue_rq,
2009	.complete	= scsi_complete,
2010	.timeout	= scsi_timeout,
2011#ifdef CONFIG_BLK_DEBUG_FS
2012	.show_rq	= scsi_show_rq,
2013#endif
2014	.init_request	= scsi_mq_init_request,
2015	.exit_request	= scsi_mq_exit_request,
2016	.cleanup_rq	= scsi_cleanup_rq,
2017	.busy		= scsi_mq_lld_busy,
2018	.map_queues	= scsi_map_queues,
2019	.init_hctx	= scsi_init_hctx,
2020	.poll		= scsi_mq_poll,
2021	.set_rq_budget_token = scsi_mq_set_rq_budget_token,
2022	.get_rq_budget_token = scsi_mq_get_rq_budget_token,
2023};
2024
2025
2026static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
2027{
2028	struct Scsi_Host *shost = hctx->driver_data;
2029
2030	shost->hostt->commit_rqs(shost, hctx->queue_num);
2031}
2032
2033static const struct blk_mq_ops scsi_mq_ops = {
2034	.get_budget	= scsi_mq_get_budget,
2035	.put_budget	= scsi_mq_put_budget,
2036	.queue_rq	= scsi_queue_rq,
2037	.commit_rqs	= scsi_commit_rqs,
2038	.complete	= scsi_complete,
2039	.timeout	= scsi_timeout,
2040#ifdef CONFIG_BLK_DEBUG_FS
2041	.show_rq	= scsi_show_rq,
2042#endif
2043	.init_request	= scsi_mq_init_request,
2044	.exit_request	= scsi_mq_exit_request,
2045	.cleanup_rq	= scsi_cleanup_rq,
2046	.busy		= scsi_mq_lld_busy,
2047	.map_queues	= scsi_map_queues,
2048	.init_hctx	= scsi_init_hctx,
2049	.poll		= scsi_mq_poll,
2050	.set_rq_budget_token = scsi_mq_set_rq_budget_token,
2051	.get_rq_budget_token = scsi_mq_get_rq_budget_token,
2052};
2053
2054int scsi_mq_setup_tags(struct Scsi_Host *shost)
2055{
2056	unsigned int cmd_size, sgl_size;
2057	struct blk_mq_tag_set *tag_set = &shost->tag_set;
2058
2059	sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
2060				scsi_mq_inline_sgl_size(shost));
2061	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
2062	if (scsi_host_get_prot(shost))
2063		cmd_size += sizeof(struct scsi_data_buffer) +
2064			sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
2065
2066	memset(tag_set, 0, sizeof(*tag_set));
2067	if (shost->hostt->commit_rqs)
2068		tag_set->ops = &scsi_mq_ops;
2069	else
2070		tag_set->ops = &scsi_mq_ops_no_commit;
2071	tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
2072	tag_set->nr_maps = shost->nr_maps ? : 1;
2073	tag_set->queue_depth = shost->can_queue;
2074	tag_set->cmd_size = cmd_size;
2075	tag_set->numa_node = dev_to_node(shost->dma_dev);
2076	tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
2077	tag_set->flags |=
2078		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
2079	if (shost->queuecommand_may_block)
2080		tag_set->flags |= BLK_MQ_F_BLOCKING;
2081	tag_set->driver_data = shost;
2082	if (shost->host_tagset)
2083		tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
2084
2085	return blk_mq_alloc_tag_set(tag_set);
2086}
2087
2088void scsi_mq_free_tags(struct kref *kref)
2089{
2090	struct Scsi_Host *shost = container_of(kref, typeof(*shost),
2091					       tagset_refcnt);
2092
2093	blk_mq_free_tag_set(&shost->tag_set);
2094	complete(&shost->tagset_freed);
2095}
2096
2097/**
2098 * scsi_device_from_queue - return sdev associated with a request_queue
2099 * @q: The request queue to return the sdev from
2100 *
2101 * Return the sdev associated with a request queue or NULL if the
2102 * request_queue does not reference a SCSI device.
2103 */
2104struct scsi_device *scsi_device_from_queue(struct request_queue *q)
2105{
2106	struct scsi_device *sdev = NULL;
2107
2108	if (q->mq_ops == &scsi_mq_ops_no_commit ||
2109	    q->mq_ops == &scsi_mq_ops)
2110		sdev = q->queuedata;
2111	if (!sdev || !get_device(&sdev->sdev_gendev))
2112		sdev = NULL;
2113
2114	return sdev;
2115}
2116/*
2117 * pktcdvd should have been integrated into the SCSI layers, but for historical
2118 * reasons like the old IDE driver it isn't.  This export allows it to safely
2119 * probe if a given device is a SCSI one and only attach to that.
2120 */
2121#ifdef CONFIG_CDROM_PKTCDVD_MODULE
2122EXPORT_SYMBOL_GPL(scsi_device_from_queue);
2123#endif
2124
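/*
 * Illustrative sketch (not part of the original file): how a caller such
 * as pktcdvd can probe a queue.  scsi_device_from_queue() takes a device
 * reference on success, so the caller must drop it again.
 */
#if 0
static bool example_queue_is_scsi(struct request_queue *q)
{
	struct scsi_device *sdev = scsi_device_from_queue(q);

	if (!sdev)
		return false;
	put_device(&sdev->sdev_gendev);
	return true;
}
#endif
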
2125/**
2126 * scsi_block_requests - Utility function used by low-level drivers to prevent
2127 * further commands from being queued to the device.
2128 * @shost:  host in question
2129 *
2130 * There is no timer nor any other means by which the requests get unblocked
2131 * other than the low-level driver calling scsi_unblock_requests().
2132 */
2133void scsi_block_requests(struct Scsi_Host *shost)
2134{
2135	shost->host_self_blocked = 1;
2136}
2137EXPORT_SYMBOL(scsi_block_requests);
2138
2139/**
2140 * scsi_unblock_requests - Utility function used by low-level drivers to allow
2141 * further commands to be queued to the device.
2142 * @shost:  host in question
2143 *
2144 * There is no timer nor any other means by which the requests get unblocked
2145 * other than the low-level driver calling scsi_unblock_requests(). This is done
2146 * as an API function so that changes to the internals of the scsi mid-layer
2147 * won't require wholesale changes to drivers that use this feature.
2148 */
2149void scsi_unblock_requests(struct Scsi_Host *shost)
2150{
2151	shost->host_self_blocked = 0;
2152	scsi_run_host_queues(shost);
2153}
2154EXPORT_SYMBOL(scsi_unblock_requests);
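
/*
 * Illustrative pairing sketch (not compiled; my_reload_firmware is
 * hypothetical): an LLD shutting the gate while it cannot accept new
 * commands, then reopening it.
 */
#if 0
scsi_block_requests(shost);
my_reload_firmware(shost);
scsi_unblock_requests(shost);	/* also reruns the host queues */
#endif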
2155
2156void scsi_exit_queue(void)
2157{
2158	kmem_cache_destroy(scsi_sense_cache);
2159}
2160
2161/**
2162 *	scsi_mode_select - issue a mode select
2163 *	@sdev:	SCSI device to be queried
2164 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
2165 *	@sp:	Save page bit (0 == don't save, 1 == save)
2166 *	@buffer: request buffer (may not be smaller than eight bytes)
2167 *	@len:	length of request buffer.
2168 *	@timeout: command timeout
2169 *	@retries: number of retries before failing
2170 *	@data: returns a structure abstracting the mode header data
2171 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
2172 *		must be SCSI_SENSE_BUFFERSIZE big.
2173 *
2174 *	Returns zero if successful; negative error number or scsi
2175 *	status on error
2176 *
2177 */
2178int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
2179		     unsigned char *buffer, int len, int timeout, int retries,
2180		     struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2181{
2182	unsigned char cmd[10];
2183	unsigned char *real_buffer;
2184	const struct scsi_exec_args exec_args = {
2185		.sshdr = sshdr,
2186	};
2187	int ret;
2188
2189	memset(cmd, 0, sizeof(cmd));
2190	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
2191
2192	/*
2193	 * Use MODE SELECT(10) if the device asked for it or if the mode page
2194	 * and the mode select header cannot fit within the maximum 255 bytes
2195	 * of the MODE SELECT(6) command.
2196	 */
2197	if (sdev->use_10_for_ms ||
2198	    len + 4 > 255 ||
2199	    data->block_descriptor_length > 255) {
2200		if (len > 65535 - 8)
2201			return -EINVAL;
2202		real_buffer = kmalloc(8 + len, GFP_KERNEL);
2203		if (!real_buffer)
2204			return -ENOMEM;
2205		memcpy(real_buffer + 8, buffer, len);
2206		len += 8;
2207		real_buffer[0] = 0;
2208		real_buffer[1] = 0;
2209		real_buffer[2] = data->medium_type;
2210		real_buffer[3] = data->device_specific;
2211		real_buffer[4] = data->longlba ? 0x01 : 0;
2212		real_buffer[5] = 0;
2213		put_unaligned_be16(data->block_descriptor_length,
2214				   &real_buffer[6]);
2215
2216		cmd[0] = MODE_SELECT_10;
2217		put_unaligned_be16(len, &cmd[7]);
2218	} else {
2219		if (data->longlba)
2220			return -EINVAL;
2221
2222		real_buffer = kmalloc(4 + len, GFP_KERNEL);
2223		if (!real_buffer)
2224			return -ENOMEM;
2225		memcpy(real_buffer + 4, buffer, len);
2226		len += 4;
2227		real_buffer[0] = 0;
2228		real_buffer[1] = data->medium_type;
2229		real_buffer[2] = data->device_specific;
2230		real_buffer[3] = data->block_descriptor_length;
2231
2232		cmd[0] = MODE_SELECT;
2233		cmd[4] = len;
2234	}
2235
2236	ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, real_buffer, len,
2237			       timeout, retries, &exec_args);
2238	kfree(real_buffer);
2239	return ret;
2240}
2241EXPORT_SYMBOL_GPL(scsi_mode_select);
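
/*
 * Illustrative read-modify-write sketch (not compiled): fetch the
 * caching mode page with scsi_mode_sense(), set WCE, and write the bare
 * page back; scsi_mode_select() rebuilds the header and block
 * descriptors around it from @data.
 */
#if 0
unsigned char buf[64];
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;

if (scsi_mode_sense(sdev, 0x08 /* DBD */, 0x08 /* caching page */, 0,
		    buf, sizeof(buf), 10 * HZ, 3, &data, &sshdr) == 0) {
	unsigned char *page = buf + data.header_length +
			      data.block_descriptor_length;
	int page_len = data.length - data.header_length -
		       data.block_descriptor_length;

	page[2] |= 0x04;			/* WCE */
	data.block_descriptor_length = 0;	/* send the page alone */
	scsi_mode_select(sdev, 1 /* PF */, 0 /* !SP */, page, page_len,
			 10 * HZ, 3, &data, &sshdr);
}
#endif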
2242
2243/**
2244 *	scsi_mode_sense - issue a mode sense, falling back from 10 to 6 bytes if necessary.
2245 *	@sdev:	SCSI device to be queried
2246 *	@dbd:	set to prevent mode sense from returning block descriptors
2247 *	@modepage: mode page being requested
2248 *	@subpage: sub-page of the mode page being requested
2249 *	@buffer: request buffer (may not be smaller than eight bytes)
2250 *	@len:	length of request buffer.
2251 *	@timeout: command timeout
2252 *	@retries: number of retries before failing
2253 *	@data: returns a structure abstracting the mode header data
2254 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
2255 *		must be SCSI_SENSE_BUFFERSIZE big.
2256 *
2257 *	Returns zero if successful, or a negative error number on failure
2258 */
2259int
2260scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage,
2261		  unsigned char *buffer, int len, int timeout, int retries,
2262		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2263{
2264	unsigned char cmd[12];
2265	int use_10_for_ms;
2266	int header_length;
2267	int result;
2268	struct scsi_sense_hdr my_sshdr;
2269	struct scsi_failure failure_defs[] = {
2270		{
2271			.sense = UNIT_ATTENTION,
2272			.asc = SCMD_FAILURE_ASC_ANY,
2273			.ascq = SCMD_FAILURE_ASCQ_ANY,
2274			.allowed = retries,
2275			.result = SAM_STAT_CHECK_CONDITION,
2276		},
2277		{}
2278	};
2279	struct scsi_failures failures = {
2280		.failure_definitions = failure_defs,
2281	};
2282	const struct scsi_exec_args exec_args = {
2283		/* caller might not be interested in sense, but we need it */
2284		.sshdr = sshdr ? : &my_sshdr,
2285		.failures = &failures,
2286	};
2287
2288	memset(data, 0, sizeof(*data));
2289	memset(&cmd[0], 0, 12);
2290
2291	dbd = sdev->set_dbd_for_ms ? 8 : dbd;
2292	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
2293	cmd[2] = modepage;
2294	cmd[3] = subpage;
2295
2296	sshdr = exec_args.sshdr;
2297
2298 retry:
2299	use_10_for_ms = sdev->use_10_for_ms || len > 255;
2300
2301	if (use_10_for_ms) {
2302		if (len < 8 || len > 65535)
2303			return -EINVAL;
2304
2305		cmd[0] = MODE_SENSE_10;
2306		put_unaligned_be16(len, &cmd[7]);
2307		header_length = 8;
2308	} else {
2309		if (len < 4)
2310			return -EINVAL;
2311
2312		cmd[0] = MODE_SENSE;
2313		cmd[4] = len;
2314		header_length = 4;
2315	}
2316
2317	memset(buffer, 0, len);
2318
2319	result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len,
2320				  timeout, retries, &exec_args);
2321	if (result < 0)
2322		return result;
2323
2324	/* This code looks awful: what it's doing is making sure an
2325	 * ILLEGAL REQUEST sense return identifies the actual command
2326	 * byte as the problem.  MODE SENSE commands can return
2327	 * ILLEGAL REQUEST if the mode page isn't supported. */
2328
2329	if (!scsi_status_is_good(result)) {
2330		if (scsi_sense_valid(sshdr)) {
2331			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
2332			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
2333				/*
2334				 * Invalid command operation code: retry using
2335				 * MODE SENSE(6) if this was a MODE SENSE(10)
2336				 * request, except if the requested mode page is
2337				 * too large for the MODE SENSE(6) single-byte
2338				 * allocation length field.
2339				 */
2340				if (use_10_for_ms) {
2341					if (len > 255)
2342						return -EIO;
2343					sdev->use_10_for_ms = 0;
2344					goto retry;
2345				}
2346			}
2347		}
2348		return -EIO;
2349	}
2350	if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2351		     (modepage == 6 || modepage == 8))) {
2352		/* Initio breakage? */
2353		header_length = 0;
2354		data->length = 13;
2355		data->medium_type = 0;
2356		data->device_specific = 0;
2357		data->longlba = 0;
2358		data->block_descriptor_length = 0;
2359	} else if (use_10_for_ms) {
2360		data->length = get_unaligned_be16(&buffer[0]) + 2;
2361		data->medium_type = buffer[2];
2362		data->device_specific = buffer[3];
2363		data->longlba = buffer[4] & 0x01;
2364		data->block_descriptor_length = get_unaligned_be16(&buffer[6]);
2365	} else {
2366		data->length = buffer[0] + 1;
2367		data->medium_type = buffer[1];
2368		data->device_specific = buffer[2];
2369		data->block_descriptor_length = buffer[3];
2370	}
2371	data->header_length = header_length;
2372
2373	return 0;
2374}
2375EXPORT_SYMBOL(scsi_mode_sense);
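
/*
 * Minimal consumer sketch (not compiled): on success the scsi_mode_data
 * header says where the page payload starts, regardless of whether the
 * 10-byte command or the 6-byte fallback was used.
 */
#if 0
unsigned char *page = buffer + data->header_length +
		      data->block_descriptor_length;

if ((page[0] & 0x3f) != modepage)	/* page code sanity check */
	return -EIO;
#endif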
2376
2377/**
2378 *	scsi_test_unit_ready - test if unit is ready
2379 *	@sdev:	scsi device to change the state of.
2380 *	@timeout: command timeout
2381 *	@retries: number of retries before failing
2382 *	@sshdr: output pointer for decoded sense information.
2383 *
2384 *	Returns zero if successful or an error if TUR failed.  For
2385 *	removable media, UNIT_ATTENTION sets ->changed flag.
2386 **/
2387int
2388scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2389		     struct scsi_sense_hdr *sshdr)
2390{
2391	char cmd[] = {
2392		TEST_UNIT_READY, 0, 0, 0, 0, 0,
2393	};
2394	const struct scsi_exec_args exec_args = {
2395		.sshdr = sshdr,
2396	};
2397	int result;
2398
2399	/* try to eat the UNIT_ATTENTION if there are enough retries */
2400	do {
2401		result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
2402					  timeout, 1, &exec_args);
2403		if (sdev->removable && result > 0 && scsi_sense_valid(sshdr) &&
2404		    sshdr->sense_key == UNIT_ATTENTION)
2405			sdev->changed = 1;
2406	} while (result > 0 && scsi_sense_valid(sshdr) &&
2407		 sshdr->sense_key == UNIT_ATTENTION && --retries);
2408
2409	return result;
2410}
2411EXPORT_SYMBOL(scsi_test_unit_ready);
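
/*
 * Illustrative sketch (not compiled): a probe path waiting for a unit
 * to spin up, letting the loop above absorb UNIT ATTENTIONs.
 */
#if 0
struct scsi_sense_hdr sshdr;
int ret = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);

if (ret)
	sdev_printk(KERN_INFO, sdev, "device not ready: %#x\n", ret);
#endif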
2412
2413/**
2414 *	scsi_device_set_state - Take the given device through the device state model.
2415 *	@sdev:	scsi device to change the state of.
2416 *	@state:	state to change to.
2417 *
2418 *	Returns zero if successful or an error if the requested
2419 *	transition is illegal.
2420 */
2421int
2422scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2423{
2424	enum scsi_device_state oldstate = sdev->sdev_state;
2425
2426	if (state == oldstate)
2427		return 0;
2428
2429	switch (state) {
2430	case SDEV_CREATED:
2431		switch (oldstate) {
2432		case SDEV_CREATED_BLOCK:
2433			break;
2434		default:
2435			goto illegal;
2436		}
2437		break;
2438
2439	case SDEV_RUNNING:
2440		switch (oldstate) {
2441		case SDEV_CREATED:
2442		case SDEV_OFFLINE:
2443		case SDEV_TRANSPORT_OFFLINE:
2444		case SDEV_QUIESCE:
2445		case SDEV_BLOCK:
2446			break;
2447		default:
2448			goto illegal;
2449		}
2450		break;
2451
2452	case SDEV_QUIESCE:
2453		switch (oldstate) {
2454		case SDEV_RUNNING:
2455		case SDEV_OFFLINE:
2456		case SDEV_TRANSPORT_OFFLINE:
2457			break;
2458		default:
2459			goto illegal;
2460		}
2461		break;
2462
2463	case SDEV_OFFLINE:
2464	case SDEV_TRANSPORT_OFFLINE:
2465		switch (oldstate) {
2466		case SDEV_CREATED:
2467		case SDEV_RUNNING:
2468		case SDEV_QUIESCE:
2469		case SDEV_BLOCK:
2470			break;
2471		default:
2472			goto illegal;
2473		}
2474		break;
2475
2476	case SDEV_BLOCK:
2477		switch (oldstate) {
2478		case SDEV_RUNNING:
2479		case SDEV_CREATED_BLOCK:
2480		case SDEV_QUIESCE:
2481		case SDEV_OFFLINE:
2482			break;
2483		default:
2484			goto illegal;
2485		}
2486		break;
2487
2488	case SDEV_CREATED_BLOCK:
2489		switch (oldstate) {
2490		case SDEV_CREATED:
2491			break;
2492		default:
2493			goto illegal;
2494		}
2495		break;
2496
2497	case SDEV_CANCEL:
2498		switch (oldstate) {
2499		case SDEV_CREATED:
2500		case SDEV_RUNNING:
2501		case SDEV_QUIESCE:
2502		case SDEV_OFFLINE:
2503		case SDEV_TRANSPORT_OFFLINE:
2504			break;
2505		default:
2506			goto illegal;
2507		}
2508		break;
2509
2510	case SDEV_DEL:
2511		switch (oldstate) {
2512		case SDEV_CREATED:
2513		case SDEV_RUNNING:
2514		case SDEV_OFFLINE:
2515		case SDEV_TRANSPORT_OFFLINE:
2516		case SDEV_CANCEL:
2517		case SDEV_BLOCK:
2518		case SDEV_CREATED_BLOCK:
2519			break;
2520		default:
2521			goto illegal;
2522		}
2523		break;
2524
2525	}
2526	sdev->offline_already = false;
2527	sdev->sdev_state = state;
2528	return 0;
2529
2530 illegal:
2531	SCSI_LOG_ERROR_RECOVERY(1,
2532				sdev_printk(KERN_ERR, sdev,
2533					    "Illegal state transition %s->%s",
2534					    scsi_device_state_name(oldstate),
2535					    scsi_device_state_name(state))
2536				);
2537	return -EINVAL;
2538}
2539EXPORT_SYMBOL(scsi_device_set_state);
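
/*
 * Illustrative sketch (not compiled): outside of the initial scan,
 * callers serialize state transitions with sdev->state_mutex.
 */
#if 0
mutex_lock(&sdev->state_mutex);
ret = scsi_device_set_state(sdev, SDEV_OFFLINE);
mutex_unlock(&sdev->state_mutex);
#endif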
2540
2541/**
2542 *	scsi_evt_emit - emit a single SCSI device uevent
2543 *	@sdev: associated SCSI device
2544 *	@evt: event to emit
2545 *
2546 *	Send a single uevent (scsi_event) to the associated scsi_device.
2547 */
2548static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2549{
2550	int idx = 0;
2551	char *envp[3];
2552
2553	switch (evt->evt_type) {
2554	case SDEV_EVT_MEDIA_CHANGE:
2555		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2556		break;
2557	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2558		scsi_rescan_device(sdev);
2559		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
2560		break;
2561	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2562		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
2563		break;
2564	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2565	       envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
2566		break;
2567	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2568		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
2569		break;
2570	case SDEV_EVT_LUN_CHANGE_REPORTED:
2571		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
2572		break;
2573	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
2574		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
2575		break;
2576	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
2577		envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
2578		break;
2579	default:
2580		/* do nothing */
2581		break;
2582	}
2583
2584	envp[idx++] = NULL;
2585
2586	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2587}
2588
2589/**
2590 *	scsi_evt_thread - send a uevent for each scsi event
2591 *	@work: work struct for scsi_device
2592 *
2593 *	Dispatch queued events to their associated scsi_device kobjects
2594 *	as uevents.
2595 */
2596void scsi_evt_thread(struct work_struct *work)
2597{
2598	struct scsi_device *sdev;
2599	enum scsi_device_event evt_type;
2600	LIST_HEAD(event_list);
2601
2602	sdev = container_of(work, struct scsi_device, event_work);
2603
2604	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
2605		if (test_and_clear_bit(evt_type, sdev->pending_events))
2606			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
2607
2608	while (1) {
2609		struct scsi_event *evt;
2610		struct list_head *this, *tmp;
2611		unsigned long flags;
2612
2613		spin_lock_irqsave(&sdev->list_lock, flags);
2614		list_splice_init(&sdev->event_list, &event_list);
2615		spin_unlock_irqrestore(&sdev->list_lock, flags);
2616
2617		if (list_empty(&event_list))
2618			break;
2619
2620		list_for_each_safe(this, tmp, &event_list) {
2621			evt = list_entry(this, struct scsi_event, node);
2622			list_del(&evt->node);
2623			scsi_evt_emit(sdev, evt);
2624			kfree(evt);
2625		}
2626	}
2627}
2628
2629/**
2630 * 	sdev_evt_send - send asserted event to uevent thread
2631 *	@sdev: scsi_device event occurred on
2632 *	@evt: event to send
2633 *
2634 *	Assert scsi device event asynchronously.
2635 */
2636void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2637{
2638	unsigned long flags;
2639
2640#if 0
2641	/* FIXME: currently this check eliminates all media change events
2642	 * for polled devices.  Need to update to discriminate between AN
2643	 * and polled events */
2644	if (!test_bit(evt->evt_type, sdev->supported_events)) {
2645		kfree(evt);
2646		return;
2647	}
2648#endif
2649
2650	spin_lock_irqsave(&sdev->list_lock, flags);
2651	list_add_tail(&evt->node, &sdev->event_list);
2652	schedule_work(&sdev->event_work);
2653	spin_unlock_irqrestore(&sdev->list_lock, flags);
2654}
2655EXPORT_SYMBOL_GPL(sdev_evt_send);
2656
2657/**
2658 * 	sdev_evt_alloc - allocate a new scsi event
2659 *	@evt_type: type of event to allocate
2660 *	@gfpflags: GFP flags for allocation
2661 *
2662 *	Allocates and returns a new scsi_event.
2663 */
2664struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2665				  gfp_t gfpflags)
2666{
2667	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2668	if (!evt)
2669		return NULL;
2670
2671	evt->evt_type = evt_type;
2672	INIT_LIST_HEAD(&evt->node);
2673
2674	/* evt_type-specific initialization, if any */
2675	switch (evt_type) {
2676	case SDEV_EVT_MEDIA_CHANGE:
2677	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2678	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2679	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2680	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2681	case SDEV_EVT_LUN_CHANGE_REPORTED:
2682	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
2683	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
2684	default:
2685		/* do nothing */
2686		break;
2687	}
2688
2689	return evt;
2690}
2691EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2692
2693/**
2694 * 	sdev_evt_send_simple - send asserted event to uevent thread
2695 *	@sdev: scsi_device event occurred on
2696 *	@evt_type: type of event to send
2697 *	@gfpflags: GFP flags for allocation
2698 *
2699 *	Assert scsi device event asynchronously, given an event type.
2700 */
2701void sdev_evt_send_simple(struct scsi_device *sdev,
2702			  enum scsi_device_event evt_type, gfp_t gfpflags)
2703{
2704	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2705	if (!evt) {
2706		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2707			    evt_type);
2708		return;
2709	}
2710
2711	sdev_evt_send(sdev, evt);
2712}
2713EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
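
/*
 * Illustrative sketch (not compiled): an LLD reporting a detected media
 * change from an atomic completion path.
 */
#if 0
sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
#endif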
2714
2715/**
2716 *	scsi_device_quiesce - Block all commands except power management.
2717 *	@sdev:	scsi device to quiesce.
2718 *
2719 *	This works by trying to transition to the SDEV_QUIESCE state
2720 *	(which must be a legal transition).  When the device is in this
2721 *	state, only power management requests will be accepted, all others will
2722 *	be deferred.
2723 *
2724 *	Must be called with user context, may sleep.
2725 *
2726 *	Returns zero if successful or an error if not.
2727 */
2728int
2729scsi_device_quiesce(struct scsi_device *sdev)
2730{
2731	struct request_queue *q = sdev->request_queue;
2732	int err;
2733
2734	/*
2735	 * It is allowed to call scsi_device_quiesce() multiple times from
2736	 * the same context but concurrent scsi_device_quiesce() calls are
2737	 * not allowed.
2738	 */
2739	WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
2740
2741	if (sdev->quiesced_by == current)
2742		return 0;
2743
2744	blk_set_pm_only(q);
2745
2746	blk_mq_freeze_queue(q);
2747	/*
2748	 * Ensure that the effect of blk_set_pm_only() will be visible
2749	 * for percpu_ref_tryget() callers that occur after the queue
2750	 * unfreeze even if the queue was already frozen before this function
2751	 * was called. See also https://lwn.net/Articles/573497/.
2752	 */
2753	synchronize_rcu();
2754	blk_mq_unfreeze_queue(q);
2755
2756	mutex_lock(&sdev->state_mutex);
2757	err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2758	if (err == 0)
2759		sdev->quiesced_by = current;
2760	else
2761		blk_clear_pm_only(q);
2762	mutex_unlock(&sdev->state_mutex);
2763
2764	return err;
2765}
2766EXPORT_SYMBOL(scsi_device_quiesce);
2767
2768/**
2769 *	scsi_device_resume - Restart user issued commands to a quiesced device.
2770 *	@sdev:	scsi device to resume.
2771 *
2772 *	Moves the device from quiesced back to running and restarts the
2773 *	queues.
2774 *
2775 *	Must be called with user context, may sleep.
2776 */
2777void scsi_device_resume(struct scsi_device *sdev)
2778{
2779	/* check if the device state was mutated prior to resume, and if
2780	 * so assume the state is being managed elsewhere (for example
2781	 * device deleted during suspend)
2782	 */
2783	mutex_lock(&sdev->state_mutex);
2784	if (sdev->sdev_state == SDEV_QUIESCE)
2785		scsi_device_set_state(sdev, SDEV_RUNNING);
2786	if (sdev->quiesced_by) {
2787		sdev->quiesced_by = NULL;
2788		blk_clear_pm_only(sdev->request_queue);
2789	}
2790	mutex_unlock(&sdev->state_mutex);
2791}
2792EXPORT_SYMBOL(scsi_device_resume);
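
/*
 * Illustrative pairing sketch (not compiled; my_sensitive_op is
 * hypothetical): quiesce defers everything except power management
 * requests, so the operation sees an otherwise idle device.
 */
#if 0
if (scsi_device_quiesce(sdev) == 0) {
	my_sensitive_op(sdev);
	scsi_device_resume(sdev);
}
#endif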
2793
2794static void
2795device_quiesce_fn(struct scsi_device *sdev, void *data)
2796{
2797	scsi_device_quiesce(sdev);
2798}
2799
2800void
2801scsi_target_quiesce(struct scsi_target *starget)
2802{
2803	starget_for_each_device(starget, NULL, device_quiesce_fn);
2804}
2805EXPORT_SYMBOL(scsi_target_quiesce);
2806
2807static void
2808device_resume_fn(struct scsi_device *sdev, void *data)
2809{
2810	scsi_device_resume(sdev);
2811}
2812
2813void
2814scsi_target_resume(struct scsi_target *starget)
2815{
2816	starget_for_each_device(starget, NULL, device_resume_fn);
2817}
2818EXPORT_SYMBOL(scsi_target_resume);
2819
2820static int __scsi_internal_device_block_nowait(struct scsi_device *sdev)
2821{
2822	if (scsi_device_set_state(sdev, SDEV_BLOCK))
2823		return scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2824
2825	return 0;
2826}
2827
2828void scsi_start_queue(struct scsi_device *sdev)
2829{
2830	if (cmpxchg(&sdev->queue_stopped, 1, 0))
2831		blk_mq_unquiesce_queue(sdev->request_queue);
2832}
2833
2834static void scsi_stop_queue(struct scsi_device *sdev)
2835{
2836	/*
2837	 * The atomic variable of ->queue_stopped covers that
2838	 * blk_mq_quiesce_queue* is balanced with blk_mq_unquiesce_queue.
2839	 *
2840	 * The caller needs to wait until quiesce is done.
2841	 */
2842	if (!cmpxchg(&sdev->queue_stopped, 0, 1))
2843		blk_mq_quiesce_queue_nowait(sdev->request_queue);
2844}
2845
2846/**
2847 * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
2848 * @sdev: device to block
2849 *
2850 * Pause SCSI command processing on the specified device. Does not sleep.
2851 *
2852 * Returns zero if successful or a negative error code upon failure.
2853 *
2854 * Notes:
2855 * This routine transitions the device to the SDEV_BLOCK state (which must be
2856 * a legal transition). When the device is in this state, command processing
2857 * is paused until the device leaves the SDEV_BLOCK state. See also
2858 * scsi_internal_device_unblock_nowait().
2859 */
2860int scsi_internal_device_block_nowait(struct scsi_device *sdev)
2861{
2862	int ret = __scsi_internal_device_block_nowait(sdev);
2863
2864	/*
2865	 * The device has transitioned to SDEV_BLOCK.  Stop the
2866	 * block layer from calling the midlayer with this device's
2867	 * request queue.
2868	 */
2869	if (!ret)
2870		scsi_stop_queue(sdev);
2871	return ret;
2872}
2873EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
2874
2875/**
2876 * scsi_device_block - try to transition to the SDEV_BLOCK state
2877 * @sdev: device to block
2878 * @data: dummy argument, ignored
2879 *
2880 * Pause SCSI command processing on the specified device. Callers must wait
2881 * until all ongoing scsi_queue_rq() calls have finished after this function
2882 * returns.
2883 *
2884 * Note:
2885 * This routine transitions the device to the SDEV_BLOCK state (which must be
2886 * a legal transition). When the device is in this state, command processing
2887 * is paused until the device leaves the SDEV_BLOCK state. See also
2888 * scsi_internal_device_unblock().
2889 */
2890static void scsi_device_block(struct scsi_device *sdev, void *data)
2891{
2892	int err;
2893	enum scsi_device_state state;
2894
2895	mutex_lock(&sdev->state_mutex);
2896	err = __scsi_internal_device_block_nowait(sdev);
2897	state = sdev->sdev_state;
2898	if (err == 0)
2899		/*
2900		 * scsi_stop_queue() must be called with the state_mutex
2901		 * held. Otherwise a simultaneous scsi_start_queue() call
2902		 * might unquiesce the queue before we quiesce it.
2903		 */
2904		scsi_stop_queue(sdev);
2905
2906	mutex_unlock(&sdev->state_mutex);
2907
2908	WARN_ONCE(err, "%s: failed to block %s in state %d\n",
2909		  __func__, dev_name(&sdev->sdev_gendev), state);
2910}
2911
2912/**
2913 * scsi_internal_device_unblock_nowait - resume a device after a block request
2914 * @sdev:	device to resume
2915 * @new_state:	state to set the device to after unblocking
2916 *
2917 * Restart the device queue for a previously suspended SCSI device. Does not
2918 * sleep.
2919 *
2920 * Returns zero if successful or a negative error code upon failure.
2921 *
2922 * Notes:
2923 * This routine transitions the device to the SDEV_RUNNING state or to one of
2924 * the offline states (which must be a legal transition) allowing the midlayer
2925 * to goose the queue for this device.
2926 */
2927int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
2928					enum scsi_device_state new_state)
2929{
2930	switch (new_state) {
2931	case SDEV_RUNNING:
2932	case SDEV_TRANSPORT_OFFLINE:
2933		break;
2934	default:
2935		return -EINVAL;
2936	}
2937
2938	/*
2939	 * Try to transition the scsi device to SDEV_RUNNING or one of the
2940	 * offlined states and goose the device queue if successful.
2941	 */
2942	switch (sdev->sdev_state) {
2943	case SDEV_BLOCK:
2944	case SDEV_TRANSPORT_OFFLINE:
2945		sdev->sdev_state = new_state;
2946		break;
2947	case SDEV_CREATED_BLOCK:
2948		if (new_state == SDEV_TRANSPORT_OFFLINE ||
2949		    new_state == SDEV_OFFLINE)
2950			sdev->sdev_state = new_state;
2951		else
2952			sdev->sdev_state = SDEV_CREATED;
2953		break;
2954	case SDEV_CANCEL:
2955	case SDEV_OFFLINE:
2956		break;
2957	default:
2958		return -EINVAL;
2959	}
2960	scsi_start_queue(sdev);
2961
2962	return 0;
2963}
2964EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);
2965
2966/**
2967 * scsi_internal_device_unblock - resume a device after a block request
2968 * @sdev:	device to resume
2969 * @new_state:	state to set the device to after unblocking
2970 *
2971 * Restart the device queue for a previously suspended SCSI device. May sleep.
2972 *
2973 * Returns zero if successful or a negative error code upon failure.
2974 *
2975 * Notes:
2976 * This routine transitions the device to the SDEV_RUNNING state or to one of
2977 * the offline states (which must be a legal transition) allowing the midlayer
2978 * to goose the queue for this device.
2979 */
2980static int scsi_internal_device_unblock(struct scsi_device *sdev,
2981					enum scsi_device_state new_state)
2982{
2983	int ret;
2984
2985	mutex_lock(&sdev->state_mutex);
2986	ret = scsi_internal_device_unblock_nowait(sdev, new_state);
2987	mutex_unlock(&sdev->state_mutex);
2988
2989	return ret;
2990}
2991
2992static int
2993target_block(struct device *dev, void *data)
2994{
2995	if (scsi_is_target_device(dev))
2996		starget_for_each_device(to_scsi_target(dev), NULL,
2997					scsi_device_block);
2998	return 0;
2999}
3000
3001/**
3002 * scsi_block_targets - transition all SCSI child devices to SDEV_BLOCK state
3003 * @dev: a parent device of one or more scsi_target devices
3004 * @shost: the Scsi_Host to which this device belongs
3005 *
3006 * Iterate over all children of @dev, which should be scsi_target devices,
3007 * and switch all subordinate scsi devices to SDEV_BLOCK state. Wait for
3008 * ongoing scsi_queue_rq() calls to finish. May sleep.
3009 *
3010 * Note:
3011 * @dev must not itself be a scsi_target device.
3012 */
3013void
3014scsi_block_targets(struct Scsi_Host *shost, struct device *dev)
3015{
3016	WARN_ON_ONCE(scsi_is_target_device(dev));
3017	device_for_each_child(dev, NULL, target_block);
3018	blk_mq_wait_quiesce_done(&shost->tag_set);
3019}
3020EXPORT_SYMBOL_GPL(scsi_block_targets);
3021
3022static void
3023device_unblock(struct scsi_device *sdev, void *data)
3024{
3025	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
3026}
3027
3028static int
3029target_unblock(struct device *dev, void *data)
3030{
3031	if (scsi_is_target_device(dev))
3032		starget_for_each_device(to_scsi_target(dev), data,
3033					device_unblock);
3034	return 0;
3035}
3036
3037void
3038scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
3039{
3040	if (scsi_is_target_device(dev))
3041		starget_for_each_device(to_scsi_target(dev), &new_state,
3042					device_unblock);
3043	else
3044		device_for_each_child(dev, &new_state, target_unblock);
3045}
3046EXPORT_SYMBOL_GPL(scsi_target_unblock);
3047
3048/**
3049 * scsi_host_block - Try to transition all logical units to the SDEV_BLOCK state
3050 * @shost: device to block
3051 *
3052 * Pause SCSI command processing for all logical units associated with the SCSI
3053 * host and wait until pending scsi_queue_rq() calls have finished.
3054 *
3055 * Returns zero if successful or a negative error code upon failure.
3056 */
3057int
3058scsi_host_block(struct Scsi_Host *shost)
3059{
3060	struct scsi_device *sdev;
3061	int ret;
3062
3063	/*
3064	 * Call scsi_internal_device_block_nowait so we can avoid
3065	 * calling synchronize_rcu() for each LUN.
3066	 */
3067	shost_for_each_device(sdev, shost) {
3068		mutex_lock(&sdev->state_mutex);
3069		ret = scsi_internal_device_block_nowait(sdev);
3070		mutex_unlock(&sdev->state_mutex);
3071		if (ret) {
3072			scsi_device_put(sdev);
3073			return ret;
3074		}
3075	}
3076
3077	/* Wait for ongoing scsi_queue_rq() calls to finish. */
3078	blk_mq_wait_quiesce_done(&shost->tag_set);
3079
3080	return 0;
3081}
3082EXPORT_SYMBOL_GPL(scsi_host_block);
3083
3084int
3085scsi_host_unblock(struct Scsi_Host *shost, int new_state)
3086{
3087	struct scsi_device *sdev;
3088	int ret = 0;
3089
3090	shost_for_each_device(sdev, shost) {
3091		ret = scsi_internal_device_unblock(sdev, new_state);
3092		if (ret) {
3093			scsi_device_put(sdev);
3094			break;
3095		}
3096	}
3097	return ret;
3098}
3099EXPORT_SYMBOL_GPL(scsi_host_unblock);
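
/*
 * Illustrative sketch (not compiled; my_reset_hw is hypothetical):
 * freeze every LUN on the host, recover the hardware, then return the
 * LUNs to SDEV_RUNNING.
 */
#if 0
if (scsi_host_block(shost) == 0) {
	my_reset_hw(shost);
	scsi_host_unblock(shost, SDEV_RUNNING);
}
#endif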
3100
3101/**
3102 * scsi_kmap_atomic_sg - find and atomically map an sg-element
3103 * @sgl:	scatter-gather list
3104 * @sg_count:	number of segments in sg
3105 * @offset:	offset in bytes into sg, on return offset into the mapped area
3106 * @len:	bytes to map, on return number of bytes mapped
3107 *
3108 * Returns virtual address of the start of the mapped page
3109 */
3110void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
3111			  size_t *offset, size_t *len)
3112{
3113	int i;
3114	size_t sg_len = 0, len_complete = 0;
3115	struct scatterlist *sg;
3116	struct page *page;
3117
3118	WARN_ON(!irqs_disabled());
3119
3120	for_each_sg(sgl, sg, sg_count, i) {
3121		len_complete = sg_len; /* Complete sg-entries */
3122		sg_len += sg->length;
3123		if (sg_len > *offset)
3124			break;
3125	}
3126
3127	if (unlikely(i == sg_count)) {
3128		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
3129			"elements %d\n",
3130		       __func__, sg_len, *offset, sg_count);
3131		WARN_ON(1);
3132		return NULL;
3133	}
3134
3135	/* Offset starting from the beginning of first page in this sg-entry */
3136	*offset = *offset - len_complete + sg->offset;
3137
3138	/* Assumption: contiguous pages can be accessed as "page + i" */
3139	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
3140	*offset &= ~PAGE_MASK;
3141
3142	/* Bytes in this sg-entry from *offset to the end of the page */
3143	sg_len = PAGE_SIZE - *offset;
3144	if (*len > sg_len)
3145		*len = sg_len;
3146
3147	return kmap_atomic(page);
3148}
3149EXPORT_SYMBOL(scsi_kmap_atomic_sg);
3150
3151/**
3152 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
3153 * @virt:	virtual address to be unmapped
3154 */
3155void scsi_kunmap_atomic_sg(void *virt)
3156{
3157	kunmap_atomic(virt);
3158}
3159EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
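
/*
 * Illustrative PIO-style sketch (not compiled; fifo_buf and the byte
 * counters are hypothetical, and the caller runs with interrupts
 * disabled as scsi_kmap_atomic_sg() demands). At most the remainder of
 * one page is mapped per call; loop for longer transfers.
 */
#if 0
size_t off = done_bytes, len = want_bytes;
void *vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
				  &off, &len);

if (vaddr) {
	memcpy(vaddr + off, fifo_buf, len);
	scsi_kunmap_atomic_sg(vaddr);
}
#endif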
3160
3161void sdev_disable_disk_events(struct scsi_device *sdev)
3162{
3163	atomic_inc(&sdev->disk_events_disable_depth);
3164}
3165EXPORT_SYMBOL(sdev_disable_disk_events);
3166
3167void sdev_enable_disk_events(struct scsi_device *sdev)
3168{
3169	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
3170		return;
3171	atomic_dec(&sdev->disk_events_disable_depth);
3172}
3173EXPORT_SYMBOL(sdev_enable_disk_events);
3174
3175static unsigned char designator_prio(const unsigned char *d)
3176{
3177	if (d[1] & 0x30)
3178		/* not associated with LUN */
3179		return 0;
3180
3181	if (d[3] == 0)
3182		/* invalid length */
3183		return 0;
3184
3185	/*
3186	 * Order of preference for lun descriptor:
3187	 * - SCSI name string
3188	 * - NAA IEEE Registered Extended
3189	 * - EUI-64 based 16-byte
3190	 * - EUI-64 based 12-byte
3191	 * - NAA IEEE Registered
3192	 * - NAA IEEE Extended
3193	 * - EUI-64 based 8-byte
3194	 * - SCSI name string (truncated)
3195	 * - T10 Vendor ID
3196 * as longer descriptors reduce the likelihood
3197	 * of identification clashes.
3198	 */
3199
3200	switch (d[1] & 0xf) {
3201	case 8:
3202		/* SCSI name string, variable-length UTF-8 */
3203		return 9;
3204	case 3:
3205		switch (d[4] >> 4) {
3206		case 6:
3207			/* NAA registered extended */
3208			return 8;
3209		case 5:
3210			/* NAA registered */
3211			return 5;
3212		case 4:
3213			/* NAA extended */
3214			return 4;
3215		case 3:
3216			/* NAA locally assigned */
3217			return 1;
3218		default:
3219			break;
3220		}
3221		break;
3222	case 2:
3223		switch (d[3]) {
3224		case 16:
3225			/* EUI64-based, 16 byte */
3226			return 7;
3227		case 12:
3228			/* EUI64-based, 12 byte */
3229			return 6;
3230		case 8:
3231			/* EUI64-based, 8 byte */
3232			return 3;
3233		default:
3234			break;
3235		}
3236		break;
3237	case 1:
3238		/* T10 vendor ID */
3239		return 1;
3240	default:
3241		break;
3242	}
3243
3244	return 0;
3245}
3246
3247/**
3248 * scsi_vpd_lun_id - return a unique device identification
3249 * @sdev: SCSI device
3250 * @id:   buffer for the identification
3251 * @id_len:  length of the buffer
3252 *
3253 * Copies a unique device identification into @id based
3254 * on the information in the VPD page 0x83 of the device.
3255 * The string will be formatted as a SCSI name string.
3256 *
3257 * Returns the length of the identification or error on failure.
3258 * If the identifier is longer than the supplied buffer the actual
3259 * identifier length is returned and the buffer is not zero-padded.
3260 */
3261int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
3262{
3263	u8 cur_id_prio = 0;
3264	u8 cur_id_size = 0;
3265	const unsigned char *d, *cur_id_str;
3266	const struct scsi_vpd *vpd_pg83;
3267	int id_size = -EINVAL;
3268
3269	rcu_read_lock();
3270	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
3271	if (!vpd_pg83) {
3272		rcu_read_unlock();
3273		return -ENXIO;
3274	}
3275
3276	/* The id string must be at least 20 bytes + terminating NULL byte */
3277	if (id_len < 21) {
3278		rcu_read_unlock();
3279		return -EINVAL;
3280	}
3281
3282	memset(id, 0, id_len);
3283	for (d = vpd_pg83->data + 4;
3284	     d < vpd_pg83->data + vpd_pg83->len;
3285	     d += d[3] + 4) {
3286		u8 prio = designator_prio(d);
3287
3288		if (prio == 0 || cur_id_prio > prio)
3289			continue;
3290
3291		switch (d[1] & 0xf) {
3292		case 0x1:
3293			/* T10 Vendor ID */
3294			if (cur_id_size > d[3])
3295				break;
3296			cur_id_prio = prio;
3297			cur_id_size = d[3];
3298			if (cur_id_size + 4 > id_len)
3299				cur_id_size = id_len - 4;
3300			cur_id_str = d + 4;
3301			id_size = snprintf(id, id_len, "t10.%*pE",
3302					   cur_id_size, cur_id_str);
3303			break;
3304		case 0x2:
3305			/* EUI-64 */
3306			cur_id_prio = prio;
3307			cur_id_size = d[3];
3308			cur_id_str = d + 4;
3309			switch (cur_id_size) {
3310			case 8:
3311				id_size = snprintf(id, id_len,
3312						   "eui.%8phN",
3313						   cur_id_str);
3314				break;
3315			case 12:
3316				id_size = snprintf(id, id_len,
3317						   "eui.%12phN",
3318						   cur_id_str);
3319				break;
3320			case 16:
3321				id_size = snprintf(id, id_len,
3322						   "eui.%16phN",
3323						   cur_id_str);
3324				break;
3325			default:
3326				break;
3327			}
3328			break;
3329		case 0x3:
3330			/* NAA */
3331			cur_id_prio = prio;
3332			cur_id_size = d[3];
3333			cur_id_str = d + 4;
3334			switch (cur_id_size) {
3335			case 8:
3336				id_size = snprintf(id, id_len,
3337						   "naa.%8phN",
3338						   cur_id_str);
3339				break;
3340			case 16:
3341				id_size = snprintf(id, id_len,
3342						   "naa.%16phN",
3343						   cur_id_str);
3344				break;
3345			default:
3346				break;
3347			}
3348			break;
3349		case 0x8:
3350			/* SCSI name string */
3351			if (cur_id_size > d[3])
3352				break;
3353			/* Prefer others for truncated descriptor */
3354			if (d[3] > id_len) {
3355				prio = 2;
3356				if (cur_id_prio > prio)
3357					break;
3358			}
3359			cur_id_prio = prio;
3360			cur_id_size = id_size = d[3];
3361			cur_id_str = d + 4;
3362			if (cur_id_size >= id_len)
3363				cur_id_size = id_len - 1;
3364			memcpy(id, cur_id_str, cur_id_size);
3365			break;
3366		default:
3367			break;
3368		}
3369	}
3370	rcu_read_unlock();
3371
3372	return id_size;
3373}
3374EXPORT_SYMBOL(scsi_vpd_lun_id);
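
/*
 * Illustrative sketch (not compiled): the buffer must hold at least 21
 * bytes; if the returned length exceeds the buffer size the identifier
 * was truncated and the buffer is not zero-padded.
 */
#if 0
char wwid[64];
int len = scsi_vpd_lun_id(sdev, wwid, sizeof(wwid));

if (len > 0 && len < (int)sizeof(wwid))
	sdev_printk(KERN_INFO, sdev, "wwid %s\n", wwid);
#endif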
3375
3376/**
3377 * scsi_vpd_tpg_id - return a target port group identifier
3378 * @sdev: SCSI device
3379 *
3380 * Returns the Target Port Group identifier from the information
3381 * in VPD page 0x83 of the device.
3382 *
3383 * Returns the identifier or error on failure.
3384 */
3385int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
3386{
3387	const unsigned char *d;
3388	const struct scsi_vpd *vpd_pg83;
3389	int group_id = -EAGAIN, rel_port = -1;
3390
3391	rcu_read_lock();
3392	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
3393	if (!vpd_pg83) {
3394		rcu_read_unlock();
3395		return -ENXIO;
3396	}
3397
3398	d = vpd_pg83->data + 4;
3399	while (d < vpd_pg83->data + vpd_pg83->len) {
3400		switch (d[1] & 0xf) {
3401		case 0x4:
3402			/* Relative target port */
3403			rel_port = get_unaligned_be16(&d[6]);
3404			break;
3405		case 0x5:
3406			/* Target port group */
3407			group_id = get_unaligned_be16(&d[6]);
3408			break;
3409		default:
3410			break;
3411		}
3412		d += d[3] + 4;
3413	}
3414	rcu_read_unlock();
3415
3416	if (group_id >= 0 && rel_id && rel_port != -1)
3417		*rel_id = rel_port;
3418
3419	return group_id;
3420}
3421EXPORT_SYMBOL(scsi_vpd_tpg_id);
3422
3423/**
3424 * scsi_build_sense - build sense data for a command
3425 * @scmd:	scsi command for which the sense should be formatted
3426 * @desc:	Sense format (non-zero == descriptor format,
3427 *              0 == fixed format)
3428 * @key:	Sense key
3429 * @asc:	Additional sense code
3430 * @ascq:	Additional sense code qualifier
3431 *
3432 **/
3433void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq)
3434{
3435	scsi_build_sense_buffer(desc, scmd->sense_buffer, key, asc, ascq);
3436	scmd->result = SAM_STAT_CHECK_CONDITION;
3437}
3438EXPORT_SYMBOL_GPL(scsi_build_sense);
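
/*
 * Illustrative sketch (not compiled): an LLD failing a command with
 * ILLEGAL REQUEST / INVALID COMMAND OPERATION CODE (0x20/0x00) in fixed
 * format before completing it.
 */
#if 0
scsi_build_sense(cmd, 0 /* fixed format */, ILLEGAL_REQUEST, 0x20, 0x00);
scsi_done(cmd);
#endif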
3439
3440#ifdef CONFIG_SCSI_LIB_KUNIT_TEST
3441#include "scsi_lib_test.c"
3442#endif