   1/*
   2 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
   3 *
   4 *  SCSI queueing library.
   5 *      Initial versions: Eric Youngdale (eric@andante.org).
   6 *                        Based upon conversations with large numbers
   7 *                        of people at Linux Expo.
   8 */
   9
  10#include <linux/bio.h>
  11#include <linux/bitops.h>
  12#include <linux/blkdev.h>
  13#include <linux/completion.h>
  14#include <linux/kernel.h>
  15#include <linux/export.h>
  16#include <linux/mempool.h>
  17#include <linux/slab.h>
  18#include <linux/init.h>
  19#include <linux/pci.h>
  20#include <linux/delay.h>
  21#include <linux/hardirq.h>
  22#include <linux/scatterlist.h>
  23
  24#include <scsi/scsi.h>
  25#include <scsi/scsi_cmnd.h>
  26#include <scsi/scsi_dbg.h>
  27#include <scsi/scsi_device.h>
  28#include <scsi/scsi_driver.h>
  29#include <scsi/scsi_eh.h>
  30#include <scsi/scsi_host.h>
  31
  32#include "scsi_priv.h"
  33#include "scsi_logging.h"
  34
  35
  36#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
  37#define SG_MEMPOOL_SIZE		2
  38
  39struct scsi_host_sg_pool {
  40	size_t		size;
  41	char		*name;
  42	struct kmem_cache	*slab;
  43	mempool_t	*pool;
  44};
  45
  46#define SP(x) { x, "sgpool-" __stringify(x) }
  47#if (SCSI_MAX_SG_SEGMENTS < 32)
  48#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
  49#endif
  50static struct scsi_host_sg_pool scsi_sg_pools[] = {
  51	SP(8),
  52	SP(16),
  53#if (SCSI_MAX_SG_SEGMENTS > 32)
  54	SP(32),
  55#if (SCSI_MAX_SG_SEGMENTS > 64)
  56	SP(64),
  57#if (SCSI_MAX_SG_SEGMENTS > 128)
  58	SP(128),
  59#if (SCSI_MAX_SG_SEGMENTS > 256)
  60#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
  61#endif
  62#endif
  63#endif
  64#endif
  65	SP(SCSI_MAX_SG_SEGMENTS)
  66};
  67#undef SP
  68
  69struct kmem_cache *scsi_sdb_cache;
  70
  71/*
   72 * When to reinvoke queueing after a resource shortage.  It is set to 3 msecs
   73 * so as not to change behaviour from the previous unplug mechanism;
   74 * experimentation may prove this needs changing.
  75 */
  76#define SCSI_QUEUE_DELAY	3
  77
  78/*
  79 * Function:	scsi_unprep_request()
  80 *
  81 * Purpose:	Remove all preparation done for a request, including its
  82 *		associated scsi_cmnd, so that it can be requeued.
  83 *
  84 * Arguments:	req	- request to unprepare
  85 *
  86 * Lock status:	Assumed that no locks are held upon entry.
  87 *
  88 * Returns:	Nothing.
  89 */
  90static void scsi_unprep_request(struct request *req)
  91{
  92	struct scsi_cmnd *cmd = req->special;
  93
  94	blk_unprep_request(req);
  95	req->special = NULL;
  96
  97	scsi_put_command(cmd);
  98}
  99
 100/**
 101 * __scsi_queue_insert - private queue insertion
 102 * @cmd: The SCSI command being requeued
 103 * @reason:  The reason for the requeue
 104 * @unbusy: Whether the queue should be unbusied
 105 *
 106 * This is a private queue insertion.  The public interface
 107 * scsi_queue_insert() always assumes the queue should be unbusied
 108 * because it's always called before the completion.  This function is
 109 * for a requeue after completion, which should only occur in this
 110 * file.
 111 */
 112static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
 113{
 114	struct Scsi_Host *host = cmd->device->host;
 115	struct scsi_device *device = cmd->device;
 116	struct scsi_target *starget = scsi_target(device);
 117	struct request_queue *q = device->request_queue;
 118	unsigned long flags;
 119
 120	SCSI_LOG_MLQUEUE(1,
 121		 printk("Inserting command %p into mlqueue\n", cmd));
 122
 123	/*
 124	 * Set the appropriate busy bit for the device/host.
 125	 *
 126	 * If the host/device isn't busy, assume that something actually
 127	 * completed, and that we should be able to queue a command now.
 128	 *
 129	 * Note that the prior mid-layer assumption that any host could
 130	 * always queue at least one command is now broken.  The mid-layer
 131	 * will implement a user specifiable stall (see
 132	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
 133	 * if a command is requeued with no other commands outstanding
 134	 * either for the device or for the host.
 135	 */
 136	switch (reason) {
 137	case SCSI_MLQUEUE_HOST_BUSY:
 138		host->host_blocked = host->max_host_blocked;
 139		break;
 140	case SCSI_MLQUEUE_DEVICE_BUSY:
 141	case SCSI_MLQUEUE_EH_RETRY:
 142		device->device_blocked = device->max_device_blocked;
 143		break;
 144	case SCSI_MLQUEUE_TARGET_BUSY:
 145		starget->target_blocked = starget->max_target_blocked;
 146		break;
 147	}
 148
 149	/*
 150	 * Decrement the counters, since these commands are no longer
 151	 * active on the host/device.
 152	 */
 153	if (unbusy)
 154		scsi_device_unbusy(device);
 155
 156	/*
 157	 * Requeue this command.  It will go before all other commands
 158	 * that are already in the queue.
 159	 */
 160	spin_lock_irqsave(q->queue_lock, flags);
 161	blk_requeue_request(q, cmd->request);
 162	spin_unlock_irqrestore(q->queue_lock, flags);
 163
 164	kblockd_schedule_work(q, &device->requeue_work);
 165
 166	return 0;
 167}
 168
 169/*
 170 * Function:    scsi_queue_insert()
 171 *
 172 * Purpose:     Insert a command in the midlevel queue.
 173 *
 174 * Arguments:   cmd    - command that we are adding to queue.
 175 *              reason - why we are inserting command to queue.
 176 *
 177 * Lock status: Assumed that lock is not held upon entry.
 178 *
 179 * Returns:     Nothing.
 180 *
 181 * Notes:       We do this for one of two cases.  Either the host is busy
 182 *              and it cannot accept any more commands for the time being,
 183 *              or the device returned QUEUE_FULL and can accept no more
 184 *              commands.
 185 * Notes:       This could be called either from an interrupt context or a
 186 *              normal process context.
 187 */
 188int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 189{
 190	return __scsi_queue_insert(cmd, reason, 1);
 191}
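
/*
 * Illustrative sketch, not part of scsi_lib.c: scsi_queue_insert() is what
 * ultimately services the SCSI_MLQUEUE_* return codes.  A low-level driver
 * that cannot take a command returns one of them from its ->queuecommand()
 * hook and the dispatch path feeds the code back into scsi_queue_insert().
 * The example_adapter structure, its fw_resetting flag and the helper name
 * below are invented for illustration only.
 */
#if 0	/* example only, never compiled into scsi_lib.c */
struct example_adapter {
	int fw_resetting;
};

static int example_queuecommand(struct Scsi_Host *shost,
				struct scsi_cmnd *cmd)
{
	struct example_adapter *adap = shost_priv(shost);

	if (adap->fw_resetting)
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer requeues the command */

	/* ... hand the command to the hardware here ... */
	return 0;
}
#endif
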
 192/**
 193 * scsi_execute - insert request and wait for the result
 194 * @sdev:	scsi device
 195 * @cmd:	scsi command
 196 * @data_direction: data direction
 197 * @buffer:	data buffer
 198 * @bufflen:	len of buffer
 199 * @sense:	optional sense buffer
  200 * @timeout:	request timeout in jiffies
  201 * @retries:	number of times to retry request
  202 * @flags:	flags to be ORed into the request's cmd_flags
 203 * @resid:	optional residual length
 204 *
 205 * returns the req->errors value which is the scsi_cmnd result
 206 * field.
 207 */
 208int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 209		 int data_direction, void *buffer, unsigned bufflen,
 210		 unsigned char *sense, int timeout, int retries, int flags,
 211		 int *resid)
 212{
 213	struct request *req;
 214	int write = (data_direction == DMA_TO_DEVICE);
 215	int ret = DRIVER_ERROR << 24;
 216
 217	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
 218	if (!req)
 219		return ret;
 220
 221	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,
 222					buffer, bufflen, __GFP_WAIT))
 223		goto out;
 224
 225	req->cmd_len = COMMAND_SIZE(cmd[0]);
 226	memcpy(req->cmd, cmd, req->cmd_len);
 227	req->sense = sense;
 228	req->sense_len = 0;
 229	req->retries = retries;
 230	req->timeout = timeout;
 231	req->cmd_type = REQ_TYPE_BLOCK_PC;
 232	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
 233
 234	/*
 235	 * head injection *required* here otherwise quiesce won't work
 236	 */
 237	blk_execute_rq(req->q, NULL, req, 1);
 238
 239	/*
 240	 * Some devices (USB mass-storage in particular) may transfer
 241	 * garbage data together with a residue indicating that the data
 242	 * is invalid.  Prevent the garbage from being misinterpreted
 243	 * and prevent security leaks by zeroing out the excess data.
 244	 */
 245	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
 246		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
 247
 248	if (resid)
 249		*resid = req->resid_len;
 250	ret = req->errors;
 251 out:
 252	blk_put_request(req);
 253
 254	return ret;
 255}
 256EXPORT_SYMBOL(scsi_execute);
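
/*
 * Illustrative sketch, not part of scsi_lib.c: a typical scsi_execute()
 * caller issuing a TEST UNIT READY, which transfers no data.  The helper
 * name and the fixed timeout/retry values are arbitrary.
 */
#if 0	/* example only, never compiled into scsi_lib.c */
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	int result;

	result = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL,
			      30 * HZ, 3, 0, NULL);

	/* result mirrors req->errors / cmd->result; zero means the device answered GOOD */
	return result ? -EIO : 0;
}
#endif
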
 257
 258
 259int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
 260		     int data_direction, void *buffer, unsigned bufflen,
 261		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
 262		     int *resid)
 263{
 264	char *sense = NULL;
 265	int result;
 266	
 267	if (sshdr) {
 268		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
 269		if (!sense)
 270			return DRIVER_ERROR << 24;
 271	}
 272	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
 273			      sense, timeout, retries, 0, resid);
 274	if (sshdr)
 275		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
 276
 277	kfree(sense);
 278	return result;
 279}
 280EXPORT_SYMBOL(scsi_execute_req);
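
/*
 * Illustrative sketch, not part of scsi_lib.c: scsi_execute_req() spares the
 * caller the raw sense-buffer handling and hands back a decoded sense header
 * instead.  Here a standard INQUIRY is issued into a caller-provided buffer
 * of at least 36 bytes; the helper name and the retry policy are arbitrary.
 */
#if 0	/* example only, never compiled into scsi_lib.c */
static int example_inquiry(struct scsi_device *sdev, unsigned char *buf)
{
	/* 36 bytes is the mandatory part of the standard INQUIRY data */
	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };
	struct scsi_sense_hdr sshdr;
	int result;

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, 36,
				  &sshdr, 30 * HZ, 3, NULL);
	if (result && scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == UNIT_ATTENTION)
		return -EAGAIN;		/* transient; the caller could simply retry */

	return result ? -EIO : 0;
}
#endif
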
 281
 282/*
 283 * Function:    scsi_init_cmd_errh()
 284 *
 285 * Purpose:     Initialize cmd fields related to error handling.
 286 *
 287 * Arguments:   cmd	- command that is ready to be queued.
 288 *
 289 * Notes:       This function has the job of initializing a number of
 290 *              fields related to error handling.   Typically this will
 291 *              be called once for each command, as required.
 292 */
 293static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
 294{
 295	cmd->serial_number = 0;
 296	scsi_set_resid(cmd, 0);
 297	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 298	if (cmd->cmd_len == 0)
 299		cmd->cmd_len = scsi_command_size(cmd->cmnd);
 300}
 301
 302void scsi_device_unbusy(struct scsi_device *sdev)
 303{
 304	struct Scsi_Host *shost = sdev->host;
 305	struct scsi_target *starget = scsi_target(sdev);
 306	unsigned long flags;
 307
 308	spin_lock_irqsave(shost->host_lock, flags);
 309	shost->host_busy--;
 310	starget->target_busy--;
 311	if (unlikely(scsi_host_in_recovery(shost) &&
 312		     (shost->host_failed || shost->host_eh_scheduled)))
 313		scsi_eh_wakeup(shost);
 314	spin_unlock(shost->host_lock);
 315	spin_lock(sdev->request_queue->queue_lock);
 316	sdev->device_busy--;
 317	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
 318}
 319
 320/*
 321 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 322 * and call blk_run_queue for all the scsi_devices on the target -
 323 * including current_sdev first.
 324 *
 325 * Called with *no* scsi locks held.
 326 */
 327static void scsi_single_lun_run(struct scsi_device *current_sdev)
 328{
 329	struct Scsi_Host *shost = current_sdev->host;
 330	struct scsi_device *sdev, *tmp;
 331	struct scsi_target *starget = scsi_target(current_sdev);
 332	unsigned long flags;
 333
 334	spin_lock_irqsave(shost->host_lock, flags);
 335	starget->starget_sdev_user = NULL;
 336	spin_unlock_irqrestore(shost->host_lock, flags);
 337
 338	/*
 339	 * Call blk_run_queue for all LUNs on the target, starting with
 340	 * current_sdev. We race with others (to set starget_sdev_user),
 341	 * but in most cases, we will be first. Ideally, each LU on the
 342	 * target would get some limited time or requests on the target.
 343	 */
 344	blk_run_queue(current_sdev->request_queue);
 345
 346	spin_lock_irqsave(shost->host_lock, flags);
 347	if (starget->starget_sdev_user)
 348		goto out;
 349	list_for_each_entry_safe(sdev, tmp, &starget->devices,
 350			same_target_siblings) {
 351		if (sdev == current_sdev)
 352			continue;
 353		if (scsi_device_get(sdev))
 354			continue;
 355
 356		spin_unlock_irqrestore(shost->host_lock, flags);
 357		blk_run_queue(sdev->request_queue);
 358		spin_lock_irqsave(shost->host_lock, flags);
 359	
 360		scsi_device_put(sdev);
 361	}
 362 out:
 363	spin_unlock_irqrestore(shost->host_lock, flags);
 364}
 365
 366static inline int scsi_device_is_busy(struct scsi_device *sdev)
 367{
 368	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
 369		return 1;
 370
 371	return 0;
 372}
 373
 374static inline int scsi_target_is_busy(struct scsi_target *starget)
 375{
 376	return ((starget->can_queue > 0 &&
 377		 starget->target_busy >= starget->can_queue) ||
 378		 starget->target_blocked);
 379}
 380
 381static inline int scsi_host_is_busy(struct Scsi_Host *shost)
 382{
 383	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
 384	    shost->host_blocked || shost->host_self_blocked)
 385		return 1;
 386
 387	return 0;
 388}
 389
 390/*
 391 * Function:	scsi_run_queue()
 392 *
 393 * Purpose:	Select a proper request queue to serve next
 394 *
 395 * Arguments:	q	- last request's queue
 396 *
 397 * Returns:     Nothing
 398 *
 399 * Notes:	The previous command was completely finished, start
 400 *		a new one if possible.
 401 */
 402static void scsi_run_queue(struct request_queue *q)
 403{
 404	struct scsi_device *sdev = q->queuedata;
 405	struct Scsi_Host *shost;
 406	LIST_HEAD(starved_list);
 407	unsigned long flags;
 408
 409	shost = sdev->host;
 410	if (scsi_target(sdev)->single_lun)
 411		scsi_single_lun_run(sdev);
 412
 413	spin_lock_irqsave(shost->host_lock, flags);
 414	list_splice_init(&shost->starved_list, &starved_list);
 415
 416	while (!list_empty(&starved_list)) {
 417		/*
 418		 * As long as shost is accepting commands and we have
 419		 * starved queues, call blk_run_queue. scsi_request_fn
 420		 * drops the queue_lock and can add us back to the
 421		 * starved_list.
 422		 *
 423		 * host_lock protects the starved_list and starved_entry.
 424		 * scsi_request_fn must get the host_lock before checking
 425		 * or modifying starved_list or starved_entry.
 426		 */
 427		if (scsi_host_is_busy(shost))
 428			break;
 429
 430		sdev = list_entry(starved_list.next,
 431				  struct scsi_device, starved_entry);
 432		list_del_init(&sdev->starved_entry);
 433		if (scsi_target_is_busy(scsi_target(sdev))) {
 434			list_move_tail(&sdev->starved_entry,
 435				       &shost->starved_list);
 436			continue;
 437		}
 438
 439		spin_unlock(shost->host_lock);
 440		spin_lock(sdev->request_queue->queue_lock);
 441		__blk_run_queue(sdev->request_queue);
 442		spin_unlock(sdev->request_queue->queue_lock);
 443		spin_lock(shost->host_lock);
 444	}
 445	/* put any unprocessed entries back */
 446	list_splice(&starved_list, &shost->starved_list);
 447	spin_unlock_irqrestore(shost->host_lock, flags);
 448
 449	blk_run_queue(q);
 450}
 451
 452void scsi_requeue_run_queue(struct work_struct *work)
 453{
 454	struct scsi_device *sdev;
 455	struct request_queue *q;
 456
 457	sdev = container_of(work, struct scsi_device, requeue_work);
 458	q = sdev->request_queue;
 459	scsi_run_queue(q);
 460}
 461
 462/*
 463 * Function:	scsi_requeue_command()
 464 *
 465 * Purpose:	Handle post-processing of completed commands.
 466 *
 467 * Arguments:	q	- queue to operate on
 468 *		cmd	- command that may need to be requeued.
 469 *
 470 * Returns:	Nothing
 471 *
 472 * Notes:	After command completion, there may be blocks left
 473 *		over which weren't finished by the previous command
 474 *		this can be for a number of reasons - the main one is
 475 *		I/O errors in the middle of the request, in which case
 476 *		we need to request the blocks that come after the bad
 477 *		sector.
 478 * Notes:	Upon return, cmd is a stale pointer.
 479 */
 480static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
 481{
 482	struct scsi_device *sdev = cmd->device;
 483	struct request *req = cmd->request;
 484	unsigned long flags;
 485
 486	/*
 487	 * We need to hold a reference on the device to avoid the queue being
 488	 * killed after the unlock and before scsi_run_queue is invoked which
 489	 * may happen because scsi_unprep_request() puts the command which
 490	 * releases its reference on the device.
 491	 */
 492	get_device(&sdev->sdev_gendev);
 493
 494	spin_lock_irqsave(q->queue_lock, flags);
 495	scsi_unprep_request(req);
 496	blk_requeue_request(q, req);
 497	spin_unlock_irqrestore(q->queue_lock, flags);
 498
 499	scsi_run_queue(q);
 500
 501	put_device(&sdev->sdev_gendev);
 502}
 503
 504void scsi_next_command(struct scsi_cmnd *cmd)
 505{
 506	struct scsi_device *sdev = cmd->device;
 507	struct request_queue *q = sdev->request_queue;
 508
 509	/* need to hold a reference on the device before we let go of the cmd */
 510	get_device(&sdev->sdev_gendev);
 511
 512	scsi_put_command(cmd);
 513	scsi_run_queue(q);
 514
 515	/* ok to remove device now */
 516	put_device(&sdev->sdev_gendev);
 517}
 518
 519void scsi_run_host_queues(struct Scsi_Host *shost)
 520{
 521	struct scsi_device *sdev;
 522
 523	shost_for_each_device(sdev, shost)
 524		scsi_run_queue(sdev->request_queue);
 525}
 526
 527static void __scsi_release_buffers(struct scsi_cmnd *, int);
 528
 529/*
 530 * Function:    scsi_end_request()
 531 *
 532 * Purpose:     Post-processing of completed commands (usually invoked at end
 533 *		of upper level post-processing and scsi_io_completion).
 534 *
 535 * Arguments:   cmd	 - command that is complete.
 536 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 537 *              bytes    - number of bytes of completed I/O
 538 *		requeue  - indicates whether we should requeue leftovers.
 539 *
 540 * Lock status: Assumed that lock is not held upon entry.
 541 *
 542 * Returns:     cmd if requeue required, NULL otherwise.
 543 *
 544 * Notes:       This is called for block device requests in order to
 545 *              mark some number of sectors as complete.
 546 * 
 547 *		We are guaranteeing that the request queue will be goosed
 548 *		at some point during this call.
 549 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 550 */
 551static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
 552					  int bytes, int requeue)
 553{
 554	struct request_queue *q = cmd->device->request_queue;
 555	struct request *req = cmd->request;
 556
 557	/*
 558	 * If there are blocks left over at the end, set up the command
 559	 * to queue the remainder of them.
 560	 */
 561	if (blk_end_request(req, error, bytes)) {
  562		/* kill remainder if no retries */
 563		if (error && scsi_noretry_cmd(cmd))
 564			blk_end_request_all(req, error);
 565		else {
 566			if (requeue) {
 567				/*
 568				 * Bleah.  Leftovers again.  Stick the
 569				 * leftovers in the front of the
 570				 * queue, and goose the queue again.
 571				 */
 572				scsi_release_buffers(cmd);
 573				scsi_requeue_command(q, cmd);
 574				cmd = NULL;
 575			}
 576			return cmd;
 577		}
 578	}
 579
 580	/*
 581	 * This will goose the queue request function at the end, so we don't
 582	 * need to worry about launching another command.
 583	 */
 584	__scsi_release_buffers(cmd, 0);
 585	scsi_next_command(cmd);
 586	return NULL;
 587}
 588
 589static inline unsigned int scsi_sgtable_index(unsigned short nents)
 590{
 591	unsigned int index;
 592
 593	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
 594
 595	if (nents <= 8)
 596		index = 0;
 597	else
 598		index = get_count_order(nents) - 3;
 599
 600	return index;
 601}
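
/*
 * Worked example of the mapping above, assuming SCSI_MAX_SG_SEGMENTS is 128:
 * nents 1..8 -> index 0 (sgpool-8), 9..16 -> 1 (sgpool-16),
 * 17..32 -> 2 (sgpool-32), 33..64 -> 3 (sgpool-64), 65..128 -> 4 (sgpool-128).
 * For instance, get_count_order(9) == 4, so the index is 4 - 3 == 1.
 */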
 602
 603static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
 604{
 605	struct scsi_host_sg_pool *sgp;
 606
 607	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
 608	mempool_free(sgl, sgp->pool);
 609}
 610
 611static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
 612{
 613	struct scsi_host_sg_pool *sgp;
 614
 615	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
 616	return mempool_alloc(sgp->pool, gfp_mask);
 617}
 618
 619static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
 620			      gfp_t gfp_mask)
 621{
 622	int ret;
 623
 624	BUG_ON(!nents);
 625
 626	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
 627			       gfp_mask, scsi_sg_alloc);
 628	if (unlikely(ret))
 629		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
 630				scsi_sg_free);
 631
 632	return ret;
 633}
 634
 635static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
 636{
 637	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
 638}
 639
 640static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
 641{
 642
 643	if (cmd->sdb.table.nents)
 644		scsi_free_sgtable(&cmd->sdb);
 645
 646	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
 647
 648	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
 649		struct scsi_data_buffer *bidi_sdb =
 650			cmd->request->next_rq->special;
 651		scsi_free_sgtable(bidi_sdb);
 652		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
 653		cmd->request->next_rq->special = NULL;
 654	}
 655
 656	if (scsi_prot_sg_count(cmd))
 657		scsi_free_sgtable(cmd->prot_sdb);
 658}
 659
 660/*
 661 * Function:    scsi_release_buffers()
 662 *
 663 * Purpose:     Completion processing for block device I/O requests.
 664 *
 665 * Arguments:   cmd	- command that we are bailing.
 666 *
 667 * Lock status: Assumed that no lock is held upon entry.
 668 *
 669 * Returns:     Nothing
 670 *
 671 * Notes:       In the event that an upper level driver rejects a
 672 *		command, we must release resources allocated during
 673 *		the __init_io() function.  Primarily this would involve
 674 *		the scatter-gather table, and potentially any bounce
 675 *		buffers.
 676 */
 677void scsi_release_buffers(struct scsi_cmnd *cmd)
 678{
 679	__scsi_release_buffers(cmd, 1);
 680}
 681EXPORT_SYMBOL(scsi_release_buffers);
 682
 683static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
 684{
 685	int error = 0;
 686
 687	switch(host_byte(result)) {
 688	case DID_TRANSPORT_FAILFAST:
 689		error = -ENOLINK;
 690		break;
 691	case DID_TARGET_FAILURE:
 692		set_host_byte(cmd, DID_OK);
 693		error = -EREMOTEIO;
 694		break;
 695	case DID_NEXUS_FAILURE:
 696		set_host_byte(cmd, DID_OK);
 697		error = -EBADE;
 698		break;
 699	default:
 700		error = -EIO;
 701		break;
 702	}
 703
 704	return error;
 705}
 706
 707/*
 708 * Function:    scsi_io_completion()
 709 *
 710 * Purpose:     Completion processing for block device I/O requests.
 711 *
 712 * Arguments:   cmd   - command that is finished.
 713 *
 714 * Lock status: Assumed that no lock is held upon entry.
 715 *
 716 * Returns:     Nothing
 717 *
 718 * Notes:       This function is matched in terms of capabilities to
 719 *              the function that created the scatter-gather list.
 720 *              In other words, if there are no bounce buffers
 721 *              (the normal case for most drivers), we don't need
 722 *              the logic to deal with cleaning up afterwards.
 723 *
 724 *		We must call scsi_end_request().  This will finish off
 725 *		the specified number of sectors.  If we are done, the
 726 *		command block will be released and the queue function
 727 *		will be goosed.  If we are not done then we have to
 728 *		figure out what to do next:
 729 *
 730 *		a) We can call scsi_requeue_command().  The request
 731 *		   will be unprepared and put back on the queue.  Then
 732 *		   a new command will be created for it.  This should
 733 *		   be used if we made forward progress, or if we want
 734 *		   to switch from READ(10) to READ(6) for example.
 735 *
 736 *		b) We can call scsi_queue_insert().  The request will
 737 *		   be put back on the queue and retried using the same
 738 *		   command as before, possibly after a delay.
 739 *
 740 *		c) We can call blk_end_request() with -EIO to fail
 741 *		   the remainder of the request.
 742 */
 743void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 744{
 745	int result = cmd->result;
 746	struct request_queue *q = cmd->device->request_queue;
 747	struct request *req = cmd->request;
 748	int error = 0;
 749	struct scsi_sense_hdr sshdr;
 750	int sense_valid = 0;
 751	int sense_deferred = 0;
 752	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
 753	      ACTION_DELAYED_RETRY} action;
 754	char *description = NULL;
 755
 756	if (result) {
 757		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
 758		if (sense_valid)
 759			sense_deferred = scsi_sense_is_deferred(&sshdr);
 760	}
 761
 762	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
 763		if (result) {
 764			if (sense_valid && req->sense) {
 765				/*
 766				 * SG_IO wants current and deferred errors
 767				 */
 768				int len = 8 + cmd->sense_buffer[7];
 769
 770				if (len > SCSI_SENSE_BUFFERSIZE)
 771					len = SCSI_SENSE_BUFFERSIZE;
 772				memcpy(req->sense, cmd->sense_buffer,  len);
 773				req->sense_len = len;
 774			}
 775			if (!sense_deferred)
 776				error = __scsi_error_from_host_byte(cmd, result);
 777		}
 778		/*
 779		 * __scsi_error_from_host_byte may have reset the host_byte
 780		 */
 781		req->errors = cmd->result;
 782
 783		req->resid_len = scsi_get_resid(cmd);
 784
 785		if (scsi_bidi_cmnd(cmd)) {
 786			/*
  787			 * Bidi commands must be completed as a whole,
 788			 * both sides at once.
 789			 */
 790			req->next_rq->resid_len = scsi_in(cmd)->resid;
 791
 792			scsi_release_buffers(cmd);
 793			blk_end_request_all(req, 0);
 794
 795			scsi_next_command(cmd);
 796			return;
 797		}
 798	}
 799
 800	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
 801	BUG_ON(blk_bidi_rq(req));
 802
 803	/*
 804	 * Next deal with any sectors which we were able to correctly
 805	 * handle.
 806	 */
 807	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
 808				      "%d bytes done.\n",
 809				      blk_rq_sectors(req), good_bytes));
 810
 811	/*
 812	 * Recovered errors need reporting, but they're always treated
 813	 * as success, so fiddle the result code here.  For BLOCK_PC
 814	 * we already took a copy of the original into rq->errors which
 815	 * is what gets returned to the user
 816	 */
 817	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
 818		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
 819		 * print since caller wants ATA registers. Only occurs on
 820		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
 821		 */
 822		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
 823			;
 824		else if (!(req->cmd_flags & REQ_QUIET))
 825			scsi_print_sense("", cmd);
 826		result = 0;
 827		/* BLOCK_PC may have set error */
 828		error = 0;
 829	}
 830
 831	/*
 832	 * A number of bytes were successfully read.  If there
 833	 * are leftovers and there is some kind of error
 834	 * (result != 0), retry the rest.
 835	 */
 836	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
 837		return;
 838
 839	error = __scsi_error_from_host_byte(cmd, result);
 840
 841	if (host_byte(result) == DID_RESET) {
 842		/* Third party bus reset or reset for error recovery
 843		 * reasons.  Just retry the command and see what
 844		 * happens.
 845		 */
 846		action = ACTION_RETRY;
 847	} else if (sense_valid && !sense_deferred) {
 848		switch (sshdr.sense_key) {
 849		case UNIT_ATTENTION:
 850			if (cmd->device->removable) {
 851				/* Detected disc change.  Set a bit
 852				 * and quietly refuse further access.
 853				 */
 854				cmd->device->changed = 1;
 855				description = "Media Changed";
 856				action = ACTION_FAIL;
 857			} else {
 858				/* Must have been a power glitch, or a
 859				 * bus reset.  Could not have been a
 860				 * media change, so we just retry the
 861				 * command and see what happens.
 862				 */
 863				action = ACTION_RETRY;
 864			}
 865			break;
 866		case ILLEGAL_REQUEST:
 867			/* If we had an ILLEGAL REQUEST returned, then
 868			 * we may have performed an unsupported
 869			 * command.  The only thing this should be
 870			 * would be a ten byte read where only a six
 871			 * byte read was supported.  Also, on a system
 872			 * where READ CAPACITY failed, we may have
 873			 * read past the end of the disk.
 874			 */
 875			if ((cmd->device->use_10_for_rw &&
 876			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
 877			    (cmd->cmnd[0] == READ_10 ||
 878			     cmd->cmnd[0] == WRITE_10)) {
 879				/* This will issue a new 6-byte command. */
 880				cmd->device->use_10_for_rw = 0;
 881				action = ACTION_REPREP;
 882			} else if (sshdr.asc == 0x10) /* DIX */ {
 883				description = "Host Data Integrity Failure";
 884				action = ACTION_FAIL;
 885				error = -EILSEQ;
 886			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
 887			} else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
 888				   (cmd->cmnd[0] == UNMAP ||
 889				    cmd->cmnd[0] == WRITE_SAME_16 ||
 890				    cmd->cmnd[0] == WRITE_SAME)) {
 891				description = "Discard failure";
 892				action = ACTION_FAIL;
 893				error = -EREMOTEIO;
 894			} else
 895				action = ACTION_FAIL;
 896			break;
 897		case ABORTED_COMMAND:
 898			action = ACTION_FAIL;
 899			if (sshdr.asc == 0x10) { /* DIF */
 900				description = "Target Data Integrity Failure";
 901				error = -EILSEQ;
 902			}
 903			break;
 904		case NOT_READY:
 905			/* If the device is in the process of becoming
 906			 * ready, or has a temporary blockage, retry.
 907			 */
 908			if (sshdr.asc == 0x04) {
 909				switch (sshdr.ascq) {
 910				case 0x01: /* becoming ready */
 911				case 0x04: /* format in progress */
 912				case 0x05: /* rebuild in progress */
 913				case 0x06: /* recalculation in progress */
 914				case 0x07: /* operation in progress */
 915				case 0x08: /* Long write in progress */
 916				case 0x09: /* self test in progress */
 917				case 0x14: /* space allocation in progress */
 918					action = ACTION_DELAYED_RETRY;
 919					break;
 920				default:
 921					description = "Device not ready";
 922					action = ACTION_FAIL;
 923					break;
 924				}
 925			} else {
 926				description = "Device not ready";
 927				action = ACTION_FAIL;
 928			}
 929			break;
 930		case VOLUME_OVERFLOW:
 931			/* See SSC3rXX or current. */
 932			action = ACTION_FAIL;
 933			break;
 934		default:
 935			description = "Unhandled sense code";
 936			action = ACTION_FAIL;
 937			break;
 938		}
 939	} else {
 940		description = "Unhandled error code";
 941		action = ACTION_FAIL;
 942	}
 943
 944	switch (action) {
 945	case ACTION_FAIL:
 946		/* Give up and fail the remainder of the request */
 947		scsi_release_buffers(cmd);
 948		if (!(req->cmd_flags & REQ_QUIET)) {
 949			if (description)
 950				scmd_printk(KERN_INFO, cmd, "%s\n",
 951					    description);
 952			scsi_print_result(cmd);
 953			if (driver_byte(result) & DRIVER_SENSE)
 954				scsi_print_sense("", cmd);
 955			scsi_print_command(cmd);
 956		}
 957		if (blk_end_request_err(req, error))
 958			scsi_requeue_command(q, cmd);
 959		else
 960			scsi_next_command(cmd);
 961		break;
 962	case ACTION_REPREP:
 963		/* Unprep the request and put it back at the head of the queue.
 964		 * A new command will be prepared and issued.
 965		 */
 966		scsi_release_buffers(cmd);
 967		scsi_requeue_command(q, cmd);
 968		break;
 969	case ACTION_RETRY:
 970		/* Retry the same command immediately */
 971		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
 972		break;
 973	case ACTION_DELAYED_RETRY:
 974		/* Retry the same command after a delay */
 975		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
 976		break;
 977	}
 978}
 979
 980static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
 981			     gfp_t gfp_mask)
 982{
 983	int count;
 984
 985	/*
 986	 * If sg table allocation fails, requeue request later.
 987	 */
 988	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
 989					gfp_mask))) {
 990		return BLKPREP_DEFER;
 991	}
 992
 993	req->buffer = NULL;
 994
 995	/* 
 996	 * Next, walk the list, and fill in the addresses and sizes of
 997	 * each segment.
 998	 */
 999	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
1000	BUG_ON(count > sdb->table.nents);
1001	sdb->table.nents = count;
1002	sdb->length = blk_rq_bytes(req);
1003	return BLKPREP_OK;
1004}
1005
1006/*
1007 * Function:    scsi_init_io()
1008 *
1009 * Purpose:     SCSI I/O initialize function.
1010 *
1011 * Arguments:   cmd   - Command descriptor we wish to initialize
1012 *
1013 * Returns:     0 on success
1014 *		BLKPREP_DEFER if the failure is retryable
1015 *		BLKPREP_KILL if the failure is fatal
1016 */
1017int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1018{
1019	struct request *rq = cmd->request;
1020
1021	int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
1022	if (error)
1023		goto err_exit;
1024
1025	if (blk_bidi_rq(rq)) {
1026		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
1027			scsi_sdb_cache, GFP_ATOMIC);
1028		if (!bidi_sdb) {
1029			error = BLKPREP_DEFER;
1030			goto err_exit;
1031		}
1032
1033		rq->next_rq->special = bidi_sdb;
1034		error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
1035		if (error)
1036			goto err_exit;
1037	}
1038
1039	if (blk_integrity_rq(rq)) {
1040		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
1041		int ivecs, count;
1042
1043		BUG_ON(prot_sdb == NULL);
1044		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
1045
1046		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
1047			error = BLKPREP_DEFER;
1048			goto err_exit;
1049		}
1050
1051		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
1052						prot_sdb->table.sgl);
1053		BUG_ON(unlikely(count > ivecs));
1054		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
1055
1056		cmd->prot_sdb = prot_sdb;
1057		cmd->prot_sdb->table.nents = count;
1058	}
1059
1060	return BLKPREP_OK ;
1061
1062err_exit:
1063	scsi_release_buffers(cmd);
1064	cmd->request->special = NULL;
1065	scsi_put_command(cmd);
1066	return error;
1067}
1068EXPORT_SYMBOL(scsi_init_io);
1069
1070static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1071		struct request *req)
1072{
1073	struct scsi_cmnd *cmd;
1074
1075	if (!req->special) {
1076		cmd = scsi_get_command(sdev, GFP_ATOMIC);
1077		if (unlikely(!cmd))
1078			return NULL;
1079		req->special = cmd;
1080	} else {
1081		cmd = req->special;
1082	}
1083
1084	/* pull a tag out of the request if we have one */
1085	cmd->tag = req->tag;
1086	cmd->request = req;
1087
1088	cmd->cmnd = req->cmd;
1089	cmd->prot_op = SCSI_PROT_NORMAL;
1090
1091	return cmd;
1092}
1093
1094int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1095{
1096	struct scsi_cmnd *cmd;
1097	int ret = scsi_prep_state_check(sdev, req);
1098
1099	if (ret != BLKPREP_OK)
1100		return ret;
1101
1102	cmd = scsi_get_cmd_from_req(sdev, req);
1103	if (unlikely(!cmd))
1104		return BLKPREP_DEFER;
1105
1106	/*
 1107	 * BLOCK_PC requests may transfer data, in which case they must have
 1108	 * a bio attached to them.  Or they might contain a SCSI command
1109	 * that does not transfer data, in which case they may optionally
1110	 * submit a request without an attached bio.
1111	 */
1112	if (req->bio) {
1113		int ret;
1114
1115		BUG_ON(!req->nr_phys_segments);
1116
1117		ret = scsi_init_io(cmd, GFP_ATOMIC);
1118		if (unlikely(ret))
1119			return ret;
1120	} else {
1121		BUG_ON(blk_rq_bytes(req));
1122
1123		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1124		req->buffer = NULL;
1125	}
1126
1127	cmd->cmd_len = req->cmd_len;
1128	if (!blk_rq_bytes(req))
1129		cmd->sc_data_direction = DMA_NONE;
1130	else if (rq_data_dir(req) == WRITE)
1131		cmd->sc_data_direction = DMA_TO_DEVICE;
1132	else
1133		cmd->sc_data_direction = DMA_FROM_DEVICE;
1134	
1135	cmd->transfersize = blk_rq_bytes(req);
1136	cmd->allowed = req->retries;
1137	return BLKPREP_OK;
1138}
1139EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
1140
1141/*
 1142 * Set up a REQ_TYPE_FS command.  These are simple read/write requests
 1143 * from filesystems that still need to be translated to SCSI CDBs by
 1144 * the ULD.
1145 */
1146int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1147{
1148	struct scsi_cmnd *cmd;
1149	int ret = scsi_prep_state_check(sdev, req);
1150
1151	if (ret != BLKPREP_OK)
1152		return ret;
1153
1154	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1155			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1156		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1157		if (ret != BLKPREP_OK)
1158			return ret;
1159	}
1160
1161	/*
1162	 * Filesystem requests must transfer data.
1163	 */
1164	BUG_ON(!req->nr_phys_segments);
1165
1166	cmd = scsi_get_cmd_from_req(sdev, req);
1167	if (unlikely(!cmd))
1168		return BLKPREP_DEFER;
1169
1170	memset(cmd->cmnd, 0, BLK_MAX_CDB);
1171	return scsi_init_io(cmd, GFP_ATOMIC);
1172}
1173EXPORT_SYMBOL(scsi_setup_fs_cmnd);
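
/*
 * Illustrative sketch, not part of scsi_lib.c: an upper-level driver's
 * prep_fn typically calls scsi_setup_fs_cmnd() and then encodes the CDB
 * itself.  The helper below shows a minimal READ(10)/WRITE(10) translation;
 * it ignores the 512-byte-sector conversion and the other details a real
 * ULD such as sd must handle, and the function name and retry count are
 * invented for the example.
 */
#if 0	/* example only, never compiled into scsi_lib.c */
static int example_uld_prep_fn(struct request_queue *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	sector_t lba = blk_rq_pos(rq);
	unsigned int nr = blk_rq_sectors(rq);
	int ret;

	ret = scsi_setup_fs_cmnd(sdev, rq);
	if (ret != BLKPREP_OK)
		return ret;
	cmd = rq->special;

	/* Encode a READ(10)/WRITE(10) CDB for the block range. */
	cmd->cmnd[0] = rq_data_dir(rq) == WRITE ? WRITE_10 : READ_10;
	cmd->cmnd[2] = (lba >> 24) & 0xff;
	cmd->cmnd[3] = (lba >> 16) & 0xff;
	cmd->cmnd[4] = (lba >> 8) & 0xff;
	cmd->cmnd[5] = lba & 0xff;
	cmd->cmnd[7] = (nr >> 8) & 0xff;
	cmd->cmnd[8] = nr & 0xff;
	cmd->cmd_len = 10;
	cmd->transfersize = sdev->sector_size;	/* bytes per logical block */
	cmd->allowed = 3;			/* arbitrary retry count */
	return BLKPREP_OK;
}
#endif
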
1174
1175int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1176{
1177	int ret = BLKPREP_OK;
1178
1179	/*
1180	 * If the device is not in running state we will reject some
1181	 * or all commands.
1182	 */
1183	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1184		switch (sdev->sdev_state) {
1185		case SDEV_OFFLINE:
1186			/*
1187			 * If the device is offline we refuse to process any
1188			 * commands.  The device must be brought online
1189			 * before trying any recovery commands.
1190			 */
1191			sdev_printk(KERN_ERR, sdev,
1192				    "rejecting I/O to offline device\n");
1193			ret = BLKPREP_KILL;
1194			break;
1195		case SDEV_DEL:
1196			/*
1197			 * If the device is fully deleted, we refuse to
1198			 * process any commands as well.
1199			 */
1200			sdev_printk(KERN_ERR, sdev,
1201				    "rejecting I/O to dead device\n");
1202			ret = BLKPREP_KILL;
1203			break;
1204		case SDEV_QUIESCE:
1205		case SDEV_BLOCK:
1206		case SDEV_CREATED_BLOCK:
1207			/*
 1208			 * If the device is blocked we defer normal commands.
1209			 */
1210			if (!(req->cmd_flags & REQ_PREEMPT))
1211				ret = BLKPREP_DEFER;
1212			break;
1213		default:
1214			/*
1215			 * For any other not fully online state we only allow
1216			 * special commands.  In particular any user initiated
1217			 * command is not allowed.
1218			 */
1219			if (!(req->cmd_flags & REQ_PREEMPT))
1220				ret = BLKPREP_KILL;
1221			break;
1222		}
1223	}
1224	return ret;
1225}
1226EXPORT_SYMBOL(scsi_prep_state_check);
1227
1228int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1229{
1230	struct scsi_device *sdev = q->queuedata;
1231
1232	switch (ret) {
1233	case BLKPREP_KILL:
1234		req->errors = DID_NO_CONNECT << 16;
1235		/* release the command and kill it */
1236		if (req->special) {
1237			struct scsi_cmnd *cmd = req->special;
1238			scsi_release_buffers(cmd);
1239			scsi_put_command(cmd);
1240			req->special = NULL;
1241		}
1242		break;
1243	case BLKPREP_DEFER:
1244		/*
1245		 * If we defer, the blk_peek_request() returns NULL, but the
1246		 * queue must be restarted, so we schedule a callback to happen
1247		 * shortly.
1248		 */
1249		if (sdev->device_busy == 0)
1250			blk_delay_queue(q, SCSI_QUEUE_DELAY);
1251		break;
1252	default:
1253		req->cmd_flags |= REQ_DONTPREP;
1254	}
1255
1256	return ret;
1257}
1258EXPORT_SYMBOL(scsi_prep_return);
1259
1260int scsi_prep_fn(struct request_queue *q, struct request *req)
1261{
1262	struct scsi_device *sdev = q->queuedata;
1263	int ret = BLKPREP_KILL;
1264
1265	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
1266		ret = scsi_setup_blk_pc_cmnd(sdev, req);
1267	return scsi_prep_return(q, req, ret);
1268}
1269EXPORT_SYMBOL(scsi_prep_fn);
1270
1271/*
1272 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1273 * return 0.
1274 *
1275 * Called with the queue_lock held.
1276 */
1277static inline int scsi_dev_queue_ready(struct request_queue *q,
1278				  struct scsi_device *sdev)
1279{
1280	if (sdev->device_busy == 0 && sdev->device_blocked) {
1281		/*
1282		 * unblock after device_blocked iterates to zero
1283		 */
1284		if (--sdev->device_blocked == 0) {
1285			SCSI_LOG_MLQUEUE(3,
1286				   sdev_printk(KERN_INFO, sdev,
1287				   "unblocking device at zero depth\n"));
1288		} else {
1289			blk_delay_queue(q, SCSI_QUEUE_DELAY);
1290			return 0;
1291		}
1292	}
1293	if (scsi_device_is_busy(sdev))
1294		return 0;
1295
1296	return 1;
1297}
1298
1299
1300/*
 1301 * scsi_target_queue_ready: checks if we can send commands to the target
1302 * @sdev: scsi device on starget to check.
1303 *
1304 * Called with the host lock held.
1305 */
1306static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1307					   struct scsi_device *sdev)
1308{
1309	struct scsi_target *starget = scsi_target(sdev);
1310
1311	if (starget->single_lun) {
1312		if (starget->starget_sdev_user &&
1313		    starget->starget_sdev_user != sdev)
1314			return 0;
1315		starget->starget_sdev_user = sdev;
1316	}
1317
1318	if (starget->target_busy == 0 && starget->target_blocked) {
1319		/*
1320		 * unblock after target_blocked iterates to zero
1321		 */
1322		if (--starget->target_blocked == 0) {
1323			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1324					 "unblocking target at zero depth\n"));
1325		} else
1326			return 0;
1327	}
1328
1329	if (scsi_target_is_busy(starget)) {
1330		list_move_tail(&sdev->starved_entry, &shost->starved_list);
1331		return 0;
1332	}
1333
1334	return 1;
1335}
1336
1337/*
1338 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1339 * return 0. We must end up running the queue again whenever 0 is
1340 * returned, else IO can hang.
1341 *
1342 * Called with host_lock held.
1343 */
1344static inline int scsi_host_queue_ready(struct request_queue *q,
1345				   struct Scsi_Host *shost,
1346				   struct scsi_device *sdev)
1347{
1348	if (scsi_host_in_recovery(shost))
1349		return 0;
1350	if (shost->host_busy == 0 && shost->host_blocked) {
1351		/*
1352		 * unblock after host_blocked iterates to zero
1353		 */
1354		if (--shost->host_blocked == 0) {
1355			SCSI_LOG_MLQUEUE(3,
1356				printk("scsi%d unblocking host at zero depth\n",
1357					shost->host_no));
1358		} else {
1359			return 0;
1360		}
1361	}
1362	if (scsi_host_is_busy(shost)) {
1363		if (list_empty(&sdev->starved_entry))
1364			list_add_tail(&sdev->starved_entry, &shost->starved_list);
1365		return 0;
1366	}
1367
1368	/* We're OK to process the command, so we can't be starved */
1369	if (!list_empty(&sdev->starved_entry))
1370		list_del_init(&sdev->starved_entry);
1371
1372	return 1;
1373}
1374
1375/*
1376 * Busy state exporting function for request stacking drivers.
1377 *
1378 * For efficiency, no lock is taken to check the busy state of
1379 * shost/starget/sdev, since the returned value is not guaranteed and
1380 * may be changed after request stacking drivers call the function,
1381 * regardless of taking lock or not.
1382 *
1383 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
1384 * needs to return 'not busy'. Otherwise, request stacking drivers
1385 * may hold requests forever.
1386 */
1387static int scsi_lld_busy(struct request_queue *q)
1388{
1389	struct scsi_device *sdev = q->queuedata;
1390	struct Scsi_Host *shost;
1391
1392	if (blk_queue_dead(q))
1393		return 0;
1394
1395	shost = sdev->host;
1396
1397	/*
1398	 * Ignore host/starget busy state.
 1399	 * Since the block layer does not have a concept of fairness across
 1400	 * multiple queues, congestion of the host/starget needs to be handled
 1401	 * in the SCSI layer.
1402	 */
1403	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1404		return 1;
1405
1406	return 0;
1407}
1408
1409/*
1410 * Kill a request for a dead device
1411 */
1412static void scsi_kill_request(struct request *req, struct request_queue *q)
1413{
1414	struct scsi_cmnd *cmd = req->special;
1415	struct scsi_device *sdev;
1416	struct scsi_target *starget;
1417	struct Scsi_Host *shost;
1418
1419	blk_start_request(req);
1420
1421	scmd_printk(KERN_INFO, cmd, "killing request\n");
1422
1423	sdev = cmd->device;
1424	starget = scsi_target(sdev);
1425	shost = sdev->host;
1426	scsi_init_cmd_errh(cmd);
1427	cmd->result = DID_NO_CONNECT << 16;
1428	atomic_inc(&cmd->device->iorequest_cnt);
1429
1430	/*
 1431	 * The SCSI request completion path will do scsi_device_unbusy(),
 1432	 * which drops the busy counts, so bump them here first.  To do so
 1433	 * we need to dance with the locks as the normal issue path does.
1434	 */
1435	sdev->device_busy++;
1436	spin_unlock(sdev->request_queue->queue_lock);
1437	spin_lock(shost->host_lock);
1438	shost->host_busy++;
1439	starget->target_busy++;
1440	spin_unlock(shost->host_lock);
1441	spin_lock(sdev->request_queue->queue_lock);
1442
1443	blk_complete_request(req);
1444}
1445
1446static void scsi_softirq_done(struct request *rq)
1447{
1448	struct scsi_cmnd *cmd = rq->special;
1449	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1450	int disposition;
1451
1452	INIT_LIST_HEAD(&cmd->eh_entry);
1453
1454	atomic_inc(&cmd->device->iodone_cnt);
1455	if (cmd->result)
1456		atomic_inc(&cmd->device->ioerr_cnt);
1457
1458	disposition = scsi_decide_disposition(cmd);
1459	if (disposition != SUCCESS &&
1460	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1461		sdev_printk(KERN_ERR, cmd->device,
1462			    "timing out command, waited %lus\n",
1463			    wait_for/HZ);
1464		disposition = SUCCESS;
1465	}
1466			
1467	scsi_log_completion(cmd, disposition);
1468
1469	switch (disposition) {
1470		case SUCCESS:
1471			scsi_finish_command(cmd);
1472			break;
1473		case NEEDS_RETRY:
1474			scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1475			break;
1476		case ADD_TO_MLQUEUE:
1477			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1478			break;
1479		default:
1480			if (!scsi_eh_scmd_add(cmd, 0))
1481				scsi_finish_command(cmd);
1482	}
1483}
1484
1485/*
1486 * Function:    scsi_request_fn()
1487 *
1488 * Purpose:     Main strategy routine for SCSI.
1489 *
1490 * Arguments:   q       - Pointer to actual queue.
1491 *
1492 * Returns:     Nothing
1493 *
1494 * Lock status: IO request lock assumed to be held when called.
1495 */
1496static void scsi_request_fn(struct request_queue *q)
1497{
1498	struct scsi_device *sdev = q->queuedata;
1499	struct Scsi_Host *shost;
1500	struct scsi_cmnd *cmd;
1501	struct request *req;
1502
1503	if(!get_device(&sdev->sdev_gendev))
1504		/* We must be tearing the block queue down already */
1505		return;
1506
1507	/*
1508	 * To start with, we keep looping until the queue is empty, or until
1509	 * the host is no longer able to accept any more requests.
1510	 */
1511	shost = sdev->host;
1512	for (;;) {
1513		int rtn;
1514		/*
1515		 * get next queueable request.  We do this early to make sure
1516		 * that the request is fully prepared even if we cannot 
1517		 * accept it.
1518		 */
1519		req = blk_peek_request(q);
1520		if (!req || !scsi_dev_queue_ready(q, sdev))
1521			break;
1522
1523		if (unlikely(!scsi_device_online(sdev))) {
1524			sdev_printk(KERN_ERR, sdev,
1525				    "rejecting I/O to offline device\n");
1526			scsi_kill_request(req, q);
1527			continue;
1528		}
1529
1530
1531		/*
1532		 * Remove the request from the request list.
1533		 */
1534		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1535			blk_start_request(req);
1536		sdev->device_busy++;
1537
1538		spin_unlock(q->queue_lock);
1539		cmd = req->special;
1540		if (unlikely(cmd == NULL)) {
1541			printk(KERN_CRIT "impossible request in %s.\n"
1542					 "please mail a stack trace to "
1543					 "linux-scsi@vger.kernel.org\n",
1544					 __func__);
1545			blk_dump_rq_flags(req, "foo");
1546			BUG();
1547		}
1548		spin_lock(shost->host_lock);
1549
1550		/*
1551		 * We hit this when the driver is using a host wide
1552		 * tag map. For device level tag maps the queue_depth check
1553		 * in the device ready fn would prevent us from trying
1554		 * to allocate a tag. Since the map is a shared host resource
1555		 * we add the dev to the starved list so it eventually gets
1556		 * a run when a tag is freed.
1557		 */
1558		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
1559			if (list_empty(&sdev->starved_entry))
1560				list_add_tail(&sdev->starved_entry,
1561					      &shost->starved_list);
1562			goto not_ready;
1563		}
1564
1565		if (!scsi_target_queue_ready(shost, sdev))
1566			goto not_ready;
1567
1568		if (!scsi_host_queue_ready(q, shost, sdev))
1569			goto not_ready;
1570
1571		scsi_target(sdev)->target_busy++;
1572		shost->host_busy++;
1573
1574		/*
1575		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1576		 *		take the lock again.
1577		 */
1578		spin_unlock_irq(shost->host_lock);
1579
1580		/*
1581		 * Finally, initialize any error handling parameters, and set up
1582		 * the timers for timeouts.
1583		 */
1584		scsi_init_cmd_errh(cmd);
1585
1586		/*
1587		 * Dispatch the command to the low-level driver.
1588		 */
1589		rtn = scsi_dispatch_cmd(cmd);
1590		spin_lock_irq(q->queue_lock);
1591		if (rtn)
1592			goto out_delay;
1593	}
1594
1595	goto out;
1596
1597 not_ready:
1598	spin_unlock_irq(shost->host_lock);
1599
1600	/*
1601	 * lock q, handle tag, requeue req, and decrement device_busy. We
1602	 * must return with queue_lock held.
1603	 *
1604	 * Decrementing device_busy without checking it is OK, as all such
1605	 * cases (host limits or settings) should run the queue at some
1606	 * later time.
1607	 */
1608	spin_lock_irq(q->queue_lock);
1609	blk_requeue_request(q, req);
1610	sdev->device_busy--;
1611out_delay:
1612	if (sdev->device_busy == 0)
1613		blk_delay_queue(q, SCSI_QUEUE_DELAY);
1614out:
1615	/* must be careful here...if we trigger the ->remove() function
1616	 * we cannot be holding the q lock */
1617	spin_unlock_irq(q->queue_lock);
1618	put_device(&sdev->sdev_gendev);
1619	spin_lock_irq(q->queue_lock);
1620}
1621
1622u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1623{
1624	struct device *host_dev;
1625	u64 bounce_limit = 0xffffffff;
1626
1627	if (shost->unchecked_isa_dma)
1628		return BLK_BOUNCE_ISA;
1629	/*
1630	 * Platforms with virtual-DMA translation
1631	 * hardware have no practical limit.
1632	 */
1633	if (!PCI_DMA_BUS_IS_PHYS)
1634		return BLK_BOUNCE_ANY;
1635
1636	host_dev = scsi_get_device(shost);
1637	if (host_dev && host_dev->dma_mask)
1638		bounce_limit = *host_dev->dma_mask;
1639
1640	return bounce_limit;
1641}
1642EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1643
1644struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1645					 request_fn_proc *request_fn)
1646{
1647	struct request_queue *q;
1648	struct device *dev = shost->dma_dev;
1649
1650	q = blk_init_queue(request_fn, NULL);
1651	if (!q)
1652		return NULL;
1653
1654	/*
1655	 * this limit is imposed by hardware restrictions
1656	 */
1657	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1658					SCSI_MAX_SG_CHAIN_SEGMENTS));
1659
1660	if (scsi_host_prot_dma(shost)) {
1661		shost->sg_prot_tablesize =
1662			min_not_zero(shost->sg_prot_tablesize,
1663				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
1664		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1665		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1666	}
1667
1668	blk_queue_max_hw_sectors(q, shost->max_sectors);
1669	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1670	blk_queue_segment_boundary(q, shost->dma_boundary);
1671	dma_set_seg_boundary(dev, shost->dma_boundary);
1672
1673	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1674
1675	if (!shost->use_clustering)
1676		q->limits.cluster = 0;
1677
1678	/*
1679	 * set a reasonable default alignment on word boundaries: the
1680	 * host and device may alter it using
1681	 * blk_queue_update_dma_alignment() later.
1682	 */
1683	blk_queue_dma_alignment(q, 0x03);
1684
1685	return q;
1686}
1687EXPORT_SYMBOL(__scsi_alloc_queue);
1688
1689struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1690{
1691	struct request_queue *q;
1692
1693	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1694	if (!q)
1695		return NULL;
1696
1697	blk_queue_prep_rq(q, scsi_prep_fn);
1698	blk_queue_softirq_done(q, scsi_softirq_done);
1699	blk_queue_rq_timed_out(q, scsi_times_out);
1700	blk_queue_lld_busy(q, scsi_lld_busy);
1701	return q;
1702}
1703
1704/*
1705 * Function:    scsi_block_requests()
1706 *
1707 * Purpose:     Utility function used by low-level drivers to prevent further
1708 *		commands from being queued to the device.
1709 *
1710 * Arguments:   shost       - Host in question
1711 *
1712 * Returns:     Nothing
1713 *
1714 * Lock status: No locks are assumed held.
1715 *
1716 * Notes:       There is no timer nor any other means by which the requests
1717 *		get unblocked other than the low-level driver calling
1718 *		scsi_unblock_requests().
1719 */
1720void scsi_block_requests(struct Scsi_Host *shost)
1721{
1722	shost->host_self_blocked = 1;
1723}
1724EXPORT_SYMBOL(scsi_block_requests);
1725
1726/*
1727 * Function:    scsi_unblock_requests()
1728 *
 1729 * Purpose:     Utility function used by low-level drivers to allow further
 1730 *		commands to be queued to the device.
1731 *
1732 * Arguments:   shost       - Host in question
1733 *
1734 * Returns:     Nothing
1735 *
1736 * Lock status: No locks are assumed held.
1737 *
1738 * Notes:       There is no timer nor any other means by which the requests
1739 *		get unblocked other than the low-level driver calling
1740 *		scsi_unblock_requests().
1741 *
1742 *		This is done as an API function so that changes to the
1743 *		internals of the scsi mid-layer won't require wholesale
1744 *		changes to drivers that use this feature.
1745 */
1746void scsi_unblock_requests(struct Scsi_Host *shost)
1747{
1748	shost->host_self_blocked = 0;
1749	scsi_run_host_queues(shost);
1750}
1751EXPORT_SYMBOL(scsi_unblock_requests);
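
/*
 * Illustrative sketch, not part of scsi_lib.c: the usual pattern in a
 * low-level driver is to bracket a host-wide disruption (firmware reset,
 * adapter reconfiguration, etc.) with these two calls.  The reset helper
 * name is invented for the example.
 */
#if 0	/* example only, never compiled into scsi_lib.c */
static void example_reset_adapter(struct Scsi_Host *shost)
{
	scsi_block_requests(shost);	/* stop new commands being queued */

	/* ... reset the hardware, reload firmware, etc. ... */

	scsi_unblock_requests(shost);	/* resume and re-run all queues */
}
#endif
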
1752
1753int __init scsi_init_queue(void)
1754{
1755	int i;
1756
1757	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1758					   sizeof(struct scsi_data_buffer),
1759					   0, 0, NULL);
1760	if (!scsi_sdb_cache) {
1761		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1762		return -ENOMEM;
1763	}
1764
1765	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1766		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1767		int size = sgp->size * sizeof(struct scatterlist);
1768
1769		sgp->slab = kmem_cache_create(sgp->name, size, 0,
1770				SLAB_HWCACHE_ALIGN, NULL);
1771		if (!sgp->slab) {
1772			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1773					sgp->name);
1774			goto cleanup_sdb;
1775		}
1776
1777		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1778						     sgp->slab);
1779		if (!sgp->pool) {
1780			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1781					sgp->name);
1782			goto cleanup_sdb;
1783		}
1784	}
1785
1786	return 0;
1787
1788cleanup_sdb:
1789	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1790		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1791		if (sgp->pool)
1792			mempool_destroy(sgp->pool);
1793		if (sgp->slab)
1794			kmem_cache_destroy(sgp->slab);
1795	}
1796	kmem_cache_destroy(scsi_sdb_cache);
1797
1798	return -ENOMEM;
1799}
1800
1801void scsi_exit_queue(void)
1802{
1803	int i;
1804
1805	kmem_cache_destroy(scsi_sdb_cache);
1806
1807	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1808		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1809		mempool_destroy(sgp->pool);
1810		kmem_cache_destroy(sgp->slab);
1811	}
1812}
1813
1814/**
1815 *	scsi_mode_select - issue a mode select
1816 *	@sdev:	SCSI device to be queried
1817 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
1818 *	@sp:	Save page bit (0 == don't save, 1 == save)
1819 *	@modepage: mode page being requested
1820 *	@buffer: request buffer (may not be smaller than eight bytes)
1821 *	@len:	length of request buffer.
1822 *	@timeout: command timeout
1823 *	@retries: number of retries before failing
1824 *	@data: returns a structure abstracting the mode header data
1825 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
1826 *		must be SCSI_SENSE_BUFFERSIZE big.
1827 *
1828 *	Returns zero if successful; negative error number or scsi
1829 *	status on error
1830 *
1831 */
1832int
1833scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1834		 unsigned char *buffer, int len, int timeout, int retries,
1835		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1836{
1837	unsigned char cmd[10];
1838	unsigned char *real_buffer;
1839	int ret;
1840
1841	memset(cmd, 0, sizeof(cmd));
1842	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
1843
1844	if (sdev->use_10_for_ms) {
1845		if (len > 65535)
1846			return -EINVAL;
1847		real_buffer = kmalloc(8 + len, GFP_KERNEL);
1848		if (!real_buffer)
1849			return -ENOMEM;
1850		memcpy(real_buffer + 8, buffer, len);
1851		len += 8;
1852		real_buffer[0] = 0;
1853		real_buffer[1] = 0;
1854		real_buffer[2] = data->medium_type;
1855		real_buffer[3] = data->device_specific;
1856		real_buffer[4] = data->longlba ? 0x01 : 0;
1857		real_buffer[5] = 0;
1858		real_buffer[6] = data->block_descriptor_length >> 8;
1859		real_buffer[7] = data->block_descriptor_length;
1860
1861		cmd[0] = MODE_SELECT_10;
1862		cmd[7] = len >> 8;
1863		cmd[8] = len;
1864	} else {
1865		if (len > 255 || data->block_descriptor_length > 255 ||
1866		    data->longlba)
1867			return -EINVAL;
1868
1869		real_buffer = kmalloc(4 + len, GFP_KERNEL);
1870		if (!real_buffer)
1871			return -ENOMEM;
1872		memcpy(real_buffer + 4, buffer, len);
1873		len += 4;
1874		real_buffer[0] = 0;
1875		real_buffer[1] = data->medium_type;
1876		real_buffer[2] = data->device_specific;
1877		real_buffer[3] = data->block_descriptor_length;
1878
1879
1880		cmd[0] = MODE_SELECT;
1881		cmd[4] = len;
1882	}
1883
1884	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
1885			       sshdr, timeout, retries, NULL);
1886	kfree(real_buffer);
1887	return ret;
1888}
1889EXPORT_SYMBOL_GPL(scsi_mode_select);
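
/*
 * Editorial usage sketch (not part of the original file): one plausible
 * way a caller could use scsi_mode_select() above.  The page code, page
 * contents, timeout and retry count are illustrative assumptions; real
 * callers (e.g. sd.c) normally read the current page with MODE SENSE
 * first and modify it rather than building one from scratch.
 */
static int example_mode_select_control_page(struct scsi_device *sdev)
{
	unsigned char page[12];
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;

	memset(&data, 0, sizeof(data));	/* header is rebuilt by the helper */
	memset(page, 0, sizeof(page));
	page[0] = 0x0a;			/* control mode page code */
	page[1] = 0x0a;			/* page length: 10 bytes follow */

	/* PF=1 (standard page format), SP=0 (do not save the page). */
	return scsi_mode_select(sdev, 1, 0, 0x0a, page, sizeof(page),
				10 * HZ, 3, &data, &sshdr);
}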
1890
1891/**
1892 *	scsi_mode_sense - issue a mode sense, falling back from a ten-byte to a six-byte command if necessary.
1893 *	@sdev:	SCSI device to be queried
1894 *	@dbd:	set if mode sense will allow block descriptors to be returned
1895 *	@modepage: mode page being requested
1896 *	@buffer: request buffer (may not be smaller than eight bytes)
1897 *	@len:	length of request buffer.
1898 *	@timeout: command timeout
1899 *	@retries: number of retries before failing
1900 *	@data: returns a structure abstracting the mode header data
1901 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
1902 *		must be SCSI_SENSE_BUFFERSIZE big.
1903 *
1904 *	Returns zero if successful, or a negative error number or SCSI
1905 *	status on failure.  On success, @data->header_length reports the
1906 *	header length (4 or 8, for a six- or ten-byte command).
1907 */
1908int
1909scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1910		  unsigned char *buffer, int len, int timeout, int retries,
1911		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1912{
1913	unsigned char cmd[12];
1914	int use_10_for_ms;
1915	int header_length;
1916	int result;
1917	struct scsi_sense_hdr my_sshdr;
1918
1919	memset(data, 0, sizeof(*data));
1920	memset(&cmd[0], 0, 12);
1921	cmd[1] = dbd & 0x18;	/* allows DBD and LLBAA bits */
1922	cmd[2] = modepage;
1923
1924	/* caller might not be interested in sense, but we need it */
1925	if (!sshdr)
1926		sshdr = &my_sshdr;
1927
1928 retry:
1929	use_10_for_ms = sdev->use_10_for_ms;
1930
1931	if (use_10_for_ms) {
1932		if (len < 8)
1933			len = 8;
1934
1935		cmd[0] = MODE_SENSE_10;
1936		cmd[8] = len;
1937		header_length = 8;
1938	} else {
1939		if (len < 4)
1940			len = 4;
1941
1942		cmd[0] = MODE_SENSE;
1943		cmd[4] = len;
1944		header_length = 4;
1945	}
1946
1947	memset(buffer, 0, len);
1948
1949	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1950				  sshdr, timeout, retries, NULL);
1951
1952	/* If MODE SENSE(10) itself is not supported, the target returns
1953	 * ILLEGAL REQUEST with ASC 0x20 (INVALID COMMAND OPERATION CODE).
1954	 * Detect that case here so the command can be retried with the
1955	 * six-byte MODE SENSE instead of failing outright. */
1956
1957	if (use_10_for_ms && !scsi_status_is_good(result) &&
1958	    (driver_byte(result) & DRIVER_SENSE)) {
1959		if (scsi_sense_valid(sshdr)) {
1960			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1961			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1962				/* 
1963				 * Invalid command operation code
1964				 */
1965				sdev->use_10_for_ms = 0;
1966				goto retry;
1967			}
1968		}
1969	}
1970
1971	if (scsi_status_is_good(result)) {
1972		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
1973			     (modepage == 6 || modepage == 8))) {
1974			/* Initio breakage? */
1975			header_length = 0;
1976			data->length = 13;
1977			data->medium_type = 0;
1978			data->device_specific = 0;
1979			data->longlba = 0;
1980			data->block_descriptor_length = 0;
1981		} else if (use_10_for_ms) {
1982			data->length = buffer[0]*256 + buffer[1] + 2;
1983			data->medium_type = buffer[2];
1984			data->device_specific = buffer[3];
1985			data->longlba = buffer[4] & 0x01;
1986			data->block_descriptor_length = buffer[6]*256
1987				+ buffer[7];
1988		} else {
1989			data->length = buffer[0] + 1;
1990			data->medium_type = buffer[1];
1991			data->device_specific = buffer[2];
1992			data->block_descriptor_length = buffer[3];
1993		}
1994		data->header_length = header_length;
1995	}
1996
1997	return result;
1998}
1999EXPORT_SYMBOL(scsi_mode_sense);
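
/*
 * Editorial usage sketch (assumptions flagged, not part of the original
 * file): fetch the caching mode page with scsi_mode_sense() above and use
 * the scsi_mode_data fields to locate the page payload.  The page code
 * (0x08), buffer size, timeout and WCE bit position are illustrative
 * choices; sd.c does essentially this when probing a disk's write cache.
 */
static int example_read_write_cache_enable(struct scsi_device *sdev, int *wce)
{
	unsigned char buffer[128];
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	unsigned char *page;
	int res;

	/* DBD set: ask the target to omit block descriptors. */
	res = scsi_mode_sense(sdev, 0x08, 0x08, buffer, sizeof(buffer),
			      10 * HZ, 3, &data, &sshdr);
	if (!scsi_status_is_good(res))
		return -EIO;

	/* Skip the mode parameter header plus any block descriptors. */
	page = buffer + data.header_length + data.block_descriptor_length;
	*wce = !!(page[2] & 0x04);	/* WCE bit of the caching page */
	return 0;
}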
2000
2001/**
2002 *	scsi_test_unit_ready - test if unit is ready
2003 *	@sdev:	scsi device to test.
2004 *	@timeout: command timeout
2005 *	@retries: number of retries before failing
2006 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
2007 *		returning sense. Make sure that this is cleared before passing
2008 *		in.
2009 *
2010 *	Returns zero if the unit is ready, or an error result if the TUR
2011 *	failed.  For removable media, a UNIT ATTENTION sets the ->changed flag.
2012 **/
2013int
2014scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2015		     struct scsi_sense_hdr *sshdr_external)
2016{
2017	char cmd[] = {
2018		TEST_UNIT_READY, 0, 0, 0, 0, 0,
2019	};
2020	struct scsi_sense_hdr *sshdr;
2021	int result;
2022
2023	if (!sshdr_external)
2024		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
2025	else
2026		sshdr = sshdr_external;
2027
2028	/* try to eat the UNIT_ATTENTION if there are enough retries */
2029	do {
2030		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2031					  timeout, retries, NULL);
2032		if (sdev->removable && scsi_sense_valid(sshdr) &&
2033		    sshdr->sense_key == UNIT_ATTENTION)
2034			sdev->changed = 1;
2035	} while (scsi_sense_valid(sshdr) &&
2036		 sshdr->sense_key == UNIT_ATTENTION && --retries);
2037
2038	if (!sshdr_external)
2039		kfree(sshdr);
2040	return result;
2041}
2042EXPORT_SYMBOL(scsi_test_unit_ready);
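
/*
 * Editorial usage sketch (not part of the original file): a minimal media
 * presence check built on scsi_test_unit_ready() above.  The timeout and
 * retry count are arbitrary example values; the sense header is zeroed
 * before use, as the kerneldoc above requests.
 */
static int example_media_present(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;
	int res;

	memset(&sshdr, 0, sizeof(sshdr));
	res = scsi_test_unit_ready(sdev, 10 * HZ, 3, &sshdr);
	if (scsi_status_is_good(res))
		return 1;	/* unit is ready: media present */
	if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY)
		return 0;	/* no media, or still becoming ready */
	return -EIO;		/* some other failure */
}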
2043
2044/**
2045 *	scsi_device_set_state - Take the given device through the device state model.
2046 *	@sdev:	scsi device to change the state of.
2047 *	@state:	state to change to.
2048 *
2049 *	Returns zero if successful or an error if the requested
2050 *	transition is illegal.
2051 */
2052int
2053scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2054{
2055	enum scsi_device_state oldstate = sdev->sdev_state;
2056
2057	if (state == oldstate)
2058		return 0;
2059
2060	switch (state) {
2061	case SDEV_CREATED:
2062		switch (oldstate) {
2063		case SDEV_CREATED_BLOCK:
2064			break;
2065		default:
2066			goto illegal;
2067		}
2068		break;
2069			
2070	case SDEV_RUNNING:
2071		switch (oldstate) {
2072		case SDEV_CREATED:
2073		case SDEV_OFFLINE:
2074		case SDEV_QUIESCE:
2075		case SDEV_BLOCK:
2076			break;
2077		default:
2078			goto illegal;
2079		}
2080		break;
2081
2082	case SDEV_QUIESCE:
2083		switch (oldstate) {
2084		case SDEV_RUNNING:
2085		case SDEV_OFFLINE:
2086			break;
2087		default:
2088			goto illegal;
2089		}
2090		break;
2091
2092	case SDEV_OFFLINE:
2093		switch (oldstate) {
2094		case SDEV_CREATED:
2095		case SDEV_RUNNING:
2096		case SDEV_QUIESCE:
2097		case SDEV_BLOCK:
2098			break;
2099		default:
2100			goto illegal;
2101		}
2102		break;
2103
2104	case SDEV_BLOCK:
2105		switch (oldstate) {
2106		case SDEV_RUNNING:
2107		case SDEV_CREATED_BLOCK:
2108			break;
2109		default:
2110			goto illegal;
2111		}
2112		break;
2113
2114	case SDEV_CREATED_BLOCK:
2115		switch (oldstate) {
2116		case SDEV_CREATED:
2117			break;
2118		default:
2119			goto illegal;
2120		}
2121		break;
2122
2123	case SDEV_CANCEL:
2124		switch (oldstate) {
2125		case SDEV_CREATED:
2126		case SDEV_RUNNING:
2127		case SDEV_QUIESCE:
2128		case SDEV_OFFLINE:
2129		case SDEV_BLOCK:
2130			break;
2131		default:
2132			goto illegal;
2133		}
2134		break;
2135
2136	case SDEV_DEL:
2137		switch (oldstate) {
2138		case SDEV_CREATED:
2139		case SDEV_RUNNING:
2140		case SDEV_OFFLINE:
2141		case SDEV_CANCEL:
2142			break;
2143		default:
2144			goto illegal;
2145		}
2146		break;
2147
2148	}
2149	sdev->sdev_state = state;
2150	return 0;
2151
2152 illegal:
2153	SCSI_LOG_ERROR_RECOVERY(1, 
2154				sdev_printk(KERN_ERR, sdev,
2155					    "Illegal state transition %s->%s\n",
2156					    scsi_device_state_name(oldstate),
2157					    scsi_device_state_name(state))
2158				);
2159	return -EINVAL;
2160}
2161EXPORT_SYMBOL(scsi_device_set_state);
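
/*
 * Editorial summary of the transitions accepted by the switch above
 * (derived from the code, added for readability):
 *
 *	requested state		accepted current states
 *	---------------		-----------------------
 *	SDEV_CREATED		CREATED_BLOCK
 *	SDEV_RUNNING		CREATED, OFFLINE, QUIESCE, BLOCK
 *	SDEV_QUIESCE		RUNNING, OFFLINE
 *	SDEV_OFFLINE		CREATED, RUNNING, QUIESCE, BLOCK
 *	SDEV_BLOCK		RUNNING, CREATED_BLOCK
 *	SDEV_CREATED_BLOCK	CREATED
 *	SDEV_CANCEL		CREATED, RUNNING, QUIESCE, OFFLINE, BLOCK
 *	SDEV_DEL		CREATED, RUNNING, OFFLINE, CANCEL
 *
 * Any other combination reaches the "illegal" label and returns -EINVAL.
 */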
2162
2163/**
2164 * 	scsi_evt_emit - emit a single SCSI device uevent
2165 *	@sdev: associated SCSI device
2166 *	@evt: event to emit
2167 *
2168 *	Send a single uevent (scsi_event) to the associated scsi_device.
2169 */
2170static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2171{
2172	int idx = 0;
2173	char *envp[3];
2174
2175	switch (evt->evt_type) {
2176	case SDEV_EVT_MEDIA_CHANGE:
2177		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2178		break;
2179
2180	default:
2181		/* do nothing */
2182		break;
2183	}
2184
2185	envp[idx++] = NULL;
2186
2187	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2188}
2189
2190/**
2191 * 	scsi_evt_thread - send a uevent for each scsi event
2192 *	@work: work struct for scsi_device
2193 *
2194 *	Dispatch queued events to their associated scsi_device kobjects
2195 *	as uevents.
2196 */
2197void scsi_evt_thread(struct work_struct *work)
2198{
2199	struct scsi_device *sdev;
2200	LIST_HEAD(event_list);
2201
2202	sdev = container_of(work, struct scsi_device, event_work);
2203
2204	while (1) {
2205		struct scsi_event *evt;
2206		struct list_head *this, *tmp;
2207		unsigned long flags;
2208
2209		spin_lock_irqsave(&sdev->list_lock, flags);
2210		list_splice_init(&sdev->event_list, &event_list);
2211		spin_unlock_irqrestore(&sdev->list_lock, flags);
2212
2213		if (list_empty(&event_list))
2214			break;
2215
2216		list_for_each_safe(this, tmp, &event_list) {
2217			evt = list_entry(this, struct scsi_event, node);
2218			list_del(&evt->node);
2219			scsi_evt_emit(sdev, evt);
2220			kfree(evt);
2221		}
2222	}
2223}
2224
2225/**
2226 * 	sdev_evt_send - send asserted event to uevent thread
2227 *	@sdev: scsi_device event occurred on
2228 *	@evt: event to send
2229 *
2230 *	Assert scsi device event asynchronously.
2231 */
2232void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2233{
2234	unsigned long flags;
2235
2236#if 0
2237	/* FIXME: currently this check eliminates all media change events
2238	 * for polled devices.  Need to update to discriminate between AN
2239	 * and polled events */
2240	if (!test_bit(evt->evt_type, sdev->supported_events)) {
2241		kfree(evt);
2242		return;
2243	}
2244#endif
2245
2246	spin_lock_irqsave(&sdev->list_lock, flags);
2247	list_add_tail(&evt->node, &sdev->event_list);
2248	schedule_work(&sdev->event_work);
2249	spin_unlock_irqrestore(&sdev->list_lock, flags);
2250}
2251EXPORT_SYMBOL_GPL(sdev_evt_send);
2252
2253/**
2254 * 	sdev_evt_alloc - allocate a new scsi event
2255 *	@evt_type: type of event to allocate
2256 *	@gfpflags: GFP flags for allocation
2257 *
2258 *	Allocates and returns a new scsi_event.
2259 */
2260struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2261				  gfp_t gfpflags)
2262{
2263	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2264	if (!evt)
2265		return NULL;
2266
2267	evt->evt_type = evt_type;
2268	INIT_LIST_HEAD(&evt->node);
2269
2270	/* evt_type-specific initialization, if any */
2271	switch (evt_type) {
2272	case SDEV_EVT_MEDIA_CHANGE:
2273	default:
2274		/* do nothing */
2275		break;
2276	}
2277
2278	return evt;
2279}
2280EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2281
2282/**
2283 * 	sdev_evt_send_simple - send asserted event to uevent thread
2284 *	@sdev: scsi_device event occurred on
2285 *	@evt_type: type of event to send
2286 *	@gfpflags: GFP flags for allocation
2287 *
2288 *	Assert scsi device event asynchronously, given an event type.
2289 */
2290void sdev_evt_send_simple(struct scsi_device *sdev,
2291			  enum scsi_device_event evt_type, gfp_t gfpflags)
2292{
2293	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2294	if (!evt) {
2295		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2296			    evt_type);
2297		return;
2298	}
2299
2300	sdev_evt_send(sdev, evt);
2301}
2302EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
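
/*
 * Editorial usage sketch (hypothetical caller, not part of the original
 * file): how a driver might raise a media-change event with the helpers
 * above.  GFP_ATOMIC is assumed because such notifications often arrive
 * from interrupt context; the one-liner and the two-step variant differ
 * only in how an allocation failure is reported.
 */
static void example_report_media_change(struct scsi_device *sdev)
{
	/* Simple form: allocation failure is logged and the event dropped. */
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
}

static void example_report_media_change_two_step(struct scsi_device *sdev)
{
	struct scsi_event *evt = sdev_evt_alloc(SDEV_EVT_MEDIA_CHANGE,
						GFP_ATOMIC);

	if (!evt)
		return;		/* caller decides how to handle the OOM */
	sdev_evt_send(sdev, evt);
}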
2303
2304/**
2305 *	scsi_device_quiesce - Block user issued commands.
2306 *	@sdev:	scsi device to quiesce.
2307 *
2308 *	This works by trying to transition to the SDEV_QUIESCE state
2309 *	(which must be a legal transition).  When the device is in this
2310 *	state, only special requests will be accepted, all others will
2311 *	be deferred.  Since special requests may also be requeued requests,
2312 *	a successful return doesn't guarantee the device will be 
2313 *	totally quiescent.
2314 *
2315 *	Must be called with user context, may sleep.
2316 *
2317 *	Returns zero if successful or an error if the transition is illegal.
2318 */
2319int
2320scsi_device_quiesce(struct scsi_device *sdev)
2321{
2322	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2323	if (err)
2324		return err;
2325
2326	scsi_run_queue(sdev->request_queue);
2327	while (sdev->device_busy) {
2328		msleep_interruptible(200);
2329		scsi_run_queue(sdev->request_queue);
2330	}
2331	return 0;
2332}
2333EXPORT_SYMBOL(scsi_device_quiesce);
2334
2335/**
2336 *	scsi_device_resume - Restart user issued commands to a quiesced device.
2337 *	@sdev:	scsi device to resume.
2338 *
2339 *	Moves the device from quiesced back to running and restarts the
2340 *	queues.
2341 *
2342 *	Must be called with user context, may sleep.
2343 */
2344void scsi_device_resume(struct scsi_device *sdev)
2345{
2346	/* check if the device state was mutated prior to resume, and if
2347	 * so assume the state is being managed elsewhere (for example
2348	 * device deleted during suspend)
2349	 */
2350	if (sdev->sdev_state != SDEV_QUIESCE ||
2351	    scsi_device_set_state(sdev, SDEV_RUNNING))
2352		return;
2353	scsi_run_queue(sdev->request_queue);
2354}
2355EXPORT_SYMBOL(scsi_device_resume);
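
/*
 * Editorial usage sketch (not part of the original file): the usual
 * pairing of scsi_device_quiesce() and scsi_device_resume() above.  The
 * callback is a stand-in for whatever work (a firmware download, say)
 * must run without competing user I/O; both helpers may sleep, so this
 * is only usable from user context.
 */
static int example_run_quiesced(struct scsi_device *sdev,
				int (*critical_op)(struct scsi_device *))
{
	int err;

	err = scsi_device_quiesce(sdev);
	if (err)
		return err;

	err = critical_op(sdev);	/* user I/O is held off here */

	scsi_device_resume(sdev);	/* restart the request queue */
	return err;
}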
2356
2357static void
2358device_quiesce_fn(struct scsi_device *sdev, void *data)
2359{
2360	scsi_device_quiesce(sdev);
2361}
2362
2363void
2364scsi_target_quiesce(struct scsi_target *starget)
2365{
2366	starget_for_each_device(starget, NULL, device_quiesce_fn);
2367}
2368EXPORT_SYMBOL(scsi_target_quiesce);
2369
2370static void
2371device_resume_fn(struct scsi_device *sdev, void *data)
2372{
2373	scsi_device_resume(sdev);
2374}
2375
2376void
2377scsi_target_resume(struct scsi_target *starget)
2378{
2379	starget_for_each_device(starget, NULL, device_resume_fn);
2380}
2381EXPORT_SYMBOL(scsi_target_resume);
2382
2383/**
2384 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2385 * @sdev:	device to block
2386 *
2387 * Called by SCSI LLDs to temporarily stop all SCSI commands to the
2388 * specified device.  May be called from interrupt or normal process
2389 * context.
2390 *
2391 * Returns zero if successful or error if not
2392 *
2393 * Notes:       
2394 *	This routine transitions the device to the SDEV_BLOCK state
2395 *	(which must be a legal transition).  When the device is in this
2396 *	state, all commands are deferred until the scsi lld reenables
2397 *	the device with scsi_internal_device_unblock or device_block_tmo fires.
2398 *	This routine assumes the host_lock is held on entry.
2399 */
2400int
2401scsi_internal_device_block(struct scsi_device *sdev)
2402{
2403	struct request_queue *q = sdev->request_queue;
2404	unsigned long flags;
2405	int err = 0;
2406
2407	err = scsi_device_set_state(sdev, SDEV_BLOCK);
2408	if (err) {
2409		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2410
2411		if (err)
2412			return err;
2413	}
2414
2415	/* 
2416	 * The device has transitioned to SDEV_BLOCK.  Stop the
2417	 * block layer from calling the midlayer with this device's
2418	 * request queue. 
2419	 */
2420	spin_lock_irqsave(q->queue_lock, flags);
2421	blk_stop_queue(q);
2422	spin_unlock_irqrestore(q->queue_lock, flags);
2423
2424	return 0;
2425}
2426EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2427 
2428/**
2429 * scsi_internal_device_unblock - resume a device after a block request
2430 * @sdev:	device to resume
2431 *
2432 * Called by scsi lld's or the midlayer to restart the device queue
2433 * for the previously suspended scsi device.  Called from interrupt or
2434 * normal process context.
2435 *
2436 * Returns zero if successful or error if not.
2437 *
2438 * Notes:       
2439 *	This routine transitions the device to the SDEV_RUNNING state
2440 *	(which must be a legal transition) allowing the midlayer to
2441 *	goose the queue for this device.  This routine assumes the 
2442 *	host_lock is held upon entry.
2443 */
2444int
2445scsi_internal_device_unblock(struct scsi_device *sdev)
2446{
2447	struct request_queue *q = sdev->request_queue; 
2448	unsigned long flags;
2449	
2450	/* 
2451	 * Try to transition the scsi device to SDEV_RUNNING
2452	 * and goose the device queue if successful.  
2453	 */
2454	if (sdev->sdev_state == SDEV_BLOCK)
2455		sdev->sdev_state = SDEV_RUNNING;
2456	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
2457		sdev->sdev_state = SDEV_CREATED;
2458	else if (sdev->sdev_state != SDEV_CANCEL &&
2459		 sdev->sdev_state != SDEV_OFFLINE)
2460		return -EINVAL;
2461
2462	spin_lock_irqsave(q->queue_lock, flags);
2463	blk_start_queue(q);
2464	spin_unlock_irqrestore(q->queue_lock, flags);
2465
2466	return 0;
2467}
2468EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
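
/*
 * Editorial usage sketch (not part of the original file): an LLD-style
 * bracket around an internal recovery step using the two helpers above.
 * The recovery itself is left as a comment because it is entirely driver
 * specific; context and locking requirements are as described in the
 * kerneldoc notes above.
 */
static int example_block_for_recovery(struct scsi_device *sdev)
{
	int err;

	err = scsi_internal_device_block(sdev);
	if (err)
		return err;

	/* ... driver-specific recovery runs here while I/O is deferred ... */

	return scsi_internal_device_unblock(sdev);
}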
2469
2470static void
2471device_block(struct scsi_device *sdev, void *data)
2472{
2473	scsi_internal_device_block(sdev);
2474}
2475
2476static int
2477target_block(struct device *dev, void *data)
2478{
2479	if (scsi_is_target_device(dev))
2480		starget_for_each_device(to_scsi_target(dev), NULL,
2481					device_block);
2482	return 0;
2483}
2484
2485void
2486scsi_target_block(struct device *dev)
2487{
2488	if (scsi_is_target_device(dev))
2489		starget_for_each_device(to_scsi_target(dev), NULL,
2490					device_block);
2491	else
2492		device_for_each_child(dev, NULL, target_block);
2493}
2494EXPORT_SYMBOL_GPL(scsi_target_block);
2495
2496static void
2497device_unblock(struct scsi_device *sdev, void *data)
2498{
2499	scsi_internal_device_unblock(sdev);
2500}
2501
2502static int
2503target_unblock(struct device *dev, void *data)
2504{
2505	if (scsi_is_target_device(dev))
2506		starget_for_each_device(to_scsi_target(dev), NULL,
2507					device_unblock);
2508	return 0;
2509}
2510
2511void
2512scsi_target_unblock(struct device *dev)
2513{
2514	if (scsi_is_target_device(dev))
2515		starget_for_each_device(to_scsi_target(dev), NULL,
2516					device_unblock);
2517	else
2518		device_for_each_child(dev, NULL, target_unblock);
2519}
2520EXPORT_SYMBOL_GPL(scsi_target_unblock);
2521
2522/**
2523 * scsi_kmap_atomic_sg - find and atomically map an sg-element
2524 * @sgl:	scatter-gather list
2525 * @sg_count:	number of segments in sg
2526 * @offset:	offset in bytes into sg, on return offset into the mapped area
2527 * @len:	bytes to map, on return number of bytes mapped
2528 *
2529 * Returns virtual address of the start of the mapped page
2530 */
2531void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2532			  size_t *offset, size_t *len)
2533{
2534	int i;
2535	size_t sg_len = 0, len_complete = 0;
2536	struct scatterlist *sg;
2537	struct page *page;
2538
2539	WARN_ON(!irqs_disabled());
2540
2541	for_each_sg(sgl, sg, sg_count, i) {
2542		len_complete = sg_len; /* Complete sg-entries */
2543		sg_len += sg->length;
2544		if (sg_len > *offset)
2545			break;
2546	}
2547
2548	if (unlikely(i == sg_count)) {
2549		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2550			"elements %d\n",
2551		       __func__, sg_len, *offset, sg_count);
2552		WARN_ON(1);
2553		return NULL;
2554	}
2555
2556	/* Offset starting from the beginning of first page in this sg-entry */
2557	*offset = *offset - len_complete + sg->offset;
2558
2559	/* Assumption: contiguous pages can be accessed as "page + i" */
2560	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2561	*offset &= ~PAGE_MASK;
2562
2563	/* Bytes in this sg-entry from *offset to the end of the page */
2564	sg_len = PAGE_SIZE - *offset;
2565	if (*len > sg_len)
2566		*len = sg_len;
2567
2568	return kmap_atomic(page);
2569}
2570EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2571
2572/**
2573 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2574 * @virt:	virtual address to be unmapped
2575 */
2576void scsi_kunmap_atomic_sg(void *virt)
2577{
2578	kunmap_atomic(virt);
2579}
2580EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
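
/*
 * Editorial usage sketch (not part of the original file): copying a byte
 * range out of a command's scatter-gather table with the pair above.  The
 * helper maps at most one page per call, hence the loop; scsi_sglist()
 * and scsi_sg_count() are the usual accessors for a scsi_cmnd.  Per the
 * WARN_ON() in scsi_kmap_atomic_sg(), the caller runs with interrupts
 * disabled, so the copy must be short and must not sleep.
 */
static void example_copy_from_sg(struct scsi_cmnd *cmd, void *dest,
				 size_t offset, size_t count)
{
	char *dst = dest;

	while (count) {
		size_t off = offset;	/* in: offset into the sg list */
		size_t len = count;	/* in: number of bytes wanted */
		void *vaddr;

		vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd),
					    scsi_sg_count(cmd), &off, &len);
		if (!vaddr)
			break;		/* offset was beyond the sg data */

		/* On return, off is the offset into the mapped page. */
		memcpy(dst, (char *)vaddr + off, len);
		scsi_kunmap_atomic_sg(vaddr);

		dst += len;
		offset += len;
		count -= len;
	}
}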