Linux Audio

Check our new training course

Loading...
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* esp_scsi.c: ESP SCSI driver.
   3 *
   4 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/types.h>
   9#include <linux/slab.h>
  10#include <linux/delay.h>
  11#include <linux/list.h>
  12#include <linux/completion.h>
  13#include <linux/kallsyms.h>
  14#include <linux/module.h>
  15#include <linux/moduleparam.h>
  16#include <linux/init.h>
  17#include <linux/irqreturn.h>
  18
  19#include <asm/irq.h>
  20#include <asm/io.h>
  21#include <asm/dma.h>
  22
  23#include <scsi/scsi.h>
  24#include <scsi/scsi_host.h>
  25#include <scsi/scsi_cmnd.h>
  26#include <scsi/scsi_device.h>
  27#include <scsi/scsi_tcq.h>
  28#include <scsi/scsi_dbg.h>
  29#include <scsi/scsi_transport_spi.h>
  30
  31#include "esp_scsi.h"
  32
  33#define DRV_MODULE_NAME		"esp"
  34#define PFX DRV_MODULE_NAME	": "
  35#define DRV_VERSION		"2.000"
  36#define DRV_MODULE_RELDATE	"April 19, 2007"
  37
  38/* SCSI bus reset settle time in seconds.  */
  39static int esp_bus_reset_settle = 3;
  40
  41static u32 esp_debug;
  42#define ESP_DEBUG_INTR		0x00000001
  43#define ESP_DEBUG_SCSICMD	0x00000002
  44#define ESP_DEBUG_RESET		0x00000004
  45#define ESP_DEBUG_MSGIN		0x00000008
  46#define ESP_DEBUG_MSGOUT	0x00000010
  47#define ESP_DEBUG_CMDDONE	0x00000020
  48#define ESP_DEBUG_DISCONNECT	0x00000040
  49#define ESP_DEBUG_DATASTART	0x00000080
  50#define ESP_DEBUG_DATADONE	0x00000100
  51#define ESP_DEBUG_RECONNECT	0x00000200
  52#define ESP_DEBUG_AUTOSENSE	0x00000400
  53#define ESP_DEBUG_EVENT		0x00000800
  54#define ESP_DEBUG_COMMAND	0x00001000
  55
  56#define esp_log_intr(f, a...) \
  57do {	if (esp_debug & ESP_DEBUG_INTR) \
  58		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  59} while (0)
  60
  61#define esp_log_reset(f, a...) \
  62do {	if (esp_debug & ESP_DEBUG_RESET) \
  63		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  64} while (0)
  65
  66#define esp_log_msgin(f, a...) \
  67do {	if (esp_debug & ESP_DEBUG_MSGIN) \
  68		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  69} while (0)
  70
  71#define esp_log_msgout(f, a...) \
  72do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
  73		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  74} while (0)
  75
  76#define esp_log_cmddone(f, a...) \
  77do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
  78		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  79} while (0)
  80
  81#define esp_log_disconnect(f, a...) \
  82do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
  83		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  84} while (0)
  85
  86#define esp_log_datastart(f, a...) \
  87do {	if (esp_debug & ESP_DEBUG_DATASTART) \
  88		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  89} while (0)
  90
  91#define esp_log_datadone(f, a...) \
  92do {	if (esp_debug & ESP_DEBUG_DATADONE) \
  93		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  94} while (0)
  95
  96#define esp_log_reconnect(f, a...) \
  97do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
  98		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  99} while (0)
 100
 101#define esp_log_autosense(f, a...) \
 102do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
 103		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
 104} while (0)
 105
 106#define esp_log_event(f, a...) \
 107do {   if (esp_debug & ESP_DEBUG_EVENT)	\
 108		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
 109} while (0)
 110
 111#define esp_log_command(f, a...) \
 112do {   if (esp_debug & ESP_DEBUG_COMMAND)	\
 113		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
 114} while (0)
 115
 116#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
 117#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)
 118
 119static void esp_log_fill_regs(struct esp *esp,
 120			      struct esp_event_ent *p)
 121{
 122	p->sreg = esp->sreg;
 123	p->seqreg = esp->seqreg;
 124	p->sreg2 = esp->sreg2;
 125	p->ireg = esp->ireg;
 126	p->select_state = esp->select_state;
 127	p->event = esp->event;
 128}
 129
 130void scsi_esp_cmd(struct esp *esp, u8 val)
 131{
 132	struct esp_event_ent *p;
 133	int idx = esp->esp_event_cur;
 134
 135	p = &esp->esp_event_log[idx];
 136	p->type = ESP_EVENT_TYPE_CMD;
 137	p->val = val;
 138	esp_log_fill_regs(esp, p);
 139
 140	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
 141
 142	esp_log_command("cmd[%02x]\n", val);
 143	esp_write8(val, ESP_CMD);
 144}
 145EXPORT_SYMBOL(scsi_esp_cmd);
 146
 147static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
 148{
 149	if (esp->flags & ESP_FLAG_USE_FIFO) {
 150		int i;
 151
 152		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
 153		for (i = 0; i < len; i++)
 154			esp_write8(esp->command_block[i], ESP_FDATA);
 155		scsi_esp_cmd(esp, cmd);
 156	} else {
 157		if (esp->rev == FASHME)
 158			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
 159		cmd |= ESP_CMD_DMA;
 160		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
 161				       len, max_len, 0, cmd);
 162	}
 163}
 164
 165static void esp_event(struct esp *esp, u8 val)
 166{
 167	struct esp_event_ent *p;
 168	int idx = esp->esp_event_cur;
 169
 170	p = &esp->esp_event_log[idx];
 171	p->type = ESP_EVENT_TYPE_EVENT;
 172	p->val = val;
 173	esp_log_fill_regs(esp, p);
 174
 175	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
 176
 177	esp->event = val;
 178}
 179
 180static void esp_dump_cmd_log(struct esp *esp)
 181{
 182	int idx = esp->esp_event_cur;
 183	int stop = idx;
 184
 185	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
 186	do {
 187		struct esp_event_ent *p = &esp->esp_event_log[idx];
 188
 189		shost_printk(KERN_INFO, esp->host,
 190			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
 191			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
 192			     idx,
 193			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
 194			     p->val, p->sreg, p->seqreg,
 195			     p->sreg2, p->ireg, p->select_state, p->event);
 196
 197		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
 198	} while (idx != stop);
 199}
 200
 201static void esp_flush_fifo(struct esp *esp)
 202{
 203	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
 204	if (esp->rev == ESP236) {
 205		int lim = 1000;
 206
 207		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
 208			if (--lim == 0) {
 209				shost_printk(KERN_ALERT, esp->host,
 210					     "ESP_FF_BYTES will not clear!\n");
 211				break;
 212			}
 213			udelay(1);
 214		}
 215	}
 216}
 217
/* Drain the chip FIFO into esp->fifo[] on FASHME parts and record the
 * byte count in esp->fifo_cnt.
 *
 * NOTE(review): two bytes are read per FIFO-flags count, which suggests
 * the count is in 16-bit words on this variant — confirm against the
 * FAS366/HME documentation.
 */
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	/* A dangling odd byte is flagged in status register 2: push a
	 * zero in to pair it up, read the byte back, then flush.
	 */
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}
 234
 235static void esp_set_all_config3(struct esp *esp, u8 val)
 236{
 237	int i;
 238
 239	for (i = 0; i < ESP_MAX_TARGET; i++)
 240		esp->target[i].esp_config3 = val;
 241}
 242
/* Reset the ESP chip, _not_ the SCSI bus.
 *
 * Performs the chip reset sequence, probes the exact chip revision
 * (the UID register is only readable reliably right after reset),
 * derives the min/max sync-period register values from the clock
 * cycle time, and reloads all configuration registers.
 */
static void esp_reset_esp(struct esp *esp)
{
	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		u8 family_code = ESP_FAMILY(esp_read8(ESP_UID));

		if (family_code == ESP_UID_F236) {
			esp->rev = FAS236;
		} else if (family_code == ESP_UID_HME) {
			esp->rev = FASHME; /* Version is usually '5'. */
		} else if (family_code == ESP_UID_FSC) {
			esp->rev = FSC;
			/* Enable Active Negation */
			esp_write8(ESP_CONFIG4_RADE, ESP_CFG4);
		} else {
			esp->rev = FAS100A;
		}
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/*
		 * The AM53c974 chip returns the same ID as FAS236;
		 * try to configure glitch eater.  If the glitch-eater
		 * bit sticks, this is really a PCSCSI part.
		 */
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	/* Convert cycle-time periods into sync-period register units. */
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	/* Reload configuration registers for the detected revision. */
	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		fallthrough;

	case FAS236:
	case PCSCSI:
	case FSC:
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
 371
/* Prepare the scatter-gather list of @cmd for the data phase and
 * initialise the per-command transfer bookkeeping (current segment,
 * per-segment residue, total residue).
 */
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int total = 0, i;
	struct scatterlist *s;

	/* Commands without a data phase have nothing to map. */
	if (cmd->sc_data_direction == DMA_NONE)
		return;

	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		/*
		 * For pseudo DMA and PIO we need the virtual address instead of
		 * a dma address, so perform an identity mapping.
		 */
		spriv->num_sg = scsi_sg_count(cmd);

		scsi_for_each_sg(cmd, s, spriv->num_sg, i) {
			s->dma_address = (uintptr_t)sg_virt(s);
			total += sg_dma_len(s);
		}
	} else {
		/* NOTE(review): scsi_dma_map() can return a negative
		 * value on mapping failure; no error path is visible
		 * here (the loop below simply runs zero times) —
		 * confirm callers tolerate a zero tot_residue.
		 */
		spriv->num_sg = scsi_dma_map(cmd);
		scsi_for_each_sg(cmd, s, spriv->num_sg, i)
			total += sg_dma_len(s);
	}
	/* Start the transfer cursor at the first segment. */
	spriv->cur_residue = sg_dma_len(sg);
	spriv->prv_sg = NULL;
	spriv->cur_sg = sg;
	spriv->tot_residue = total;
}
 403
 404static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
 405				   struct scsi_cmnd *cmd)
 406{
 407	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 408
 409	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 410		return ent->sense_dma +
 411			(ent->sense_ptr - cmd->sense_buffer);
 412	}
 413
 414	return sg_dma_address(p->cur_sg) +
 415		(sg_dma_len(p->cur_sg) -
 416		 p->cur_residue);
 417}
 418
 419static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
 420				    struct scsi_cmnd *cmd)
 421{
 422	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 423
 424	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 425		return SCSI_SENSE_BUFFERSIZE -
 426			(ent->sense_ptr - cmd->sense_buffer);
 427	}
 428	return p->cur_residue;
 429}
 430
 431static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
 432			    struct scsi_cmnd *cmd, unsigned int len)
 433{
 434	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 435
 436	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 437		ent->sense_ptr += len;
 438		return;
 439	}
 440
 441	p->cur_residue -= len;
 442	p->tot_residue -= len;
 443	if (p->cur_residue < 0 || p->tot_residue < 0) {
 444		shost_printk(KERN_ERR, esp->host,
 445			     "Data transfer overflow.\n");
 446		shost_printk(KERN_ERR, esp->host,
 447			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
 448			     p->cur_residue, p->tot_residue, len);
 449		p->cur_residue = 0;
 450		p->tot_residue = 0;
 451	}
 452	if (!p->cur_residue && p->tot_residue) {
 453		p->prv_sg = p->cur_sg;
 454		p->cur_sg = sg_next(p->cur_sg);
 455		p->cur_residue = sg_dma_len(p->cur_sg);
 456	}
 457}
 458
 459static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
 460{
 461	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
 462		scsi_dma_unmap(cmd);
 463}
 464
 465static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 466{
 467	struct scsi_cmnd *cmd = ent->cmd;
 468	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 469
 470	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 471		ent->saved_sense_ptr = ent->sense_ptr;
 472		return;
 473	}
 474	ent->saved_cur_residue = spriv->cur_residue;
 475	ent->saved_prv_sg = spriv->prv_sg;
 476	ent->saved_cur_sg = spriv->cur_sg;
 477	ent->saved_tot_residue = spriv->tot_residue;
 478}
 479
 480static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 481{
 482	struct scsi_cmnd *cmd = ent->cmd;
 483	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 484
 485	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 486		ent->sense_ptr = ent->saved_sense_ptr;
 487		return;
 488	}
 489	spriv->cur_residue = ent->saved_cur_residue;
 490	spriv->prv_sg = ent->saved_prv_sg;
 491	spriv->cur_sg = ent->saved_cur_sg;
 492	spriv->tot_residue = ent->saved_tot_residue;
 493}
 494
 495static void esp_write_tgt_config3(struct esp *esp, int tgt)
 496{
 497	if (esp->rev > ESP100A) {
 498		u8 val = esp->target[tgt].esp_config3;
 499
 500		if (val != esp->prev_cfg3) {
 501			esp->prev_cfg3 = val;
 502			esp_write8(val, ESP_CFG3);
 503		}
 504	}
 505}
 506
 507static void esp_write_tgt_sync(struct esp *esp, int tgt)
 508{
 509	u8 off = esp->target[tgt].esp_offset;
 510	u8 per = esp->target[tgt].esp_period;
 511
 512	if (off != esp->prev_soff) {
 513		esp->prev_soff = off;
 514		esp_write8(off, ESP_SOFF);
 515	}
 516	if (per != esp->prev_stp) {
 517		esp->prev_stp = per;
 518		esp_write8(per, ESP_STP);
 519	}
 520}
 521
 522static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
 523{
 524	if (esp->rev == FASHME) {
 525		/* Arbitrary segment boundaries, 24-bit counts.  */
 526		if (dma_len > (1U << 24))
 527			dma_len = (1U << 24);
 528	} else {
 529		u32 base, end;
 530
 531		/* ESP chip limits other variants by 16-bits of transfer
 532		 * count.  Actually on FAS100A and FAS236 we could get
 533		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
 534		 * in the ESP_CFG2 register but that causes other unwanted
 535		 * changes so we don't use it currently.
 536		 */
 537		if (dma_len > (1U << 16))
 538			dma_len = (1U << 16);
 539
 540		/* All of the DMA variants hooked up to these chips
 541		 * cannot handle crossing a 24-bit address boundary.
 542		 */
 543		base = dma_addr & ((1U << 24) - 1U);
 544		end = base + dma_len;
 545		if (end > (1U << 24))
 546			end = (1U <<24);
 547		dma_len = end - base;
 548	}
 549	return dma_len;
 550}
 551
 552static int esp_need_to_nego_wide(struct esp_target_data *tp)
 553{
 554	struct scsi_target *target = tp->starget;
 555
 556	return spi_width(target) != tp->nego_goal_width;
 557}
 558
 559static int esp_need_to_nego_sync(struct esp_target_data *tp)
 560{
 561	struct scsi_target *target = tp->starget;
 562
 563	/* When offset is zero, period is "don't care".  */
 564	if (!spi_offset(target) && !tp->nego_goal_offset)
 565		return 0;
 566
 567	if (spi_offset(target) == tp->nego_goal_offset &&
 568	    spi_period(target) == tp->nego_goal_period)
 569		return 0;
 570
 571	return 1;
 572}
 573
/* Claim a per-LUN issue slot for @ent.
 *
 * Tagged and untagged commands are mutually exclusive on a LUN: an
 * untagged command must wait until all tagged ones drain (lp->hold
 * plugs the queue while that happens), and a tagged command must wait
 * while an untagged one is active or the queue is held.
 *
 * Returns 0 on success, -EBUSY if the command cannot be issued now
 * (the caller leaves it queued and retries later).
 */
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken?  */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	}

	/* Tagged command. Check that it isn't blocked by a non-tagged one. */
	if (lp->non_tagged_cmd || lp->hold)
		return -EBUSY;

	/* The tag value indexes the per-LUN table; it must be free. */
	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}
 616
 617static void esp_free_lun_tag(struct esp_cmd_entry *ent,
 618			     struct esp_lun_data *lp)
 619{
 620	if (ent->orig_tag[0]) {
 621		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
 622		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
 623		lp->num_tagged--;
 624	} else {
 625		BUG_ON(lp->non_tagged_cmd != ent);
 626		lp->non_tagged_cmd = NULL;
 627	}
 628}
 629
 630static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
 631{
 632	ent->sense_ptr = ent->cmd->sense_buffer;
 633	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
 634		ent->sense_dma = (uintptr_t)ent->sense_ptr;
 635		return;
 636	}
 637
 638	ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
 639					SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 640}
 641
 642static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
 643{
 644	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
 645		dma_unmap_single(esp->dev, ent->sense_dma,
 646				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 647	ent->sense_ptr = NULL;
 648}
 649
/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	/* Map the sense buffer once; re-entry after a disconnect keeps
	 * the existing mapping.
	 */
	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);
		esp_map_sense(esp, ent);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	/* Build IDENTIFY + a 6-byte REQUEST SENSE CDB in the command
	 * block; no separate message-out phase is used.
	 */
	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);	/* pre-SCSI-3 CDBs carry the LUN here */
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;	/* allocation length */
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}
 703
/* Pick the next queued command that can be issued to the bus.
 *
 * Autosense requests go out immediately (always untagged).  For a
 * normal command a tag message is built if the midlayer provided one,
 * and a per-LUN slot is claimed via esp_alloc_lun_tag(); commands
 * whose LUN is currently blocked are skipped and retried later.
 *
 * Returns the chosen entry (still on the queued list) or NULL when
 * nothing is issuable right now.
 */
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		/* No tag message available: issue untagged. */
		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		/* Keep the original tag; ent->tag[] may be rewritten
		 * while the command is being processed.
		 */
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}
 734
/* If the bus is idle, pull the next issuable command off the queue,
 * build the selection message/CDB in the command block, program the
 * per-target registers, and start the selection sequence.
 *
 * "Select and stop" (selection without the chip auto-sending the CDB)
 * is used whenever the chip cannot do the whole sequence itself:
 * non-standard CDB lengths, multi-byte negotiation messages, or
 * tagged commands on ESP100 (which lacks select-with-ATN3).
 */
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	bool select_and_stop = false;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	/* Only one command owns the bus at a time; nothing new is
	 * started while a reset is pending.
	 */
	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];

	/* Command becomes the active one on the bus. */
	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	/* The chip can only auto-send 6/10/12-byte CDBs. */
	if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
		select_and_stop = true;

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this.  */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		/* Wide negotiation first (FASHME only), then sync. */
		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* If there are multiple message bytes, use Select and Stop */
		if (esp->msg_out_len)
			select_and_stop = true;
	}

build_identify:
	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		select_and_stop = true;
	}

	if (select_and_stop) {
		/* CDB bytes are fed later, during the command phase. */
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		/* Tag message bytes go out first, before any
		 * negotiation messages already staged in msg_out[].
		 */
		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	} else {
		/* Full auto-sequence: IDENTIFY [+ tag] + CDB from the
		 * command block in one go.
		 */
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}
 877
 878static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
 879{
 880	struct list_head *head = &esp->esp_cmd_pool;
 881	struct esp_cmd_entry *ret;
 882
 883	if (list_empty(head)) {
 884		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
 885	} else {
 886		ret = list_entry(head->next, struct esp_cmd_entry, list);
 887		list_del(&ret->list);
 888		memset(ret, 0, sizeof(*ret));
 889	}
 890	return ret;
 891}
 892
 893static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
 894{
 895	list_add(&ent->list, &esp->esp_cmd_pool);
 896}
 897
/* Complete @cmd back to the midlayer with @result: release the DMA
 * mapping and LUN tag slot, finish any autosense bookkeeping, signal
 * a waiting error handler, and kick the next queued command.
 */
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	/* Wake up an error-handler thread waiting on this command. */
	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_unmap_sense(esp, ent);

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	/* Remove from the active list and recycle the entry. */
	list_del(&ent->list);
	esp_put_ent(esp, ent);

	/* The bus is free now; try to start the next command. */
	esp_maybe_execute_command(esp);
}
 946
 947static unsigned int compose_result(unsigned int status, unsigned int message,
 948				   unsigned int driver_code)
 949{
 950	return (status | (message << 8) | (driver_code << 16));
 951}
 952
 953static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
 954{
 955	struct scsi_device *dev = ent->cmd->device;
 956	struct esp_lun_data *lp = dev->hostdata;
 957
 958	scsi_track_queue_full(dev, lp->num_tagged - 1);
 959}
 960
/* ->queuecommand() body, entered via DEF_SCSI_QCMD with the host lock
 * held.  Wraps @cmd in an esp_cmd_entry, appends it to the queued
 * list, and tries to start it immediately if the bus is idle.
 *
 * Returns 0 on success or SCSI_MLQUEUE_HOST_BUSY when no entry could
 * be allocated, so the midlayer retries later.
 */
static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	/* No scatter-gather mapping yet; esp_map_dma() fills this in. */
	spriv = ESP_CMD_PRIV(cmd);
	spriv->num_sg = 0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}
 985
 986static DEF_SCSI_QCMD(esp_queuecommand)
 987
 988static int esp_check_gross_error(struct esp *esp)
 989{
 990	if (esp->sreg & ESP_STAT_SPAM) {
 991		/* Gross Error, could be one of:
 992		 * - top of fifo overwritten
 993		 * - top of command register overwritten
 994		 * - DMA programmed with wrong direction
 995		 * - improper phase change
 996		 */
 997		shost_printk(KERN_ERR, esp->host,
 998			     "Gross error sreg[%02x]\n", esp->sreg);
 999		/* XXX Reset the chip. XXX */
1000		return 1;
1001	}
1002	return 0;
1003}
1004
/* Decide whether the pending interrupt is genuine.
 *
 * Returns 0 when the interrupt should be processed normally, a
 * positive value when a bus-reset interrupt was seen without the
 * status INTR bit, and -1 for a spurious interrupt or DMA error
 * that the caller should not process further.
 */
static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}
1041
1042static void esp_schedule_reset(struct esp *esp)
1043{
1044	esp_log_reset("esp_schedule_reset() from %ps\n",
1045		      __builtin_return_address(0));
1046	esp->flags |= ESP_FLAG_RESETTING;
1047	esp_event(esp, ESP_EVENT_RESET);
1048}
1049
/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 *
 * Returns the command entry matching the reselecting tag, or NULL on
 * any protocol error/timeout (the caller then resets the bus).
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	/* A tagged reselection with no tags outstanding is bogus. */
	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	/* Busy-poll for the first interrupt of the tag transfer. */
	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	/* The target must be in message-in phase to send the tag. */
	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message.  */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	/* Poll (with a longer limit) for the transfer-done interrupt. */
	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	/* Byte 0 is the tag message type, byte 1 the tag value. */
	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}
1144
/* Handle a reselection of this initiator by a previously disconnected
 * target.  Decode the reselecting target and lun, locate the matching
 * outstanding command (untagged, or via esp_reconnect_with_tag() for
 * tagged commands) and make it the active command again.
 *
 * Returns 1 if the reselection was handled, 0 after scheduling a bus
 * reset on any inconsistency.  Runs under host->lock from the
 * interrupt path.
 */
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	/* A reselection must never arrive while a command is active.  */
	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		/* Exactly one bit (the reselecting target) may remain.  */
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	/* Reload the chip's per-target sync/wide settings.  */
	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		/* No untagged command outstanding on this lun, so the
		 * target must be reconnecting for a tagged one.
		 */
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}
1229
/* Finish a selection attempt previously started for esp->active_cmd.
 * Distinguishes, via esp->ireg, a lost arbitration (reselection won by
 * a target), a dead target (disconnect), and a successful selection.
 *
 * Returns 1 when interrupt processing is complete, 0 when the event
 * state machine should keep running (the reselection path's return
 * value is ignored by the caller, which invokes esp_reconnect()
 * directly).
 */
static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;

	/* No longer selecting.  */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp_unmap_sense(esp, ent);
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a Select And Stop command, negotiation, etc.
		 * we'll do the right thing as we transition to the next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	/* Anything else is out of protocol; start over with a bus reset.  */
	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}
1323
/* Work out how many bytes of the current data phase actually made it
 * across the bus: the programmed DMA length minus the chip's remaining
 * transfer count and any FIFO residue.  Also applies the am53c974
 * odd-byte residual workaround and detects the ESP100 synchronous
 * data-phase bug.
 *
 * Returns the byte count, or -1 when the ESP100 bug was detected and
 * the caller must reset (and fall back for this target).
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	/* On a wide bus each FIFO slot holds two bytes.  */
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	/* Read the remaining transfer count unless it already hit zero
	 * (ESP_STAT_TCNT set).  Wider counter registers exist on
	 * FASHME and PCSCSI parts.
	 */
	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;
	bytes_sent -= esp->send_cmd_residual;

	/*
	 * The am53c974 has a DMA 'pecularity'. The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0 '. When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		/* Store the stray byte at its proper position in the
		 * sense buffer or the command's scatterlist.
		 */
		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	/* On writes, bytes still sitting in the FIFO never reached the
	 * target.
	 */
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
1417
/* Install a negotiated transfer agreement for target @tp: publish it
 * through the SPI transport class, program the chip's sync
 * offset/period registers and, on FAS236 and later, the fast-SCSI
 * config3 bit.  @scsi_period/@scsi_offset are the bus-level values;
 * @esp_stp/@esp_soff are the corresponding chip register encodings.
 * Clears the sync-negotiation flags when done.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		/* Synchronous transfer agreed.  */
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				/* NOTE(review): presumably the REQ/ACK
				 * delay is not wanted for fast transfers
				 * on FASHME -- confirm against chip docs.
				 */
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	/* Remember and program the new chip settings.  */
	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}
1456
/* The target sent MESSAGE REJECT in response to our last message out.
 * If we were negotiating wide transfers, fall back to narrow (and
 * possibly begin sync negotiation); if we were negotiating sync, fall
 * back to async.  A reject outside any negotiation forces a bus reset.
 */
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		/* Wide rejected: stay narrow.  */
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			/* Continue with sync negotiation.  */
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		/* Sync rejected: run asynchronously.  */
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
	esp_schedule_reset(esp);
}
1496
/* Handle an incoming SDTR message (period in esp->msg_in[3], offset in
 * esp->msg_in[4]).  Accept the agreement via esp_setsync(), counter
 * with an async SDTR when the target's period is too long for us, or
 * send MESSAGE REJECT when the message is unsolicited or out of range.
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	/* Only accept SDTR while we are negotiating sync ourselves.  */
	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			/* Too slow for us; counter-propose async.  */
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		/* Convert the SDTR period (4ns units, hence "<< 2")
		 * into the chip's transfer-period register value.
		 */
		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1547
/* Handle an incoming WDTR message (width exponent in esp->msg_in[3]).
 * Only FASHME supports wide transfers here; program the config3 wide
 * bit accordingly, then either drop ATN or follow up with sync
 * negotiation.  Unsolicited or unsupported requests get MESSAGE
 * REJECT.
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	/* Only 8-bit (narrow) and 16-bit (wide) are valid.  */
	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	/* Wide settled; now either finish or begin sync negotiation.  */
	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1596
1597static void esp_msgin_extended(struct esp *esp)
1598{
1599	struct esp_cmd_entry *ent = esp->active_cmd;
1600	struct scsi_cmnd *cmd = ent->cmd;
1601	struct esp_target_data *tp;
1602	int tgt = cmd->device->id;
1603
1604	tp = &esp->target[tgt];
1605	if (esp->msg_in[2] == EXTENDED_SDTR) {
1606		esp_msgin_sdtr(esp, tp);
1607		return;
1608	}
1609	if (esp->msg_in[2] == EXTENDED_WDTR) {
1610		esp_msgin_wdtr(esp, tp);
1611		return;
1612	}
1613
1614	shost_printk(KERN_INFO, esp->host,
1615		     "Unexpected extended msg type %x\n", esp->msg_in[2]);
1616
1617	esp->msg_out[0] = MESSAGE_REJECT;
1618	esp->msg_out_len = 1;
1619	scsi_esp_cmd(esp, ESP_CMD_SATN);
1620}
1621
1622/* Analyze msgin bytes received from target so far.  Return non-zero
1623 * if there are more bytes needed to complete the message.
1624 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		/* Need the length byte first, then the full payload
		 * (length byte value + two header bytes).
		 */
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		/* Wait for the residue-count byte.  */
		if (len == 1)
			return 1;

		/* Only a one-byte residue makes sense on this bus.  */
		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		/* Adjust the saved data-pointer residue state by one
		 * byte, stepping back to the previous sg entry when
		 * needed.
		 */
		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg = spriv->prv_sg;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		/* Record the message; the FREE_BUS event completes or
		 * suspends the command accordingly.
		 */
		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
1696
1697static int esp_process_event(struct esp *esp)
1698{
1699	int write, i;
1700
1701again:
1702	write = 0;
1703	esp_log_event("process event %d phase %x\n",
1704		      esp->event, esp->sreg & ESP_STAT_PMASK);
1705	switch (esp->event) {
1706	case ESP_EVENT_CHECK_PHASE:
1707		switch (esp->sreg & ESP_STAT_PMASK) {
1708		case ESP_DOP:
1709			esp_event(esp, ESP_EVENT_DATA_OUT);
1710			break;
1711		case ESP_DIP:
1712			esp_event(esp, ESP_EVENT_DATA_IN);
1713			break;
1714		case ESP_STATP:
1715			esp_flush_fifo(esp);
1716			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1717			esp_event(esp, ESP_EVENT_STATUS);
1718			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1719			return 1;
1720
1721		case ESP_MOP:
1722			esp_event(esp, ESP_EVENT_MSGOUT);
1723			break;
1724
1725		case ESP_MIP:
1726			esp_event(esp, ESP_EVENT_MSGIN);
1727			break;
1728
1729		case ESP_CMDP:
1730			esp_event(esp, ESP_EVENT_CMD_START);
1731			break;
1732
1733		default:
1734			shost_printk(KERN_INFO, esp->host,
1735				     "Unexpected phase, sreg=%02x\n",
1736				     esp->sreg);
1737			esp_schedule_reset(esp);
1738			return 0;
1739		}
1740		goto again;
1741
1742	case ESP_EVENT_DATA_IN:
1743		write = 1;
1744		fallthrough;
1745
1746	case ESP_EVENT_DATA_OUT: {
1747		struct esp_cmd_entry *ent = esp->active_cmd;
1748		struct scsi_cmnd *cmd = ent->cmd;
1749		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1750		unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1751
1752		if (esp->rev == ESP100)
1753			scsi_esp_cmd(esp, ESP_CMD_NULL);
1754
1755		if (write)
1756			ent->flags |= ESP_CMD_FLAG_WRITE;
1757		else
1758			ent->flags &= ~ESP_CMD_FLAG_WRITE;
1759
1760		if (esp->ops->dma_length_limit)
1761			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1762							     dma_len);
1763		else
1764			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1765
1766		esp->data_dma_len = dma_len;
1767
1768		if (!dma_len) {
1769			shost_printk(KERN_ERR, esp->host,
1770				     "DMA length is zero!\n");
1771			shost_printk(KERN_ERR, esp->host,
1772				     "cur adr[%08llx] len[%08x]\n",
1773				     (unsigned long long)esp_cur_dma_addr(ent, cmd),
1774				     esp_cur_dma_len(ent, cmd));
1775			esp_schedule_reset(esp);
1776			return 0;
1777		}
1778
1779		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
1780				  (unsigned long long)dma_addr, dma_len, write);
1781
1782		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1783				       write, ESP_CMD_DMA | ESP_CMD_TI);
1784		esp_event(esp, ESP_EVENT_DATA_DONE);
1785		break;
1786	}
1787	case ESP_EVENT_DATA_DONE: {
1788		struct esp_cmd_entry *ent = esp->active_cmd;
1789		struct scsi_cmnd *cmd = ent->cmd;
1790		int bytes_sent;
1791
1792		if (esp->ops->dma_error(esp)) {
1793			shost_printk(KERN_INFO, esp->host,
1794				     "data done, DMA error, resetting\n");
1795			esp_schedule_reset(esp);
1796			return 0;
1797		}
1798
1799		if (ent->flags & ESP_CMD_FLAG_WRITE) {
1800			/* XXX parity errors, etc. XXX */
1801
1802			esp->ops->dma_drain(esp);
1803		}
1804		esp->ops->dma_invalidate(esp);
1805
1806		if (esp->ireg != ESP_INTR_BSERV) {
1807			/* We should always see exactly a bus-service
1808			 * interrupt at the end of a successful transfer.
1809			 */
1810			shost_printk(KERN_INFO, esp->host,
1811				     "data done, not BSERV, resetting\n");
1812			esp_schedule_reset(esp);
1813			return 0;
1814		}
1815
1816		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1817
1818		esp_log_datadone("data done flgs[%x] sent[%d]\n",
1819				 ent->flags, bytes_sent);
1820
1821		if (bytes_sent < 0) {
1822			/* XXX force sync mode for this target XXX */
1823			esp_schedule_reset(esp);
1824			return 0;
1825		}
1826
1827		esp_advance_dma(esp, ent, cmd, bytes_sent);
1828		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1829		goto again;
1830	}
1831
1832	case ESP_EVENT_STATUS: {
1833		struct esp_cmd_entry *ent = esp->active_cmd;
1834
1835		if (esp->ireg & ESP_INTR_FDONE) {
1836			ent->status = esp_read8(ESP_FDATA);
1837			ent->message = esp_read8(ESP_FDATA);
1838			scsi_esp_cmd(esp, ESP_CMD_MOK);
1839		} else if (esp->ireg == ESP_INTR_BSERV) {
1840			ent->status = esp_read8(ESP_FDATA);
1841			ent->message = 0xff;
1842			esp_event(esp, ESP_EVENT_MSGIN);
1843			return 0;
1844		}
1845
1846		if (ent->message != COMMAND_COMPLETE) {
1847			shost_printk(KERN_INFO, esp->host,
1848				     "Unexpected message %x in status\n",
1849				     ent->message);
1850			esp_schedule_reset(esp);
1851			return 0;
1852		}
1853
1854		esp_event(esp, ESP_EVENT_FREE_BUS);
1855		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1856		break;
1857	}
1858	case ESP_EVENT_FREE_BUS: {
1859		struct esp_cmd_entry *ent = esp->active_cmd;
1860		struct scsi_cmnd *cmd = ent->cmd;
1861
1862		if (ent->message == COMMAND_COMPLETE ||
1863		    ent->message == DISCONNECT)
1864			scsi_esp_cmd(esp, ESP_CMD_ESEL);
1865
1866		if (ent->message == COMMAND_COMPLETE) {
1867			esp_log_cmddone("Command done status[%x] message[%x]\n",
1868					ent->status, ent->message);
1869			if (ent->status == SAM_STAT_TASK_SET_FULL)
1870				esp_event_queue_full(esp, ent);
1871
1872			if (ent->status == SAM_STAT_CHECK_CONDITION &&
1873			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1874				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1875				esp_autosense(esp, ent);
1876			} else {
1877				esp_cmd_is_done(esp, ent, cmd,
1878						compose_result(ent->status,
1879							       ent->message,
1880							       DID_OK));
1881			}
1882		} else if (ent->message == DISCONNECT) {
1883			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
1884					   cmd->device->id,
1885					   ent->tag[0], ent->tag[1]);
1886
1887			esp->active_cmd = NULL;
1888			esp_maybe_execute_command(esp);
1889		} else {
1890			shost_printk(KERN_INFO, esp->host,
1891				     "Unexpected message %x in freebus\n",
1892				     ent->message);
1893			esp_schedule_reset(esp);
1894			return 0;
1895		}
1896		if (esp->active_cmd)
1897			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1898		break;
1899	}
1900	case ESP_EVENT_MSGOUT: {
1901		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1902
1903		if (esp_debug & ESP_DEBUG_MSGOUT) {
1904			int i;
1905			printk("ESP: Sending message [ ");
1906			for (i = 0; i < esp->msg_out_len; i++)
1907				printk("%02x ", esp->msg_out[i]);
1908			printk("]\n");
1909		}
1910
1911		if (esp->rev == FASHME) {
1912			int i;
1913
1914			/* Always use the fifo.  */
1915			for (i = 0; i < esp->msg_out_len; i++) {
1916				esp_write8(esp->msg_out[i], ESP_FDATA);
1917				esp_write8(0, ESP_FDATA);
1918			}
1919			scsi_esp_cmd(esp, ESP_CMD_TI);
1920		} else {
1921			if (esp->msg_out_len == 1) {
1922				esp_write8(esp->msg_out[0], ESP_FDATA);
1923				scsi_esp_cmd(esp, ESP_CMD_TI);
1924			} else if (esp->flags & ESP_FLAG_USE_FIFO) {
1925				for (i = 0; i < esp->msg_out_len; i++)
1926					esp_write8(esp->msg_out[i], ESP_FDATA);
1927				scsi_esp_cmd(esp, ESP_CMD_TI);
1928			} else {
1929				/* Use DMA. */
1930				memcpy(esp->command_block,
1931				       esp->msg_out,
1932				       esp->msg_out_len);
1933
1934				esp->ops->send_dma_cmd(esp,
1935						       esp->command_block_dma,
1936						       esp->msg_out_len,
1937						       esp->msg_out_len,
1938						       0,
1939						       ESP_CMD_DMA|ESP_CMD_TI);
1940			}
1941		}
1942		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1943		break;
1944	}
1945	case ESP_EVENT_MSGOUT_DONE:
1946		if (esp->rev == FASHME) {
1947			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1948		} else {
1949			if (esp->msg_out_len > 1)
1950				esp->ops->dma_invalidate(esp);
1951
1952			/* XXX if the chip went into disconnected mode,
1953			 * we can't run the phase state machine anyway.
1954			 */
1955			if (!(esp->ireg & ESP_INTR_DC))
1956				scsi_esp_cmd(esp, ESP_CMD_NULL);
1957		}
1958
1959		esp->msg_out_len = 0;
1960
1961		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1962		goto again;
1963	case ESP_EVENT_MSGIN:
1964		if (esp->ireg & ESP_INTR_BSERV) {
1965			if (esp->rev == FASHME) {
1966				if (!(esp_read8(ESP_STATUS2) &
1967				      ESP_STAT2_FEMPTY))
1968					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1969			} else {
1970				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1971				if (esp->rev == ESP100)
1972					scsi_esp_cmd(esp, ESP_CMD_NULL);
1973			}
1974			scsi_esp_cmd(esp, ESP_CMD_TI);
1975			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1976			return 1;
1977		}
1978		if (esp->ireg & ESP_INTR_FDONE) {
1979			u8 val;
1980
1981			if (esp->rev == FASHME)
1982				val = esp->fifo[0];
1983			else
1984				val = esp_read8(ESP_FDATA);
1985			esp->msg_in[esp->msg_in_len++] = val;
1986
1987			esp_log_msgin("Got msgin byte %x\n", val);
1988
1989			if (!esp_msgin_process(esp))
1990				esp->msg_in_len = 0;
1991
1992			if (esp->rev == FASHME)
1993				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1994
1995			scsi_esp_cmd(esp, ESP_CMD_MOK);
1996
1997			/* Check whether a bus reset is to be done next */
1998			if (esp->event == ESP_EVENT_RESET)
1999				return 0;
2000
2001			if (esp->event != ESP_EVENT_FREE_BUS)
2002				esp_event(esp, ESP_EVENT_CHECK_PHASE);
2003		} else {
2004			shost_printk(KERN_INFO, esp->host,
2005				     "MSGIN neither BSERV not FDON, resetting");
2006			esp_schedule_reset(esp);
2007			return 0;
2008		}
2009		break;
2010	case ESP_EVENT_CMD_START:
2011		memcpy(esp->command_block, esp->cmd_bytes_ptr,
2012		       esp->cmd_bytes_left);
2013		esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
2014		esp_event(esp, ESP_EVENT_CMD_DONE);
2015		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
2016		break;
2017	case ESP_EVENT_CMD_DONE:
2018		esp->ops->dma_invalidate(esp);
2019		if (esp->ireg & ESP_INTR_BSERV) {
2020			esp_event(esp, ESP_EVENT_CHECK_PHASE);
2021			goto again;
2022		}
2023		esp_schedule_reset(esp);
2024		return 0;
2025
2026	case ESP_EVENT_RESET:
2027		scsi_esp_cmd(esp, ESP_CMD_RS);
2028		break;
2029
2030	default:
2031		shost_printk(KERN_INFO, esp->host,
2032			     "Unexpected event %x, resetting\n", esp->event);
2033		esp_schedule_reset(esp);
2034		return 0;
2035	}
2036	return 1;
2037}
2038
/* Fail one outstanding command with DID_RESET during bus-reset
 * cleanup: release its DMA mappings, lun tag and (if used) sense
 * buffer, complete it to the midlayer and return the entry to the
 * pool.
 */
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
		esp_unmap_sense(esp, ent);

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}
2054
/* Per-device callback for __starget_for_each_device() during reset
 * cleanup: drop the lun's 'hold' flag.  By this point no tagged
 * commands may remain outstanding.
 */
static void esp_clear_hold(struct scsi_device *dev, void *data)
{
	struct esp_lun_data *lp = dev->hostdata;

	BUG_ON(lp->num_tagged);
	lp->hold = 0;
}
2062
/* Tear down all driver state after a SCSI bus reset: fail every
 * queued and active command with DID_RESET and force full sync/wide
 * renegotiation with every target.  Called from the interrupt path
 * once the reset has been observed.
 */
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	/* Commands never issued to the bus just complete immediately.  */
	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	/* Active commands also need their DMA/tag state unwound.  */
	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers.  */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
2103
/* Core interrupt service routine.  Latches the chip's status/step/
 * interrupt registers, handles bus-reset completion, and dispatches
 * to reselection handling, selection completion, and the event state
 * machine.  Runs under host->lock.
 */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	/* Reading INTRPT clears STATUS and SSTEP, so latch all three
	 * registers up front, INTRPT last.
	 */
	esp->sreg = esp_read8(ESP_STATUS);
	esp->seqreg = esp_read8(ESP_SSTEP);
	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		/* Bus reset seen (or our own reset completed): fail all
		 * commands and wake any waiting error handler.
		 */
		esp_reset_cleanup(esp);
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		/* FASHME delivers some message/ID bytes via its FIFO;
		 * pull them out before they can be lost.
		 */
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (esp->ireg & ESP_INTR_RSEL) {
			/* A target reselected us; back out of any
			 * in-progress selection first.
			 */
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		} else {
			/* Some combination of FDONE, BSERV, DC. */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}
2181
/* Shared interrupt handler entry point for all ESP front-ends.
 * Takes the host lock and services the chip; when the core sets
 * ESP_FLAG_QUICKIRQ_CHECK (another interrupt expected imminently) it
 * briefly polls for the next interrupt instead of returning, saving
 * an interrupt round-trip.
 */
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			/* Poll a bounded number of times for the next
			 * interrupt before giving up and returning.
			 */
			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
2213
/* Probe which ESP chip variant we are driving by writing known values
 * to the CFG2/CFG3 registers and reading them back: distinguishes
 * ESP100, ESP100A, ESP236 and generic FAST parts, setting esp->rev
 * (and the FAST default sync period) accordingly.
 */
static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	if (esp->config2 == 0) {
		esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
		esp_write8(esp->config2, ESP_CFG2);

		val = esp_read8(ESP_CFG2);
		val &= ~ESP_CONFIG2_MAGIC;

		esp->config2 = 0;
		if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
			/*
			 * If what we write to cfg2 does not come back,
			 * cfg2 is not implemented.
			 * Therefore this must be a plain esp100.
			 */
			esp->rev = ESP100;
			return;
		}
	}

	/* cfg2 exists; now test cfg3 the same way.  */
	esp_set_all_config3(esp, 5);
	esp->prev_cfg3 = 5;
	esp_write8(esp->config2, ESP_CFG2);
	esp_write8(0, ESP_CFG3);
	esp_write8(esp->prev_cfg3, ESP_CFG3);

	val = esp_read8(ESP_CFG3);
	if (val != 5) {
		/* The cfg2 register is implemented, however
		 * cfg3 is not, must be esp100a.
		 */
		esp->rev = ESP100A;
	} else {
		esp_set_all_config3(esp, 0);
		esp->prev_cfg3 = 0;
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		/* All of cfg{1,2,3} implemented, must be one of
		 * the fas variants, figure out which one.
		 */
		if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
			esp->rev = FAST;
			esp->sync_defp = SYNC_DEFP_FAST;
		} else {
			esp->rev = ESP236;
		}
	}
}
2266
2267static void esp_init_swstate(struct esp *esp)
2268{
2269	int i;
2270
2271	INIT_LIST_HEAD(&esp->queued_cmds);
2272	INIT_LIST_HEAD(&esp->active_cmds);
2273	INIT_LIST_HEAD(&esp->esp_cmd_pool);
2274
2275	/* Start with a clear state, domain validation (via ->slave_configure,
2276	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2277	 * commands.
2278	 */
2279	for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2280		esp->target[i].flags = 0;
2281		esp->target[i].nego_goal_period = 0;
2282		esp->target[i].nego_goal_offset = 0;
2283		esp->target[i].nego_goal_width = 0;
2284		esp->target[i].nego_goal_tags = 0;
2285	}
2286}
2287
/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	/* Restore the normal cfg1 value (SRRDISAB cleared again). */
	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}
2312
2313static void esp_set_clock_params(struct esp *esp)
2314{
2315	int fhz;
2316	u8 ccf;
2317
2318	/* This is getting messy but it has to be done correctly or else
2319	 * you get weird behavior all over the place.  We are trying to
2320	 * basically figure out three pieces of information.
2321	 *
2322	 * a) Clock Conversion Factor
2323	 *
2324	 *    This is a representation of the input crystal clock frequency
2325	 *    going into the ESP on this machine.  Any operation whose timing
2326	 *    is longer than 400ns depends on this value being correct.  For
2327	 *    example, you'll get blips for arbitration/selection during high
2328	 *    load or with multiple targets if this is not set correctly.
2329	 *
2330	 * b) Selection Time-Out
2331	 *
2332	 *    The ESP isn't very bright and will arbitrate for the bus and try
2333	 *    to select a target forever if you let it.  This value tells the
2334	 *    ESP when it has taken too long to negotiate and that it should
2335	 *    interrupt the CPU so we can see what happened.  The value is
2336	 *    computed as follows (from NCR/Symbios chip docs).
2337	 *
2338	 *          (Time Out Period) *  (Input Clock)
2339	 *    STO = ----------------------------------
2340	 *          (8192) * (Clock Conversion Factor)
2341	 *
2342	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
2343	 *
2344	 * c) Imperical constants for synchronous offset and transfer period
2345         *    register values
2346	 *
2347	 *    This entails the smallest and largest sync period we could ever
2348	 *    handle on this ESP.
2349	 */
2350	fhz = esp->cfreq;
2351
2352	ccf = ((fhz / 1000000) + 4) / 5;
2353	if (ccf == 1)
2354		ccf = 2;
2355
2356	/* If we can't find anything reasonable, just assume 20MHZ.
2357	 * This is the clock frequency of the older sun4c's where I've
2358	 * been unable to find the clock-frequency PROM property.  All
2359	 * other machines provide useful values it seems.
2360	 */
2361	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
2362		fhz = 20000000;
2363		ccf = 4;
2364	}
2365
2366	esp->cfact = (ccf == 8 ? 0 : ccf);
2367	esp->cfreq = fhz;
2368	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
2369	esp->ctick = ESP_TICK(ccf, esp->ccycle);
2370	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
2371	esp->sync_defp = SYNC_DEFP_SLOW;
2372}
2373
/* Human-readable chip names, indexed by esp->rev; the order must match
 * the revision enum in esp_scsi.h.
 */
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"AM53C974",
	"53CF9x-2",
	"FAS100A",
	"FAST",
	"FASHME",
};
2385
2386static struct scsi_transport_template *esp_transport_template;
2387
/* Common final setup for an ESP host after the bus-specific probe code
 * has filled in *esp: compute clock parameters, detect the chip
 * revision, initialize software state, reset chip and SCSI bus, then
 * register the host with the SCSI midlayer and scan it.
 * Returns 0 on success or the scsi_add_host() error code.
 */
int scsi_esp_register(struct esp *esp)
{
	static int instance;	/* monotonically increasing host number */
	int err;

	if (!esp->num_tags)
		esp->num_tags = ESP_DEFAULT_TAGS;
	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
		   esp->host->unique_id, esp->regs, esp->dma_regs,
		   esp->host->irq);
	dev_printk(KERN_INFO, esp->dev,
		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
		   esp->host->unique_id, esp_chip_names[esp->rev],
		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, esp->dev);
	if (err)
		return err;

	/* Only consume an instance number once registration succeeded. */
	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);
2430
2431void scsi_esp_unregister(struct esp *esp)
2432{
2433	scsi_remove_host(esp->host);
2434}
2435EXPORT_SYMBOL(scsi_esp_unregister);
2436
2437static int esp_target_alloc(struct scsi_target *starget)
2438{
2439	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2440	struct esp_target_data *tp = &esp->target[starget->id];
2441
2442	tp->starget = starget;
2443
2444	return 0;
2445}
2446
2447static void esp_target_destroy(struct scsi_target *starget)
2448{
2449	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2450	struct esp_target_data *tp = &esp->target[starget->id];
2451
2452	tp->starget = NULL;
2453}
2454
2455static int esp_slave_alloc(struct scsi_device *dev)
2456{
2457	struct esp *esp = shost_priv(dev->host);
2458	struct esp_target_data *tp = &esp->target[dev->id];
2459	struct esp_lun_data *lp;
2460
2461	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2462	if (!lp)
2463		return -ENOMEM;
2464	dev->hostdata = lp;
2465
2466	spi_min_period(tp->starget) = esp->min_period;
2467	spi_max_offset(tp->starget) = 15;
2468
2469	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2470		spi_max_width(tp->starget) = 1;
2471	else
2472		spi_max_width(tp->starget) = 0;
2473
2474	return 0;
2475}
2476
2477static int esp_slave_configure(struct scsi_device *dev)
2478{
2479	struct esp *esp = shost_priv(dev->host);
2480	struct esp_target_data *tp = &esp->target[dev->id];
2481
2482	if (dev->tagged_supported)
2483		scsi_change_queue_depth(dev, esp->num_tags);
2484
2485	tp->flags |= ESP_TGT_DISCONNECT;
2486
2487	if (!spi_initial_dv(dev->sdev_target))
2488		spi_dv_device(dev);
2489
2490	return 0;
2491}
2492
2493static void esp_slave_destroy(struct scsi_device *dev)
2494{
2495	struct esp_lun_data *lp = dev->hostdata;
2496
2497	kfree(lp);
2498	dev->hostdata = NULL;
2499}
2500
/* Error-handling abort entry point.  Three cases:
 *  - command still on queued_cmds: unlink and complete it with DID_ABORT;
 *  - command is the currently active one: send ABORT_TASK_SET via
 *    message-out and wait (up to 5s) for the interrupt path to signal
 *    completion through ent->eh_done;
 *  - command is disconnected: give up and let the midlayer escalate to
 *    bus/host reset.
 * Returns SUCCESS or FAILED.
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
		     cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		shost_printk(KERN_ERR, esp->host,
			     "Current command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	/* NOTE(review): lock is dropped and immediately re-taken here, so
	 * the state dumped above may differ from the state acted on below.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected.  This is not easy to
		 * abort.  For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target.  Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyways this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Wait for the interrupt path to complete the abort for us. */
	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}
2617
/* Error-handling SCSI bus reset: issue ESP_CMD_RS, let the bus settle,
 * then wait (up to 5s) for the interrupt path to signal the reset via
 * esp->eh_reset.  Returns SUCCESS or FAILED.
 */
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		/* Timed out: detach the completion so a late interrupt
		 * cannot complete a stack variable that has gone away.
		 */
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}
2652
/* All bets are off, reset the entire device.  */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	/* Full chip + bus reset, then fail back every outstanding command
	 * so the midlayer can retry them.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Give the bus time to settle after the reset. */
	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}
2668
/* Short driver identification string for the SCSI midlayer. */
static const char *esp_info(struct Scsi_Host *host)
{
	static const char ident[] = "esp";

	return ident;
}
2673
/* Host template shared by all bus-specific ESP front ends; they copy or
 * reference this and register via scsi_esp_register().
 */
struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,	/* default initiator SCSI ID */
	.sg_tablesize		= SG_ALL,
	.max_sectors		= 0xffff,
	.skip_settle_delay	= 1,	/* we ssleep() after resets ourselves */
};
EXPORT_SYMBOL(scsi_esp_template);
2694
2695static void esp_get_signalling(struct Scsi_Host *host)
2696{
2697	struct esp *esp = shost_priv(host);
2698	enum spi_signal_type type;
2699
2700	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2701		type = SPI_SIGNAL_HVD;
2702	else
2703		type = SPI_SIGNAL_SE;
2704
2705	spi_signalling(host) = type;
2706}
2707
2708static void esp_set_offset(struct scsi_target *target, int offset)
2709{
2710	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2711	struct esp *esp = shost_priv(host);
2712	struct esp_target_data *tp = &esp->target[target->id];
2713
2714	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2715		tp->nego_goal_offset = 0;
2716	else
2717		tp->nego_goal_offset = offset;
2718	tp->flags |= ESP_TGT_CHECK_NEGO;
2719}
2720
2721static void esp_set_period(struct scsi_target *target, int period)
2722{
2723	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2724	struct esp *esp = shost_priv(host);
2725	struct esp_target_data *tp = &esp->target[target->id];
2726
2727	tp->nego_goal_period = period;
2728	tp->flags |= ESP_TGT_CHECK_NEGO;
2729}
2730
2731static void esp_set_width(struct scsi_target *target, int width)
2732{
2733	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2734	struct esp *esp = shost_priv(host);
2735	struct esp_target_data *tp = &esp->target[target->id];
2736
2737	tp->nego_goal_width = (width ? 1 : 0);
2738	tp->flags |= ESP_TGT_CHECK_NEGO;
2739}
2740
/* SPI transport operations: expose offset/period/width knobs and the
 * signalling type through the SPI transport class sysfs attributes.
 */
static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};
2750
2751static int __init esp_init(void)
2752{
2753	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2754		     sizeof(struct esp_cmd_priv));
2755
2756	esp_transport_template = spi_attach_transport(&esp_transport_ops);
2757	if (!esp_transport_template)
2758		return -ENODEV;
2759
2760	return 0;
2761}
2762
2763static void __exit esp_exit(void)
2764{
2765	spi_release_transport(esp_transport_template);
2766}
2767
2768MODULE_DESCRIPTION("ESP SCSI driver core");
2769MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2770MODULE_LICENSE("GPL");
2771MODULE_VERSION(DRV_VERSION);
2772
2773module_param(esp_bus_reset_settle, int, 0);
2774MODULE_PARM_DESC(esp_bus_reset_settle,
2775		 "ESP scsi bus reset delay in seconds");
2776
2777module_param(esp_debug, int, 0);
2778MODULE_PARM_DESC(esp_debug,
2779"ESP bitmapped debugging message enable value:\n"
2780"	0x00000001	Log interrupt events\n"
2781"	0x00000002	Log scsi commands\n"
2782"	0x00000004	Log resets\n"
2783"	0x00000008	Log message in events\n"
2784"	0x00000010	Log message out events\n"
2785"	0x00000020	Log command completion\n"
2786"	0x00000040	Log disconnects\n"
2787"	0x00000080	Log data start\n"
2788"	0x00000100	Log data done\n"
2789"	0x00000200	Log reconnects\n"
2790"	0x00000400	Log auto-sense data\n"
2791);
2792
2793module_init(esp_init);
2794module_exit(esp_exit);
2795
2796#ifdef CONFIG_SCSI_ESP_PIO
/* Busy-wait (up to ~500ms in 1us steps) for at least one byte to appear
 * in the chip FIFO.  Returns the FIFO byte count, or 0 on timeout
 * (after logging the status register).
 */
static inline unsigned int esp_wait_for_fifo(struct esp *esp)
{
	int i = 500000;

	do {
		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

		if (fbytes)
			return fbytes;

		udelay(1);
	} while (--i);

	shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
		     esp_read8(ESP_STATUS));
	return 0;
}
2814
/* Busy-wait (up to ~500ms in 1us steps) for the chip to raise an
 * interrupt, caching the status register in esp->sreg.  Returns 0 when
 * an interrupt is pending, 1 on timeout.
 */
static inline int esp_wait_for_intr(struct esp *esp)
{
	int i = 500000;

	do {
		esp->sreg = esp_read8(ESP_STATUS);
		if (esp->sreg & ESP_STAT_INTR)
			return 0;

		udelay(1);
	} while (--i);

	shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
		     esp->sreg);
	return 1;
}
2831
#define ESP_FIFO_SIZE 16

/* Transfer @esp_count bytes between memory at @addr and the chip FIFO
 * by programmed I/O, then issue @cmd (with the DMA bit stripped).
 * @write selects direction: non-zero reads from the FIFO into memory.
 * On exit esp->send_cmd_residual holds the untransferred byte count and
 * esp->send_cmd_error is set if the chip signalled an unexpected
 * interrupt or timed out.  Note @addr is a virtual address here (see
 * ESP_FLAG_NO_DMA_MAP identity mapping in esp_map_dma()).
 */
void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
		      u32 dma_count, int write, u8 cmd)
{
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	cmd &= ~ESP_CMD_DMA;
	esp->send_cmd_error = 0;

	if (write) {
		u8 *dst = (u8 *)addr;
		/* Any interrupt bit other than the expected one for this
		 * phase is treated as an error below.
		 */
		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);

		scsi_esp_cmd(esp, cmd);

		while (1) {
			if (!esp_wait_for_fifo(esp))
				break;

			*dst++ = readb(esp->fifo_reg);
			--esp_count;

			if (!esp_count)
				break;

			if (esp_wait_for_intr(esp)) {
				esp->send_cmd_error = 1;
				break;
			}

			/* Stop when the target changes bus phase. */
			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & mask) {
				esp->send_cmd_error = 1;
				break;
			}

			if (phase == ESP_MIP)
				esp_write8(ESP_CMD_MOK, ESP_CMD);

			esp_write8(ESP_CMD_TI, ESP_CMD);
		}
	} else {
		unsigned int n = ESP_FIFO_SIZE;
		u8 *src = (u8 *)addr;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		/* Prime the FIFO before issuing the command. */
		if (n > esp_count)
			n = esp_count;
		writesb(esp->fifo_reg, src, n);
		src += n;
		esp_count -= n;

		scsi_esp_cmd(esp, cmd);

		while (esp_count) {
			if (esp_wait_for_intr(esp)) {
				esp->send_cmd_error = 1;
				break;
			}

			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ~ESP_INTR_BSERV) {
				esp->send_cmd_error = 1;
				break;
			}

			/* Top up the FIFO with whatever space is free. */
			n = ESP_FIFO_SIZE -
			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);

			if (n > esp_count)
				n = esp_count;
			writesb(esp->fifo_reg, src, n);
			src += n;
			esp_count -= n;

			esp_write8(ESP_CMD_TI, ESP_CMD);
		}
	}

	esp->send_cmd_residual = esp_count;
}
EXPORT_SYMBOL(esp_send_pio_cmd);
2922#endif
/* ==== scrape artifact: a second, older revision (v5.4) of this file follows ==== */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* esp_scsi.c: ESP SCSI driver.
   3 *
   4 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/types.h>
   9#include <linux/slab.h>
  10#include <linux/delay.h>
  11#include <linux/list.h>
  12#include <linux/completion.h>
  13#include <linux/kallsyms.h>
  14#include <linux/module.h>
  15#include <linux/moduleparam.h>
  16#include <linux/init.h>
  17#include <linux/irqreturn.h>
  18
  19#include <asm/irq.h>
  20#include <asm/io.h>
  21#include <asm/dma.h>
  22
  23#include <scsi/scsi.h>
  24#include <scsi/scsi_host.h>
  25#include <scsi/scsi_cmnd.h>
  26#include <scsi/scsi_device.h>
  27#include <scsi/scsi_tcq.h>
  28#include <scsi/scsi_dbg.h>
  29#include <scsi/scsi_transport_spi.h>
  30
  31#include "esp_scsi.h"
  32
  33#define DRV_MODULE_NAME		"esp"
  34#define PFX DRV_MODULE_NAME	": "
  35#define DRV_VERSION		"2.000"
  36#define DRV_MODULE_RELDATE	"April 19, 2007"
  37
  38/* SCSI bus reset settle time in seconds.  */
  39static int esp_bus_reset_settle = 3;
  40
  41static u32 esp_debug;
  42#define ESP_DEBUG_INTR		0x00000001
  43#define ESP_DEBUG_SCSICMD	0x00000002
  44#define ESP_DEBUG_RESET		0x00000004
  45#define ESP_DEBUG_MSGIN		0x00000008
  46#define ESP_DEBUG_MSGOUT	0x00000010
  47#define ESP_DEBUG_CMDDONE	0x00000020
  48#define ESP_DEBUG_DISCONNECT	0x00000040
  49#define ESP_DEBUG_DATASTART	0x00000080
  50#define ESP_DEBUG_DATADONE	0x00000100
  51#define ESP_DEBUG_RECONNECT	0x00000200
  52#define ESP_DEBUG_AUTOSENSE	0x00000400
  53#define ESP_DEBUG_EVENT		0x00000800
  54#define ESP_DEBUG_COMMAND	0x00001000
  55
  56#define esp_log_intr(f, a...) \
  57do {	if (esp_debug & ESP_DEBUG_INTR) \
  58		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  59} while (0)
  60
  61#define esp_log_reset(f, a...) \
  62do {	if (esp_debug & ESP_DEBUG_RESET) \
  63		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  64} while (0)
  65
  66#define esp_log_msgin(f, a...) \
  67do {	if (esp_debug & ESP_DEBUG_MSGIN) \
  68		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  69} while (0)
  70
  71#define esp_log_msgout(f, a...) \
  72do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
  73		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  74} while (0)
  75
  76#define esp_log_cmddone(f, a...) \
  77do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
  78		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  79} while (0)
  80
  81#define esp_log_disconnect(f, a...) \
  82do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
  83		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  84} while (0)
  85
  86#define esp_log_datastart(f, a...) \
  87do {	if (esp_debug & ESP_DEBUG_DATASTART) \
  88		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  89} while (0)
  90
  91#define esp_log_datadone(f, a...) \
  92do {	if (esp_debug & ESP_DEBUG_DATADONE) \
  93		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  94} while (0)
  95
  96#define esp_log_reconnect(f, a...) \
  97do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
  98		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  99} while (0)
 100
 101#define esp_log_autosense(f, a...) \
 102do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
 103		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
 104} while (0)
 105
 106#define esp_log_event(f, a...) \
 107do {   if (esp_debug & ESP_DEBUG_EVENT)	\
 108		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
 109} while (0)
 110
 111#define esp_log_command(f, a...) \
 112do {   if (esp_debug & ESP_DEBUG_COMMAND)	\
 113		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
 114} while (0)
 115
 116#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
 117#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)
 118
 119static void esp_log_fill_regs(struct esp *esp,
 120			      struct esp_event_ent *p)
 121{
 122	p->sreg = esp->sreg;
 123	p->seqreg = esp->seqreg;
 124	p->sreg2 = esp->sreg2;
 125	p->ireg = esp->ireg;
 126	p->select_state = esp->select_state;
 127	p->event = esp->event;
 128}
 129
/* Record the command byte in the driver's ring-buffer event log, then
 * issue it to the chip's command register.
 */
void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	/* Ring-buffer wrap: relies on ESP_EVENT_LOG_SZ being a power of 2. */
	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);
 146
/* Issue @cmd with its parameter bytes taken from esp->command_block.
 * With ESP_FLAG_USE_FIFO the bytes are pushed through the FIFO by PIO;
 * otherwise the external DMA engine transfers them (FASHME requires a
 * FIFO flush first) and ESP_CMD_DMA is or'd into the command.
 */
static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}
 164
 165static void esp_event(struct esp *esp, u8 val)
 166{
 167	struct esp_event_ent *p;
 168	int idx = esp->esp_event_cur;
 169
 170	p = &esp->esp_event_log[idx];
 171	p->type = ESP_EVENT_TYPE_EVENT;
 172	p->val = val;
 173	esp_log_fill_regs(esp, p);
 174
 175	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
 176
 177	esp->event = val;
 178}
 179
 180static void esp_dump_cmd_log(struct esp *esp)
 181{
 182	int idx = esp->esp_event_cur;
 183	int stop = idx;
 184
 185	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
 186	do {
 187		struct esp_event_ent *p = &esp->esp_event_log[idx];
 188
 189		shost_printk(KERN_INFO, esp->host,
 190			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
 191			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
 192			     idx,
 193			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
 194			     p->val, p->sreg, p->seqreg,
 195			     p->sreg2, p->ireg, p->select_state, p->event);
 196
 197		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
 198	} while (idx != stop);
 199}
 200
/* Flush the chip FIFO.  On ESP236 the flush is not instantaneous, so
 * poll (up to 1ms) until the FIFO byte count reads zero.
 */
static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_BYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}
 217
/* Drain the FASHME FIFO into esp->fifo[].  Two bytes are read per FIFO
 * flag count (the HME FIFO appears to be 16 bits wide — note the double
 * read per iteration).  If sreg2 reports a stale half-word (F1BYTE), a
 * dummy write pushes the final odd byte out so it can be read.
 */
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}
 234
 235static void esp_set_all_config3(struct esp *esp, u8 val)
 236{
 237	int i;
 238
 239	for (i = 0; i < ESP_MAX_TARGET; i++)
 240		esp->target[i].esp_config3 = val;
 241}
 242
/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/*
		 * The AM53c974 chip returns the same ID as FAS236;
		 * try to configure glitch eater.
		 */
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		if (config4 & ESP_CONFIG4_GE1) {
			/* cfg4 readable => AM53c974 (PCSCSI). */
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	/* Round the clock-cycle-derived periods up to register units. */
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	/* Program the config registers according to the detected rev. */
	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
	case PCSCSI:
		/* Fast 236, AM53c974 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
 368
/* Map a command's scatterlist for DMA (or identity-map virtual
 * addresses for PDMA/PIO hosts) and initialize the cursor/residue
 * tracking in the command's private data.
 */
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int total = 0, i;
	struct scatterlist *s;

	if (cmd->sc_data_direction == DMA_NONE)
		return;

	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		/*
		 * For pseudo DMA and PIO we need the virtual address instead of
		 * a dma address, so perform an identity mapping.
		 */
		spriv->num_sg = scsi_sg_count(cmd);

		scsi_for_each_sg(cmd, s, spriv->num_sg, i) {
			s->dma_address = (uintptr_t)sg_virt(s);
			total += sg_dma_len(s);
		}
	} else {
		/* NOTE(review): scsi_dma_map() can return a negative value
		 * on mapping failure; that is not checked here — confirm
		 * the downstream cursor code copes with num_sg < 0.
		 */
		spriv->num_sg = scsi_dma_map(cmd);
		scsi_for_each_sg(cmd, s, spriv->num_sg, i)
			total += sg_dma_len(s);
	}
	/* Cursor starts at the first segment with the full total left. */
	spriv->cur_residue = sg_dma_len(sg);
	spriv->prv_sg = NULL;
	spriv->cur_sg = sg;
	spriv->tot_residue = total;
}
 400
 401static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
 402				   struct scsi_cmnd *cmd)
 403{
 404	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 405
 406	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 407		return ent->sense_dma +
 408			(ent->sense_ptr - cmd->sense_buffer);
 409	}
 410
 411	return sg_dma_address(p->cur_sg) +
 412		(sg_dma_len(p->cur_sg) -
 413		 p->cur_residue);
 414}
 415
 416static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
 417				    struct scsi_cmnd *cmd)
 418{
 419	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 420
 421	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 422		return SCSI_SENSE_BUFFERSIZE -
 423			(ent->sense_ptr - cmd->sense_buffer);
 424	}
 425	return p->cur_residue;
 426}
 427
/* Advance the transfer cursor by @len bytes: bump the sense pointer in
 * autosense mode, otherwise decrement the segment/total residues and
 * step to the next scatterlist segment when the current one is drained.
 * An underflow (chip transferred more than expected) is logged and the
 * residues are clamped to zero.
 */
static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		shost_printk(KERN_ERR, esp->host,
			     "Data transfer overflow.\n");
		shost_printk(KERN_ERR, esp->host,
			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
			     p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->prv_sg = p->cur_sg;
		p->cur_sg = sg_next(p->cur_sg);
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}
 455
 456static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
 457{
 458	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
 459		scsi_dma_unmap(cmd);
 460}
 461
 462static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 463{
 464	struct scsi_cmnd *cmd = ent->cmd;
 465	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 466
 467	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 468		ent->saved_sense_ptr = ent->sense_ptr;
 469		return;
 470	}
 471	ent->saved_cur_residue = spriv->cur_residue;
 472	ent->saved_prv_sg = spriv->prv_sg;
 473	ent->saved_cur_sg = spriv->cur_sg;
 474	ent->saved_tot_residue = spriv->tot_residue;
 475}
 476
 477static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 478{
 479	struct scsi_cmnd *cmd = ent->cmd;
 480	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 481
 482	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 483		ent->sense_ptr = ent->saved_sense_ptr;
 484		return;
 485	}
 486	spriv->cur_residue = ent->saved_cur_residue;
 487	spriv->prv_sg = ent->saved_prv_sg;
 488	spriv->cur_sg = ent->saved_cur_sg;
 489	spriv->tot_residue = ent->saved_tot_residue;
 490}
 491
 492static void esp_write_tgt_config3(struct esp *esp, int tgt)
 493{
 494	if (esp->rev > ESP100A) {
 495		u8 val = esp->target[tgt].esp_config3;
 496
 497		if (val != esp->prev_cfg3) {
 498			esp->prev_cfg3 = val;
 499			esp_write8(val, ESP_CFG3);
 500		}
 501	}
 502}
 503
 504static void esp_write_tgt_sync(struct esp *esp, int tgt)
 505{
 506	u8 off = esp->target[tgt].esp_offset;
 507	u8 per = esp->target[tgt].esp_period;
 508
 509	if (off != esp->prev_soff) {
 510		esp->prev_soff = off;
 511		esp_write8(off, ESP_SOFF);
 512	}
 513	if (per != esp->prev_stp) {
 514		esp->prev_stp = per;
 515		esp_write8(per, ESP_STP);
 516	}
 517}
 518
 519static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
 520{
 521	if (esp->rev == FASHME) {
 522		/* Arbitrary segment boundaries, 24-bit counts.  */
 523		if (dma_len > (1U << 24))
 524			dma_len = (1U << 24);
 525	} else {
 526		u32 base, end;
 527
 528		/* ESP chip limits other variants by 16-bits of transfer
 529		 * count.  Actually on FAS100A and FAS236 we could get
 530		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
 531		 * in the ESP_CFG2 register but that causes other unwanted
 532		 * changes so we don't use it currently.
 533		 */
 534		if (dma_len > (1U << 16))
 535			dma_len = (1U << 16);
 536
 537		/* All of the DMA variants hooked up to these chips
 538		 * cannot handle crossing a 24-bit address boundary.
 539		 */
 540		base = dma_addr & ((1U << 24) - 1U);
 541		end = base + dma_len;
 542		if (end > (1U << 24))
 543			end = (1U <<24);
 544		dma_len = end - base;
 545	}
 546	return dma_len;
 547}
 548
 549static int esp_need_to_nego_wide(struct esp_target_data *tp)
 550{
 551	struct scsi_target *target = tp->starget;
 552
 553	return spi_width(target) != tp->nego_goal_width;
 554}
 555
 556static int esp_need_to_nego_sync(struct esp_target_data *tp)
 557{
 558	struct scsi_target *target = tp->starget;
 559
 560	/* When offset is zero, period is "don't care".  */
 561	if (!spi_offset(target) && !tp->nego_goal_offset)
 562		return 0;
 563
 564	if (spi_offset(target) == tp->nego_goal_offset &&
 565	    spi_period(target) == tp->nego_goal_period)
 566		return 0;
 567
 568	return 1;
 569}
 570
 571static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
 572			     struct esp_lun_data *lp)
 573{
 574	if (!ent->orig_tag[0]) {
 575		/* Non-tagged, slot already taken?  */
 576		if (lp->non_tagged_cmd)
 577			return -EBUSY;
 578
 579		if (lp->hold) {
 580			/* We are being held by active tagged
 581			 * commands.
 582			 */
 583			if (lp->num_tagged)
 584				return -EBUSY;
 585
 586			/* Tagged commands completed, we can unplug
 587			 * the queue and run this untagged command.
 588			 */
 589			lp->hold = 0;
 590		} else if (lp->num_tagged) {
 591			/* Plug the queue until num_tagged decreases
 592			 * to zero in esp_free_lun_tag.
 593			 */
 594			lp->hold = 1;
 595			return -EBUSY;
 596		}
 597
 598		lp->non_tagged_cmd = ent;
 599		return 0;
 600	}
 601
 602	/* Tagged command. Check that it isn't blocked by a non-tagged one. */
 603	if (lp->non_tagged_cmd || lp->hold)
 604		return -EBUSY;
 605
 606	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);
 607
 608	lp->tagged_cmds[ent->orig_tag[1]] = ent;
 609	lp->num_tagged++;
 610
 611	return 0;
 612}
 613
 614static void esp_free_lun_tag(struct esp_cmd_entry *ent,
 615			     struct esp_lun_data *lp)
 616{
 617	if (ent->orig_tag[0]) {
 618		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
 619		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
 620		lp->num_tagged--;
 621	} else {
 622		BUG_ON(lp->non_tagged_cmd != ent);
 623		lp->non_tagged_cmd = NULL;
 624	}
 625}
 626
 627static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
 628{
 629	ent->sense_ptr = ent->cmd->sense_buffer;
 630	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
 631		ent->sense_dma = (uintptr_t)ent->sense_ptr;
 632		return;
 633	}
 634
 635	ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
 636					SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 637}
 638
 639static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
 640{
 641	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
 642		dma_unmap_single(esp->dev, ent->sense_dma,
 643				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 644	ent->sense_ptr = NULL;
 645}
 646
/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;


	/* Map the midlayer sense buffer for DMA, unless a previous
	 * (interrupted) attempt already did so.
	 */
	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);
		esp_map_sense(esp, ent);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	/* Build IDENTIFY followed by a 6-byte REQUEST_SENSE CDB in the
	 * command block; allocation length is the full sense buffer.
	 */
	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	/* Pre-SCSI-3 devices expect the LUN in bits 5-7 of CDB byte 1. */
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	/* Program the destination bus ID; FASHME needs extra mode bits. */
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	/* Ship the built bytes to the chip and start selection w/ATN. */
	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}
 700
 701static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
 702{
 703	struct esp_cmd_entry *ent;
 704
 705	list_for_each_entry(ent, &esp->queued_cmds, list) {
 706		struct scsi_cmnd *cmd = ent->cmd;
 707		struct scsi_device *dev = cmd->device;
 708		struct esp_lun_data *lp = dev->hostdata;
 709
 710		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 711			ent->tag[0] = 0;
 712			ent->tag[1] = 0;
 713			return ent;
 714		}
 715
 716		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
 717			ent->tag[0] = 0;
 718			ent->tag[1] = 0;
 719		}
 720		ent->orig_tag[0] = ent->tag[0];
 721		ent->orig_tag[1] = ent->tag[1];
 722
 723		if (esp_alloc_lun_tag(ent, lp) < 0)
 724			continue;
 725
 726		return ent;
 727	}
 728
 729	return NULL;
 730}
 731
/* Start the next queued command if the chip is idle: pick a command,
 * map its data for DMA, optionally build negotiation message bytes,
 * and program the chip to begin target selection.
 */
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	bool select_and_stop = false;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	/* Only one active command at a time; never start new work while
	 * a reset is in progress.
	 */
	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	/* Autosense commands are issued via their own dedicated path. */
	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];

	/* Promote the entry from the queued list to the active list. */
	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	/* CDB lengths other than 6/10/12 are delivered manually later,
	 * via Select and Stop.
	 */
	if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
		select_and_stop = true;

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this.  */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		/* Wide is only negotiated on FASHME; otherwise fall back
		 * to sync negotiation when needed.
		 */
		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* If there are multiple message bytes, use Select and Stop */
		if (esp->msg_out_len)
			select_and_stop = true;
	}

build_identify:
	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		select_and_stop = true;
	}

	if (select_and_stop) {
		/* Command bytes will be fed to the chip phase by phase. */
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		/* Tag message bytes must precede any negotiation bytes,
		 * so shift the existing msg_out contents up by two.
		 */
		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	} else {
		/* Fast path: identify (+ optional tag) and the CDB go out
		 * in a single selection sequence.
		 */
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	/* Ship the built bytes to the chip and begin selection. */
	esp_send_dma_cmd(esp, val, 16, start_cmd);
}
 874
 875static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
 876{
 877	struct list_head *head = &esp->esp_cmd_pool;
 878	struct esp_cmd_entry *ret;
 879
 880	if (list_empty(head)) {
 881		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
 882	} else {
 883		ret = list_entry(head->next, struct esp_cmd_entry, list);
 884		list_del(&ret->list);
 885		memset(ret, 0, sizeof(*ret));
 886	}
 887	return ret;
 888}
 889
/* Return a command entry to the free pool for reuse by esp_get_ent(). */
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}
 894
/* Complete a command: release its DMA mapping and LUN tag, fix up the
 * result for autosense completions, hand the command back to the SCSI
 * midlayer, and try to start the next queued command.
 */
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	/* Wake an error-handler thread waiting on this command, if any. */
	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_unmap_sense(esp, ent);

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	/* Drop the entry from the active list and recycle it. */
	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}
 943
 944static unsigned int compose_result(unsigned int status, unsigned int message,
 945				   unsigned int driver_code)
 946{
 947	return (status | (message << 8) | (driver_code << 16));
 948}
 949
 950static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
 951{
 952	struct scsi_device *dev = ent->cmd->device;
 953	struct esp_lun_data *lp = dev->hostdata;
 954
 955	scsi_track_queue_full(dev, lp->num_tagged - 1);
 956}
 957
 958static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 959{
 960	struct scsi_device *dev = cmd->device;
 961	struct esp *esp = shost_priv(dev->host);
 962	struct esp_cmd_priv *spriv;
 963	struct esp_cmd_entry *ent;
 964
 965	ent = esp_get_ent(esp);
 966	if (!ent)
 967		return SCSI_MLQUEUE_HOST_BUSY;
 968
 969	ent->cmd = cmd;
 970
 971	cmd->scsi_done = done;
 972
 973	spriv = ESP_CMD_PRIV(cmd);
 974	spriv->num_sg = 0;
 975
 976	list_add_tail(&ent->list, &esp->queued_cmds);
 977
 978	esp_maybe_execute_command(esp);
 979
 980	return 0;
 981}
 982
/* Generate esp_queuecommand(), the locked wrapper the midlayer calls
 * around esp_queuecommand_lck().
 */
static DEF_SCSI_QCMD(esp_queuecommand)
 984
 985static int esp_check_gross_error(struct esp *esp)
 986{
 987	if (esp->sreg & ESP_STAT_SPAM) {
 988		/* Gross Error, could be one of:
 989		 * - top of fifo overwritten
 990		 * - top of command register overwritten
 991		 * - DMA programmed with wrong direction
 992		 * - improper phase change
 993		 */
 994		shost_printk(KERN_ERR, esp->host,
 995			     "Gross error sreg[%02x]\n", esp->sreg);
 996		/* XXX Reset the chip. XXX */
 997		return 1;
 998	}
 999	return 0;
1000}
1001
1002static int esp_check_spur_intr(struct esp *esp)
1003{
1004	switch (esp->rev) {
1005	case ESP100:
1006	case ESP100A:
1007		/* The interrupt pending bit of the status register cannot
1008		 * be trusted on these revisions.
1009		 */
1010		esp->sreg &= ~ESP_STAT_INTR;
1011		break;
1012
1013	default:
1014		if (!(esp->sreg & ESP_STAT_INTR)) {
1015			if (esp->ireg & ESP_INTR_SR)
1016				return 1;
1017
1018			/* If the DMA is indicating interrupt pending and the
1019			 * ESP is not, the only possibility is a DMA error.
1020			 */
1021			if (!esp->ops->dma_error(esp)) {
1022				shost_printk(KERN_ERR, esp->host,
1023					     "Spurious irq, sreg=%02x.\n",
1024					     esp->sreg);
1025				return -1;
1026			}
1027
1028			shost_printk(KERN_ERR, esp->host, "DMA error\n");
1029
1030			/* XXX Reset the chip. XXX */
1031			return -1;
1032		}
1033		break;
1034	}
1035
1036	return 0;
1037}
1038
/* Flag the driver as resetting and queue an ESP_EVENT_RESET so the
 * event state machine performs the actual bus reset.
 */
static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %ps\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}
1046
/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	/* Busy-wait for the next interrupt of the reselection sequence. */
	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	/* The bus must be in message-in phase for the tag bytes. */
	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message.  */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	/* Poll (with a small delay) for the function-done interrupt
	 * signalling the tag bytes have arrived.
	 */
	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	/* First byte must be a valid tag message type. */
	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	/* Second byte is the tag value; look up the owning command. */
	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}
1141
/* Handle a reselection by a target: decode the reselecting target and
 * LUN, locate the disconnected command (untagged or tagged) and make it
 * the active command again.  Returns 1 on success; on any inconsistency
 * schedules a bus reset and returns 0.
 */
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		/* After masking our own ID exactly one bit may remain:
		 * the reselecting target.
		 */
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	/* Reload the reconnecting target's sync/config3 parameters. */
	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	/* A disconnected untagged command, if any, is the reconnector;
	 * otherwise poll in the tag bytes to find the tagged command.
	 */
	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}
1226
/* Process the interrupt that completed (or aborted) a selection started
 * by esp_maybe_execute_command()/esp_autosense().  Decodes the interrupt
 * register to distinguish selection success, selection timeout, and
 * being reselected mid-attempt.
 */
static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;

	/* No longer selecting.  */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	/* Reselected by another target while trying to select. */
	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp_unmap_sense(esp, ent);
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	/* Selection timeout: target did not respond. */
	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a Select And Stop command, negotiation, etc.
		 * we'll do the right thing as we transition to the next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	/* Anything else is unexpected; reset the bus. */
	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}
1320
/* Compute how many bytes of the current data transfer actually moved,
 * based on the chip's transfer counter and FIFO state.  Returns -1 when
 * the ESP100 synchronous-transfer bug is detected, in which case the
 * caller should reset and fall back for this target.
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	/* In wide mode each FIFO flag slot accounts for two bytes. */
	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	/* Read the remaining transfer count unless the counter already
	 * expired (ESP_STAT_TCNT set means it reached zero).
	 */
	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		/* Some variants extend the counter to 24 bits. */
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;
	bytes_sent -= esp->send_cmd_residual;

	/*
	 * The am53c974 has a DMA 'pecularity'. The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0 '. When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		/* Store the PIO-fetched byte at the proper position in
		 * the sense buffer or scatterlist.
		 */
		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	/* On writes, bytes still sitting in the FIFO never made it out. */
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
1414
/* Commit a negotiated synchronous transfer agreement: record it in the
 * SPI transport class, update the target's cached period/offset/config3
 * state and program the chip registers accordingly.  Passing all zeros
 * reverts the target to asynchronous transfers.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			/* Periods below 50 select the fast-SCSI timing bit
			 * in config3; FASHME additionally drops radelay.
			 */
			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	/* Cache and program the new sync period/offset. */
	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}
1453
/* Target sent MESSAGE REJECT.  If we were negotiating wide, fall back
 * to narrow and possibly chain into sync negotiation; if we were
 * negotiating sync, fall back to async.  A reject outside negotiation
 * is unexpected and forces a bus reset.
 */
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			/* Wide failed; try sync negotiation next. */
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		/* Sync rejected: drop back to asynchronous transfers. */
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
	esp_schedule_reset(esp);
}
1493
/* Process an incoming SDTR (synchronous data transfer request)
 * response: validate the offered period/offset, translate the period
 * into chip clock ticks, and commit the agreement - or reject it.
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	/* An SDTR we did not initiate is rejected. */
	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		/* Period too slow for us: counter-offer async via SDTR. */
		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		/* Convert the period into chip clock ticks.
		 * NOTE(review): period << 2 assumes SDTR period units of
		 * 4ns with ccycle in picoseconds - confirm against
		 * esp->ccycle's definition.
		 */
		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1544
/* Process an incoming WDTR (wide data transfer request) response.
 * Only FASHME supports wide transfers; commit the 8/16-bit agreement
 * and then chain into sync negotiation if still needed, otherwise
 * reject the message.
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	/* A WDTR we did not initiate is rejected. */
	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	/* Record the agreed width and program the chip's wide-enable bit. */
	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	/* Wide settled; proceed to sync negotiation if still required. */
	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1593
1594static void esp_msgin_extended(struct esp *esp)
1595{
1596	struct esp_cmd_entry *ent = esp->active_cmd;
1597	struct scsi_cmnd *cmd = ent->cmd;
1598	struct esp_target_data *tp;
1599	int tgt = cmd->device->id;
1600
1601	tp = &esp->target[tgt];
1602	if (esp->msg_in[2] == EXTENDED_SDTR) {
1603		esp_msgin_sdtr(esp, tp);
1604		return;
1605	}
1606	if (esp->msg_in[2] == EXTENDED_WDTR) {
1607		esp_msgin_wdtr(esp, tp);
1608		return;
1609	}
1610
1611	shost_printk(KERN_INFO, esp->host,
1612		     "Unexpected extended msg type %x\n", esp->msg_in[2]);
1613
1614	esp->msg_out[0] = MESSAGE_REJECT;
1615	esp->msg_out_len = 1;
1616	scsi_esp_cmd(esp, ESP_CMD_SATN);
1617}
1618
1619/* Analyze msgin bytes received from target so far.  Return non-zero
1620 * if there are more bytes needed to complete the message.
1621 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify messages (0x80-0xff) are only meaningful at
		 * reselection time, which is handled elsewhere.
		 */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		/* Need the length byte, then 'length' more bytes past the
		 * two-byte header, before the message can be parsed.
		 */
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		/* On a 16-bit bus only a one-byte residue is valid. */
		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			/* Already advanced to the next sg element; step
			 * back to the previous one for the stray byte.
			 */
			spriv->cur_sg = spriv->prv_sg;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		/* Remember which message ended the phase; FREE_BUS acts
		 * on it once the bus goes free.
		 */
		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		/* Anything unrecognized gets a MESSAGE_REJECT reply. */
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
1693
1694static int esp_process_event(struct esp *esp)
1695{
1696	int write, i;
1697
1698again:
1699	write = 0;
1700	esp_log_event("process event %d phase %x\n",
1701		      esp->event, esp->sreg & ESP_STAT_PMASK);
1702	switch (esp->event) {
1703	case ESP_EVENT_CHECK_PHASE:
1704		switch (esp->sreg & ESP_STAT_PMASK) {
1705		case ESP_DOP:
1706			esp_event(esp, ESP_EVENT_DATA_OUT);
1707			break;
1708		case ESP_DIP:
1709			esp_event(esp, ESP_EVENT_DATA_IN);
1710			break;
1711		case ESP_STATP:
1712			esp_flush_fifo(esp);
1713			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1714			esp_event(esp, ESP_EVENT_STATUS);
1715			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1716			return 1;
1717
1718		case ESP_MOP:
1719			esp_event(esp, ESP_EVENT_MSGOUT);
1720			break;
1721
1722		case ESP_MIP:
1723			esp_event(esp, ESP_EVENT_MSGIN);
1724			break;
1725
1726		case ESP_CMDP:
1727			esp_event(esp, ESP_EVENT_CMD_START);
1728			break;
1729
1730		default:
1731			shost_printk(KERN_INFO, esp->host,
1732				     "Unexpected phase, sreg=%02x\n",
1733				     esp->sreg);
1734			esp_schedule_reset(esp);
1735			return 0;
1736		}
1737		goto again;
1738
1739	case ESP_EVENT_DATA_IN:
1740		write = 1;
1741		/* fallthru */
1742
1743	case ESP_EVENT_DATA_OUT: {
1744		struct esp_cmd_entry *ent = esp->active_cmd;
1745		struct scsi_cmnd *cmd = ent->cmd;
1746		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1747		unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1748
1749		if (esp->rev == ESP100)
1750			scsi_esp_cmd(esp, ESP_CMD_NULL);
1751
1752		if (write)
1753			ent->flags |= ESP_CMD_FLAG_WRITE;
1754		else
1755			ent->flags &= ~ESP_CMD_FLAG_WRITE;
1756
1757		if (esp->ops->dma_length_limit)
1758			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1759							     dma_len);
1760		else
1761			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1762
1763		esp->data_dma_len = dma_len;
1764
1765		if (!dma_len) {
1766			shost_printk(KERN_ERR, esp->host,
1767				     "DMA length is zero!\n");
1768			shost_printk(KERN_ERR, esp->host,
1769				     "cur adr[%08llx] len[%08x]\n",
1770				     (unsigned long long)esp_cur_dma_addr(ent, cmd),
1771				     esp_cur_dma_len(ent, cmd));
1772			esp_schedule_reset(esp);
1773			return 0;
1774		}
1775
1776		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
1777				  (unsigned long long)dma_addr, dma_len, write);
1778
1779		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1780				       write, ESP_CMD_DMA | ESP_CMD_TI);
1781		esp_event(esp, ESP_EVENT_DATA_DONE);
1782		break;
1783	}
1784	case ESP_EVENT_DATA_DONE: {
1785		struct esp_cmd_entry *ent = esp->active_cmd;
1786		struct scsi_cmnd *cmd = ent->cmd;
1787		int bytes_sent;
1788
1789		if (esp->ops->dma_error(esp)) {
1790			shost_printk(KERN_INFO, esp->host,
1791				     "data done, DMA error, resetting\n");
1792			esp_schedule_reset(esp);
1793			return 0;
1794		}
1795
1796		if (ent->flags & ESP_CMD_FLAG_WRITE) {
1797			/* XXX parity errors, etc. XXX */
1798
1799			esp->ops->dma_drain(esp);
1800		}
1801		esp->ops->dma_invalidate(esp);
1802
1803		if (esp->ireg != ESP_INTR_BSERV) {
1804			/* We should always see exactly a bus-service
1805			 * interrupt at the end of a successful transfer.
1806			 */
1807			shost_printk(KERN_INFO, esp->host,
1808				     "data done, not BSERV, resetting\n");
1809			esp_schedule_reset(esp);
1810			return 0;
1811		}
1812
1813		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1814
1815		esp_log_datadone("data done flgs[%x] sent[%d]\n",
1816				 ent->flags, bytes_sent);
1817
1818		if (bytes_sent < 0) {
1819			/* XXX force sync mode for this target XXX */
1820			esp_schedule_reset(esp);
1821			return 0;
1822		}
1823
1824		esp_advance_dma(esp, ent, cmd, bytes_sent);
1825		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1826		goto again;
1827	}
1828
1829	case ESP_EVENT_STATUS: {
1830		struct esp_cmd_entry *ent = esp->active_cmd;
1831
1832		if (esp->ireg & ESP_INTR_FDONE) {
1833			ent->status = esp_read8(ESP_FDATA);
1834			ent->message = esp_read8(ESP_FDATA);
1835			scsi_esp_cmd(esp, ESP_CMD_MOK);
1836		} else if (esp->ireg == ESP_INTR_BSERV) {
1837			ent->status = esp_read8(ESP_FDATA);
1838			ent->message = 0xff;
1839			esp_event(esp, ESP_EVENT_MSGIN);
1840			return 0;
1841		}
1842
1843		if (ent->message != COMMAND_COMPLETE) {
1844			shost_printk(KERN_INFO, esp->host,
1845				     "Unexpected message %x in status\n",
1846				     ent->message);
1847			esp_schedule_reset(esp);
1848			return 0;
1849		}
1850
1851		esp_event(esp, ESP_EVENT_FREE_BUS);
1852		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1853		break;
1854	}
1855	case ESP_EVENT_FREE_BUS: {
1856		struct esp_cmd_entry *ent = esp->active_cmd;
1857		struct scsi_cmnd *cmd = ent->cmd;
1858
1859		if (ent->message == COMMAND_COMPLETE ||
1860		    ent->message == DISCONNECT)
1861			scsi_esp_cmd(esp, ESP_CMD_ESEL);
1862
1863		if (ent->message == COMMAND_COMPLETE) {
1864			esp_log_cmddone("Command done status[%x] message[%x]\n",
1865					ent->status, ent->message);
1866			if (ent->status == SAM_STAT_TASK_SET_FULL)
1867				esp_event_queue_full(esp, ent);
1868
1869			if (ent->status == SAM_STAT_CHECK_CONDITION &&
1870			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1871				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1872				esp_autosense(esp, ent);
1873			} else {
1874				esp_cmd_is_done(esp, ent, cmd,
1875						compose_result(ent->status,
1876							       ent->message,
1877							       DID_OK));
1878			}
1879		} else if (ent->message == DISCONNECT) {
1880			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
1881					   cmd->device->id,
1882					   ent->tag[0], ent->tag[1]);
1883
1884			esp->active_cmd = NULL;
1885			esp_maybe_execute_command(esp);
1886		} else {
1887			shost_printk(KERN_INFO, esp->host,
1888				     "Unexpected message %x in freebus\n",
1889				     ent->message);
1890			esp_schedule_reset(esp);
1891			return 0;
1892		}
1893		if (esp->active_cmd)
1894			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1895		break;
1896	}
1897	case ESP_EVENT_MSGOUT: {
1898		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1899
1900		if (esp_debug & ESP_DEBUG_MSGOUT) {
1901			int i;
1902			printk("ESP: Sending message [ ");
1903			for (i = 0; i < esp->msg_out_len; i++)
1904				printk("%02x ", esp->msg_out[i]);
1905			printk("]\n");
1906		}
1907
1908		if (esp->rev == FASHME) {
1909			int i;
1910
1911			/* Always use the fifo.  */
1912			for (i = 0; i < esp->msg_out_len; i++) {
1913				esp_write8(esp->msg_out[i], ESP_FDATA);
1914				esp_write8(0, ESP_FDATA);
1915			}
1916			scsi_esp_cmd(esp, ESP_CMD_TI);
1917		} else {
1918			if (esp->msg_out_len == 1) {
1919				esp_write8(esp->msg_out[0], ESP_FDATA);
1920				scsi_esp_cmd(esp, ESP_CMD_TI);
1921			} else if (esp->flags & ESP_FLAG_USE_FIFO) {
1922				for (i = 0; i < esp->msg_out_len; i++)
1923					esp_write8(esp->msg_out[i], ESP_FDATA);
1924				scsi_esp_cmd(esp, ESP_CMD_TI);
1925			} else {
1926				/* Use DMA. */
1927				memcpy(esp->command_block,
1928				       esp->msg_out,
1929				       esp->msg_out_len);
1930
1931				esp->ops->send_dma_cmd(esp,
1932						       esp->command_block_dma,
1933						       esp->msg_out_len,
1934						       esp->msg_out_len,
1935						       0,
1936						       ESP_CMD_DMA|ESP_CMD_TI);
1937			}
1938		}
1939		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1940		break;
1941	}
1942	case ESP_EVENT_MSGOUT_DONE:
1943		if (esp->rev == FASHME) {
1944			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1945		} else {
1946			if (esp->msg_out_len > 1)
1947				esp->ops->dma_invalidate(esp);
1948
1949			/* XXX if the chip went into disconnected mode,
1950			 * we can't run the phase state machine anyway.
1951			 */
1952			if (!(esp->ireg & ESP_INTR_DC))
1953				scsi_esp_cmd(esp, ESP_CMD_NULL);
1954		}
1955
1956		esp->msg_out_len = 0;
1957
1958		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1959		goto again;
1960	case ESP_EVENT_MSGIN:
1961		if (esp->ireg & ESP_INTR_BSERV) {
1962			if (esp->rev == FASHME) {
1963				if (!(esp_read8(ESP_STATUS2) &
1964				      ESP_STAT2_FEMPTY))
1965					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1966			} else {
1967				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1968				if (esp->rev == ESP100)
1969					scsi_esp_cmd(esp, ESP_CMD_NULL);
1970			}
1971			scsi_esp_cmd(esp, ESP_CMD_TI);
1972			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1973			return 1;
1974		}
1975		if (esp->ireg & ESP_INTR_FDONE) {
1976			u8 val;
1977
1978			if (esp->rev == FASHME)
1979				val = esp->fifo[0];
1980			else
1981				val = esp_read8(ESP_FDATA);
1982			esp->msg_in[esp->msg_in_len++] = val;
1983
1984			esp_log_msgin("Got msgin byte %x\n", val);
1985
1986			if (!esp_msgin_process(esp))
1987				esp->msg_in_len = 0;
1988
1989			if (esp->rev == FASHME)
1990				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1991
1992			scsi_esp_cmd(esp, ESP_CMD_MOK);
1993
1994			/* Check whether a bus reset is to be done next */
1995			if (esp->event == ESP_EVENT_RESET)
1996				return 0;
1997
1998			if (esp->event != ESP_EVENT_FREE_BUS)
1999				esp_event(esp, ESP_EVENT_CHECK_PHASE);
2000		} else {
2001			shost_printk(KERN_INFO, esp->host,
2002				     "MSGIN neither BSERV not FDON, resetting");
2003			esp_schedule_reset(esp);
2004			return 0;
2005		}
2006		break;
2007	case ESP_EVENT_CMD_START:
2008		memcpy(esp->command_block, esp->cmd_bytes_ptr,
2009		       esp->cmd_bytes_left);
2010		esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
2011		esp_event(esp, ESP_EVENT_CMD_DONE);
2012		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
2013		break;
2014	case ESP_EVENT_CMD_DONE:
2015		esp->ops->dma_invalidate(esp);
2016		if (esp->ireg & ESP_INTR_BSERV) {
2017			esp_event(esp, ESP_EVENT_CHECK_PHASE);
2018			goto again;
2019		}
2020		esp_schedule_reset(esp);
2021		return 0;
2022
2023	case ESP_EVENT_RESET:
2024		scsi_esp_cmd(esp, ESP_CMD_RS);
2025		break;
2026
2027	default:
2028		shost_printk(KERN_INFO, esp->host,
2029			     "Unexpected event %x, resetting\n", esp->event);
2030		esp_schedule_reset(esp);
2031		return 0;
2032	}
2033	return 1;
2034}
2035
/* Fail one outstanding command with DID_RESET and release everything
 * attached to it: DMA mappings, the LUN tag slot, a still-mapped
 * autosense buffer, and the command entry itself.
 */
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	/* If the reset hit mid-autosense, the sense buffer is mapped. */
	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
		esp_unmap_sense(esp, ent);

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}
2051
2052static void esp_clear_hold(struct scsi_device *dev, void *data)
2053{
2054	struct esp_lun_data *lp = dev->hostdata;
2055
2056	BUG_ON(lp->num_tagged);
2057	lp->hold = 0;
2058}
2059
/* Complete every queued and active command with DID_RESET after a SCSI
 * bus reset, then force renegotiation of sync/wide agreements for all
 * targets.  Runs under the host lock.
 */
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	/* Queued commands were never issued, so no unmapping is needed. */
	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers.  */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		/* Release per-LUN queue holds; see esp_clear_hold(). */
		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
2100
2101/* Runs under host->lock */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	/*
	 * Once INTRPT is read STATUS and SSTEP are cleared, so read
	 * them first and latch all three into the softc.
	 */
	esp->sreg = esp_read8(ESP_STATUS);
	esp->seqreg = esp_read8(ESP_SSTEP);
	esp->ireg = esp_read8(ESP_INTRPT);

	/* A reset we initiated completes on the next interrupt. */
	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		/* Negative return means a reset was scheduled; bail. */
		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	/* An externally generated SCSI bus reset also finishes one. */
	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		/* Wake any error-handler thread waiting on this reset. */
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		/* Snapshot the HME FIFO before it can be clobbered,
		 * except in phases where it is actively in use.
		 */
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (esp->ireg & ESP_INTR_RSEL) {
			/* Reselected by a target; wind down any pending
			 * selection of our own first.
			 */
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		} else {
			/* Some combination of FDONE, BSERV, DC. */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		}
	}
	/* Run the event state machine until it has consumed the IRQ. */
	while (!intr_done)
		intr_done = esp_process_event(esp);
}
2178
/* Shared interrupt handler for all ESP front-end drivers.  After each
 * serviced interrupt, if the event machine set ESP_FLAG_QUICKIRQ_CHECK,
 * briefly poll (up to ESP_QUICKIRQ_LIMIT probes) for a quickly
 * following interrupt to avoid an extra IRQ round-trip.
 */
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			/* No follow-up interrupt arrived in time. */
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
2209EXPORT_SYMBOL(scsi_esp_intr);
2210
/* Probe which ESP chip variant is present by testing which of the
 * CFG2/CFG3 registers are actually implemented.
 */
static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	if (esp->config2 == 0) {
		/* Write a known pattern to CFG2 and read it back. */
		esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
		esp_write8(esp->config2, ESP_CFG2);

		val = esp_read8(ESP_CFG2);
		val &= ~ESP_CONFIG2_MAGIC;

		esp->config2 = 0;
		if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
			/*
			 * If what we write to cfg2 does not come back,
			 * cfg2 is not implemented.
			 * Therefore this must be a plain esp100.
			 */
			esp->rev = ESP100;
			return;
		}
	}

	/* Same trick for CFG3: write 5 and see if it reads back. */
	esp_set_all_config3(esp, 5);
	esp->prev_cfg3 = 5;
	esp_write8(esp->config2, ESP_CFG2);
	esp_write8(0, ESP_CFG3);
	esp_write8(esp->prev_cfg3, ESP_CFG3);

	val = esp_read8(ESP_CFG3);
	if (val != 5) {
		/* The cfg2 register is implemented, however
		 * cfg3 is not, must be esp100a.
		 */
		esp->rev = ESP100A;
	} else {
		esp_set_all_config3(esp, 0);
		esp->prev_cfg3 = 0;
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		/* All of cfg{1,2,3} implemented, must be one of
		 * the fas variants, figure out which one.
		 */
		if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
			esp->rev = FAST;
			esp->sync_defp = SYNC_DEFP_FAST;
		} else {
			esp->rev = ESP236;
		}
	}
}
2263
2264static void esp_init_swstate(struct esp *esp)
2265{
2266	int i;
2267
2268	INIT_LIST_HEAD(&esp->queued_cmds);
2269	INIT_LIST_HEAD(&esp->active_cmds);
2270	INIT_LIST_HEAD(&esp->esp_cmd_pool);
2271
2272	/* Start with a clear state, domain validation (via ->slave_configure,
2273	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2274	 * commands.
2275	 */
2276	for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2277		esp->target[i].flags = 0;
2278		esp->target[i].nego_goal_period = 0;
2279		esp->target[i].nego_goal_offset = 0;
2280		esp->target[i].nego_goal_width = 0;
2281		esp->target[i].nego_goal_tags = 0;
2282	}
2283}
2284
/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	/* Restore CFG1 (re-enabling the reset interrupt). */
	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}
2309
static void esp_set_clock_params(struct esp *esp)
{
	int fhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place.  We are trying to
	 * basically figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 *    This is a representation of the input crystal clock frequency
	 *    going into the ESP on this machine.  Any operation whose timing
	 *    is longer than 400ns depends on this value being correct.  For
	 *    example, you'll get blips for arbitration/selection during high
	 *    load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 *    The ESP isn't very bright and will arbitrate for the bus and try
	 *    to select a target forever if you let it.  This value tells the
	 *    ESP when it has taken too long to negotiate and that it should
	 *    interrupt the CPU so we can see what happened.  The value is
	 *    computed as follows (from NCR/Symbios chip docs).
	 *
	 *          (Time Out Period) *  (Input Clock)
	 *    STO = ----------------------------------
	 *          (8192) * (Clock Conversion Factor)
	 *
	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
	 * c) Empirical constants for synchronous offset and transfer period
	 *    register values
	 *
	 *    This entails the smallest and largest sync period we could ever
	 *    handle on this ESP.
	 */
	fhz = esp->cfreq;

	/* CCF encodes the clock in 5MHz steps, rounded up. */
	ccf = ((fhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHZ.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property.  All
	 * other machines provide useful values it seems.
	 */
	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
		fhz = 20000000;
		ccf = 4;
	}

	/* The chip encodes a CCF of 8 as 0 in the clock factor field. */
	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fhz;
	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}
2370
/* Human-readable chip names; must stay in the same order as the
 * esp_rev enumeration used for esp->rev.
 */
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
	"AM53C974",
};
2381
2382static struct scsi_transport_template *esp_transport_template;
2383
/* Register an ESP host with the SCSI midlayer.  Called by the bus
 * front-end drivers after filling in esp->ops, clocks and register
 * mappings.  Probes the chip revision, resets the bus, and adds the
 * Scsi_Host.  Returns 0 on success or the scsi_add_host() error.
 */
int scsi_esp_register(struct esp *esp)
{
	static int instance;	/* running esp%u unit number */
	int err;

	if (!esp->num_tags)
		esp->num_tags = ESP_DEFAULT_TAGS;
	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
		   esp->host->unique_id, esp->regs, esp->dma_regs,
		   esp->host->irq);
	dev_printk(KERN_INFO, esp->dev,
		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
		   esp->host->unique_id, esp_chip_names[esp->rev],
		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, esp->dev);
	if (err)
		return err;

	/* Only consume a unit number once the host actually registered. */
	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
2425EXPORT_SYMBOL(scsi_esp_register);
2426
/* Detach an ESP host from the SCSI midlayer; inverse of
 * scsi_esp_register().
 */
void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
2431EXPORT_SYMBOL(scsi_esp_unregister);
2432
2433static int esp_target_alloc(struct scsi_target *starget)
2434{
2435	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2436	struct esp_target_data *tp = &esp->target[starget->id];
2437
2438	tp->starget = starget;
2439
2440	return 0;
2441}
2442
2443static void esp_target_destroy(struct scsi_target *starget)
2444{
2445	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2446	struct esp_target_data *tp = &esp->target[starget->id];
2447
2448	tp->starget = NULL;
2449}
2450
2451static int esp_slave_alloc(struct scsi_device *dev)
2452{
2453	struct esp *esp = shost_priv(dev->host);
2454	struct esp_target_data *tp = &esp->target[dev->id];
2455	struct esp_lun_data *lp;
2456
2457	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2458	if (!lp)
2459		return -ENOMEM;
2460	dev->hostdata = lp;
2461
2462	spi_min_period(tp->starget) = esp->min_period;
2463	spi_max_offset(tp->starget) = 15;
2464
2465	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2466		spi_max_width(tp->starget) = 1;
2467	else
2468		spi_max_width(tp->starget) = 0;
2469
2470	return 0;
2471}
2472
/* Configure a newly attached scsi_device: set the tagged queue depth
 * when supported, allow disconnects, and trigger SPI domain validation
 * once per target.
 */
static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];

	if (dev->tagged_supported)
		scsi_change_queue_depth(dev, esp->num_tags);

	tp->flags |= ESP_TGT_DISCONNECT;

	/* Domain validation negotiates sync/wide capabilities. */
	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}
2488
2489static void esp_slave_destroy(struct scsi_device *dev)
2490{
2491	struct esp_lun_data *lp = dev->hostdata;
2492
2493	kfree(lp);
2494	dev->hostdata = NULL;
2495}
2496
/* Error-handler abort: try to cancel one command.  A command still on
 * the queued list is simply removed; the currently active command gets
 * an ABORT_TASK_SET message; a disconnected command cannot be aborted
 * here and FAILED is returned so the midlayer escalates.
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
		     cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		shost_printk(KERN_ERR, esp->host,
			     "Current command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	/* Drop and retake the lock between the debug dump and the real
	 * abort work, giving interrupts a window in between.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	/* Look for the command on the not-yet-issued queue first. */
	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected.  This is not easy to
		 * abort.  For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target.  Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyways this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Wait (unlocked) for the abort to complete from IRQ context. */
	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}
2613
/* Error-handler bus reset: issue a SCSI bus reset command and wait for
 * the interrupt path (__esp_interrupt -> esp_reset_cleanup) to signal
 * completion via esp->eh_reset.
 */
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Give devices on the bus time to settle after the reset. */
	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		/* Timed out: detach the completion before returning. */
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}
2648
2649/* All bets are off, reset the entire device.  */
2650static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
2651{
2652	struct esp *esp = shost_priv(cmd->device->host);
2653	unsigned long flags;
2654
2655	spin_lock_irqsave(esp->host->host_lock, flags);
2656	esp_bootup_reset(esp);
2657	esp_reset_cleanup(esp);
2658	spin_unlock_irqrestore(esp->host->host_lock, flags);
2659
2660	ssleep(esp_bus_reset_settle);
2661
2662	return SUCCESS;
2663}
2664
/* scsi_host_template ->info hook: short driver identification string. */
static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}
2669
/* Host template shared with the bus-specific ESP front-end drivers
 * (exported below via EXPORT_SYMBOL).
 */
struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,	/* default host SCSI ID */
	.sg_tablesize		= SG_ALL,
	.max_sectors		= 0xffff,
	.skip_settle_delay	= 1,	/* we ssleep() after resets ourselves */
};
2689EXPORT_SYMBOL(scsi_esp_template);
2690
2691static void esp_get_signalling(struct Scsi_Host *host)
2692{
2693	struct esp *esp = shost_priv(host);
2694	enum spi_signal_type type;
2695
2696	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2697		type = SPI_SIGNAL_HVD;
2698	else
2699		type = SPI_SIGNAL_SE;
2700
2701	spi_signalling(host) = type;
2702}
2703
2704static void esp_set_offset(struct scsi_target *target, int offset)
2705{
2706	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2707	struct esp *esp = shost_priv(host);
2708	struct esp_target_data *tp = &esp->target[target->id];
2709
2710	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2711		tp->nego_goal_offset = 0;
2712	else
2713		tp->nego_goal_offset = offset;
2714	tp->flags |= ESP_TGT_CHECK_NEGO;
2715}
2716
2717static void esp_set_period(struct scsi_target *target, int period)
2718{
2719	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2720	struct esp *esp = shost_priv(host);
2721	struct esp_target_data *tp = &esp->target[target->id];
2722
2723	tp->nego_goal_period = period;
2724	tp->flags |= ESP_TGT_CHECK_NEGO;
2725}
2726
2727static void esp_set_width(struct scsi_target *target, int width)
2728{
2729	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2730	struct esp *esp = shost_priv(host);
2731	struct esp_target_data *tp = &esp->target[target->id];
2732
2733	tp->nego_goal_width = (width ? 1 : 0);
2734	tp->flags |= ESP_TGT_CHECK_NEGO;
2735}
2736
/* SPI transport class hooks: expose sync offset/period, wide setting,
 * and bus signalling type through the scsi_transport_spi sysfs
 * attributes.  The show_* flags make the corresponding attributes
 * visible.
 */
static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};
2746
2747static int __init esp_init(void)
2748{
2749	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2750		     sizeof(struct esp_cmd_priv));
2751
2752	esp_transport_template = spi_attach_transport(&esp_transport_ops);
2753	if (!esp_transport_template)
2754		return -ENODEV;
2755
2756	return 0;
2757}
2758
/* Module exit: release the SPI transport template acquired in
 * esp_init().
 */
static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}
2763
2764MODULE_DESCRIPTION("ESP SCSI driver core");
2765MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2766MODULE_LICENSE("GPL");
2767MODULE_VERSION(DRV_VERSION);
2768
2769module_param(esp_bus_reset_settle, int, 0);
2770MODULE_PARM_DESC(esp_bus_reset_settle,
2771		 "ESP scsi bus reset delay in seconds");
2772
2773module_param(esp_debug, int, 0);
2774MODULE_PARM_DESC(esp_debug,
2775"ESP bitmapped debugging message enable value:\n"
2776"	0x00000001	Log interrupt events\n"
2777"	0x00000002	Log scsi commands\n"
2778"	0x00000004	Log resets\n"
2779"	0x00000008	Log message in events\n"
2780"	0x00000010	Log message out events\n"
2781"	0x00000020	Log command completion\n"
2782"	0x00000040	Log disconnects\n"
2783"	0x00000080	Log data start\n"
2784"	0x00000100	Log data done\n"
2785"	0x00000200	Log reconnects\n"
2786"	0x00000400	Log auto-sense data\n"
2787);
2788
2789module_init(esp_init);
2790module_exit(esp_exit);
2791
2792#ifdef CONFIG_SCSI_ESP_PIO
2793static inline unsigned int esp_wait_for_fifo(struct esp *esp)
2794{
2795	int i = 500000;
2796
2797	do {
2798		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
2799
2800		if (fbytes)
2801			return fbytes;
2802
2803		udelay(1);
2804	} while (--i);
2805
2806	shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
2807		     esp_read8(ESP_STATUS));
2808	return 0;
2809}
2810
2811static inline int esp_wait_for_intr(struct esp *esp)
2812{
2813	int i = 500000;
2814
2815	do {
2816		esp->sreg = esp_read8(ESP_STATUS);
2817		if (esp->sreg & ESP_STAT_INTR)
2818			return 0;
2819
2820		udelay(1);
2821	} while (--i);
2822
2823	shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
2824		     esp->sreg);
2825	return 1;
2826}
2827
/* Depth of the ESP chip data FIFO, in bytes. */
#define ESP_FIFO_SIZE 16

/*
 * Perform a data transfer by programmed I/O through the chip FIFO,
 * for front-ends without usable DMA.  @addr/@esp_count describe the
 * memory buffer, @write selects direction (nonzero: device->memory),
 * and @cmd is the ESP command to issue (its DMA bit is cleared here).
 * On return, esp->send_cmd_error is set if the transfer aborted on an
 * unexpected interrupt or timeout, and esp->send_cmd_residual holds
 * the number of bytes NOT transferred.
 *
 * NOTE(review): @dma_count is not referenced in this body — it appears
 * to exist only for signature parity with the DMA send_cmd path;
 * confirm against callers.
 */
void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
		      u32 dma_count, int write, u8 cmd)
{
	/* Remember the bus phase we started in; a phase change below
	 * means the target ended the transfer early.
	 */
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	cmd &= ~ESP_CMD_DMA;
	esp->send_cmd_error = 0;

	if (write) {
		/* Device -> memory: drain the FIFO one byte at a time. */
		u8 *dst = (u8 *)addr;
		/* In message-in phase a function-done interrupt is the
		 * expected completion; otherwise bus-service is.  Any
		 * interrupt bit outside that expectation is an error.
		 */
		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);

		scsi_esp_cmd(esp, cmd);

		while (1) {
			if (!esp_wait_for_fifo(esp))
				break;

			*dst++ = readb(esp->fifo_reg);
			--esp_count;

			if (!esp_count)
				break;

			if (esp_wait_for_intr(esp)) {
				esp->send_cmd_error = 1;
				break;
			}

			/* Target switched phase: stop, residual below. */
			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			/* Reading ESP_INTRPT also acknowledges the
			 * interrupt on the chip.
			 */
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & mask) {
				esp->send_cmd_error = 1;
				break;
			}

			/* In message-in, acknowledge the byte before
			 * asking for the next transfer.
			 */
			if (phase == ESP_MIP)
				esp_write8(ESP_CMD_MOK, ESP_CMD);

			esp_write8(ESP_CMD_TI, ESP_CMD);
		}
	} else {
		/* Memory -> device: prime the FIFO, then top it up each
		 * time the chip signals bus service.
		 */
		unsigned int n = ESP_FIFO_SIZE;
		u8 *src = (u8 *)addr;

		/* Start from an empty FIFO so the fill count is exact. */
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (n > esp_count)
			n = esp_count;
		writesb(esp->fifo_reg, src, n);
		src += n;
		esp_count -= n;

		scsi_esp_cmd(esp, cmd);

		while (esp_count) {
			if (esp_wait_for_intr(esp)) {
				esp->send_cmd_error = 1;
				break;
			}

			/* Target switched phase: stop, residual below. */
			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			/* Only a bus-service interrupt is expected here;
			 * anything else aborts the transfer.
			 */
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ~ESP_INTR_BSERV) {
				esp->send_cmd_error = 1;
				break;
			}

			/* Refill only the free space left in the FIFO. */
			n = ESP_FIFO_SIZE -
			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);

			if (n > esp_count)
				n = esp_count;
			writesb(esp->fifo_reg, src, n);
			src += n;
			esp_count -= n;

			esp_write8(ESP_CMD_TI, ESP_CMD);
		}
	}

	/* Bytes left untransferred, for the caller's residual handling. */
	esp->send_cmd_residual = esp_count;
}
EXPORT_SYMBOL(esp_send_pio_cmd);
#endif