/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"2.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

/* SCSI bus reset settle time in seconds.  */
static int esp_bus_reset_settle = 3;

static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400

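/* Each esp_log_*() macro below emits its printk() only when the
 * corresponding ESP_DEBUG_* bit is set in the esp_debug mask.
 */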
#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		printk(f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		printk(f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		printk(f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		printk(f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		printk(f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		printk(f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		printk(f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		printk(f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		printk(f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		printk(f, ## a); \
} while (0)

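/* Register access is indirected through esp->ops so that each bus
 * front-end can supply its own byte-wide read/write routines.
 */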
#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)

static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}

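/* Issue a chip command, first recording it (with a register snapshot)
 * in the circular event log so esp_dump_cmd_log() can reconstruct the
 * recent command/event history after an error.
 */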
void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);

static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}

static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	printk(KERN_INFO PFX "esp%d: Dumping command log\n",
	       esp->host->unique_id);
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
		       esp->host->unique_id, idx,
		       p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");

		printk("val[%02x] sreg[%02x] seqreg[%02x] "
		       "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
		       p->val, p->sreg, p->seqreg,
		       p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}

static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: ESP_FF_FBYTES "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}
	}
}

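/* The FASHME FIFO is two bytes wide; drain it a word at a time, then
 * use ESP_STAT2_F1BYTE to pick up a trailing odd byte if one is left.
 */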
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}

static void esp_set_all_config3(struct esp *esp, u8 val)
{
	int i;

	for (i = 0; i < ESP_MAX_TARGET; i++)
		esp->target[i].esp_config3 = val;
}

/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID code of the fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	esp->max_period = (esp->max_period + 3) >> 2;
	esp->min_period = (esp->min_period + 3) >> 2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
		/* Fast 236 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}

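/* Map the command's scatterlist for DMA and set up the per-command
 * residue counters that the data phase state machine advances.
 */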
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int dir = cmd->sc_data_direction;
	int total, i;

	if (dir == DMA_NONE)
		return;

	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
	spriv->cur_residue = sg_dma_len(sg);
	spriv->cur_sg = sg;

	total = 0;
	for (i = 0; i < spriv->u.num_sg; i++)
		total += sg_dma_len(&sg[i]);
	spriv->tot_residue = total;
}

static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
				   struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return ent->sense_dma +
			(ent->sense_ptr - cmd->sense_buffer);
	}

	return sg_dma_address(p->cur_sg) +
		(sg_dma_len(p->cur_sg) -
		 p->cur_residue);
}

static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
				    struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return SCSI_SENSE_BUFFERSIZE -
			(ent->sense_ptr - cmd->sense_buffer);
	}
	return p->cur_residue;
}

static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
		       esp->host->unique_id);
		printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
		       "len[%u]\n",
		       esp->host->unique_id,
		       p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->cur_sg++;
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}

static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	int dir = cmd->sc_data_direction;

	if (dir == DMA_NONE)
		return;

	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
}

static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->saved_sense_ptr = ent->sense_ptr;
		return;
	}
	ent->saved_cur_residue = spriv->cur_residue;
	ent->saved_cur_sg = spriv->cur_sg;
	ent->saved_tot_residue = spriv->tot_residue;
}

static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr = ent->saved_sense_ptr;
		return;
	}
	spriv->cur_residue = ent->saved_cur_residue;
	spriv->cur_sg = ent->saved_cur_sg;
	spriv->tot_residue = ent->saved_tot_residue;
}

static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
{
	if (cmd->cmd_len == 6 ||
	    cmd->cmd_len == 10 ||
	    cmd->cmd_len == 12) {
		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
	} else {
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}
}

static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}

static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}

static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		/* Arbitrary segment boundaries, 24-bit counts.  */
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		/* The other ESP chip variants are limited to 16 bits of
		 * transfer count.  Actually on FAS100A and FAS236 we could
		 * get 24 bits of transfer count by enabling ESP_CONFIG2_FENAB
		 * in the ESP_CFG2 register but that causes other unwanted
		 * changes so we don't use it currently.
		 */
		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		/* All of the DMA variants hooked up to these chips
		 * cannot handle crossing a 24-bit address boundary.
		 */
		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}

static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}

static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	/* When offset is zero, period is "don't care".  */
	if (!spi_offset(target) && !tp->nego_goal_offset)
		return 0;

	if (spi_offset(target) == tp->nego_goal_offset &&
	    spi_period(target) == tp->nego_goal_period)
		return 0;

	return 1;
}

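/* Per-LUN slot allocation.  An untagged command cannot run while tagged
 * commands are outstanding (and vice versa), so the queue is "plugged"
 * via lp->hold until the conflicting commands drain.
 */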
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->tag[0]) {
		/* Non-tagged, slot already taken?  */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	} else {
		/* Tagged command, see if blocked by a
		 * non-tagged one.
		 */
		if (lp->non_tagged_cmd || lp->hold)
			return -EBUSY;
	}

	BUG_ON(lp->tagged_cmds[ent->tag[1]]);

	lp->tagged_cmds[ent->tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}

static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
		lp->tagged_cmds[ent->tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}

/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("esp%d: Doing auto-sense for "
				  "tgt[%d] lun[%d]\n",
				  esp->host->unique_id, tgt, lun);

		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
}

static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}

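/* Select and start the next issuable command, if the chip is idle.
 * Simple commands go out as one DMA burst of IDENTIFY + optional tag
 * bytes + CDB; negotiation, unusual CDB lengths, and tagged commands
 * on ESP100 take the "slow command" path, where the message-out and
 * command bytes are delivered in separate phases.
 */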
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this.  */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command.  */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, start_cmd);
}

static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
	struct list_head *head = &esp->esp_cmd_pool;
	struct esp_cmd_entry *ret;

	if (list_empty(head)) {
		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
	} else {
		ret = list_entry(head->next, struct esp_cmd_entry, list);
		list_del(&ret->list);
		memset(ret, 0, sizeof(*ret));
	}
	return ret;
}

static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}

static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}

static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	return (status | (message << 8) | (driver_code << 16));
}

static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}

static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	spriv->u.dma_addr = ~(dma_addr_t)0x0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)

static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		/* Gross Error, could be one of:
		 * - top of fifo overwritten
		 * - top of command register overwritten
		 * - DMA programmed with wrong direction
		 * - improper phase change
		 */
		printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
		       esp->host->unique_id, esp->sreg);
		/* XXX Reset the chip. XXX */
		return 1;
	}
	return 0;
}

static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				printk(KERN_ERR PFX "esp%d: Spurious irq, "
				       "sreg=%02x.\n",
				       esp->host->unique_id, esp->sreg);
				return -1;
			}

			printk(KERN_ERR PFX "esp%d: DMA error\n",
			       esp->host->unique_id);

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}

static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("ESP: esp_schedule_reset() from %p\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}

/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp_log_reconnect("ESP: reconnect tag, ");

	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
		       esp->host->unique_id);
		return NULL;
	}

	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
		       esp->host->unique_id, esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message.  */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
		       "type %02x.\n",
		       esp->host->unique_id, esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
		       "tag %02x.\n",
		       esp->host->unique_id, esp->command_block[1]);
		return NULL;
	}

	return ent;
}

static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
		       "tgt[%u] lun[%u]\n",
		       esp->host->unique_id, target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	if (ent->flags & ESP_CMD_FLAG_ABORT) {
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}

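/* Figure out how a selection attempt ended: we may have lost
 * arbitration to a reselecting target, timed out (disconnect), or
 * selected the target successfully.
 */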
static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;
	u8 orig_select_state;

	orig_select_state = esp->select_state;

	/* No longer selecting.  */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp->ops->unmap_single(esp, ent->sense_dma,
					       SCSI_SENSE_BUFFERSIZE,
					       DMA_FROM_DEVICE);
			ent->sense_ptr = NULL;
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a slow command, negotiation, etc.
		 * we'll do the right thing as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	printk("ESP: Unexpected selection completion ireg[%x].\n",
	       esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}

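/* Compute how many bytes actually moved on the bus: start from the
 * programmed DMA length, subtract what remains in the chip's transfer
 * counter, and for data-out also subtract bytes still sitting in the
 * FIFO.
 */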
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Asynchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}

static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}

static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	printk("ESP: Unexpected extended msg type %x\n",
	       esp->msg_in[2]);

	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		printk("ESP: Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}

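/* One step of the driver's event state machine.  A non-zero return
 * means interrupt processing is finished; zero means the caller should
 * run the machine again (for example, to act on a scheduled reset).
 */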
static int esp_process_event(struct esp *esp)
{
	int write;

again:
	write = 0;
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			printk("ESP: Unexpected phase, sreg=%02x\n",
			       esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;

	case ESP_EVENT_DATA_IN:
		write = 1;
		/* fallthru */

	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		if (esp->ops->dma_length_limit)
			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
							     dma_len);
		else
			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);

		esp->data_dma_len = dma_len;

		if (!dma_len) {
			printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
			       esp->host->unique_id);
			printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n",
			       esp->host->unique_id,
			       (unsigned long long)esp_cur_dma_addr(ent, cmd),
			       esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("ESP: start data addr[%08llx] len[%u] "
				  "write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			printk("ESP: data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* XXX parity errors, etc. XXX */

			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			printk("ESP: data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			/* XXX force sync mode for this target XXX */
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			printk("ESP: Unexpected message %x in status\n",
			       ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("ESP: Command done status[%x] "
					"message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd,
						compose_result(ent->status,
							       ent->message,
							       DID_OK));
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("ESP: Disconnecting tgt[%d] "
					   "tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			printk("ESP: Unexpected message %x in freebus\n",
			       ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			/* Always use the fifo.  */
			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				/* Use DMA. */
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);
		}

		if (!(esp->ireg & ESP_INTR_DC)) {
			if (esp->rev != FASHME)
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	case ESP_EVENT_MSGIN:
		if (esp->ireg & ESP_INTR_BSERV) {
			if (esp->rev == FASHME) {
				if (!(esp_read8(ESP_STATUS2) &
				      ESP_STAT2_FEMPTY))
					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
			} else {
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
				if (esp->rev == ESP100)
					scsi_esp_cmd(esp, ESP_CMD_NULL);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;
		}
		if (esp->ireg & ESP_INTR_FDONE) {
			u8 val;

			if (esp->rev == FASHME)
				val = esp->fifo[0];
			else
				val = esp_read8(ESP_FDATA);
			esp->msg_in[esp->msg_in_len++] = val;

			esp_log_msgin("ESP: Got msgin byte %x\n", val);

			if (!esp_msgin_process(esp))
				esp->msg_in_len = 0;

			if (esp->rev == FASHME)
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);

			scsi_esp_cmd(esp, ESP_CMD_MOK);

			if (esp->event != ESP_EVENT_FREE_BUS)
				esp_event(esp, ESP_EVENT_CHECK_PHASE);
		} else {
			printk("ESP: MSGIN neither BSERV nor FDONE, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}
		break;
	case ESP_EVENT_CMD_START:
		memcpy(esp->command_block, esp->cmd_bytes_ptr,
		       esp->cmd_bytes_left);
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       esp->cmd_bytes_left, 16, 0,
				       ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_CMD_DONE);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	case ESP_EVENT_CMD_DONE:
		esp->ops->dma_invalidate(esp);
		if (esp->ireg & ESP_INTR_BSERV) {
			esp_event(esp, ESP_EVENT_CHECK_PHASE);
			goto again;
		}
		esp_schedule_reset(esp);
		return 0;

	case ESP_EVENT_RESET:
		scsi_esp_cmd(esp, ESP_CMD_RS);
		break;

	default:
		printk("ESP: Unexpected event %x, resetting\n",
		       esp->event);
		esp_schedule_reset(esp);
		return 0;
	}
	return 1;
}

static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;
	}

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}

static void esp_clear_hold(struct scsi_device *dev, void *data)
{
	struct esp_lun_data *lp = dev->hostdata;

	BUG_ON(lp->num_tagged);
	lp->hold = 0;
}

static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers.  */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}

2039/* Runs under host->lock */
2040static void __esp_interrupt(struct esp *esp)
2041{
2042	int finish_reset, intr_done;
2043	u8 phase;
2044
2045	esp->sreg = esp_read8(ESP_STATUS);
2046
2047	if (esp->flags & ESP_FLAG_RESETTING) {
2048		finish_reset = 1;
2049	} else {
2050		if (esp_check_gross_error(esp))
2051			return;
2052
2053		finish_reset = esp_check_spur_intr(esp);
2054		if (finish_reset < 0)
2055			return;
2056	}
2057
2058	esp->ireg = esp_read8(ESP_INTRPT);
2059
2060	if (esp->ireg & ESP_INTR_SR)
2061		finish_reset = 1;
2062
2063	if (finish_reset) {
2064		esp_reset_cleanup(esp);
2065		if (esp->eh_reset) {
2066			complete(esp->eh_reset);
2067			esp->eh_reset = NULL;
2068		}
2069		return;
2070	}
2071
2072	phase = (esp->sreg & ESP_STAT_PMASK);
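	/* On FASHME we must latch the fifo contents into esp->fifo (via
	 * hme_read_fifo) before processing events: when reselecting, or
	 * when stopped outside a data phase, later handling (such as
	 * decoding the reselecting target and lun) reads the bytes from
	 * there rather than from the chip.
	 */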
2073	if (esp->rev == FASHME) {
2074		if (((phase != ESP_DIP && phase != ESP_DOP) &&
2075		     esp->select_state == ESP_SELECT_NONE &&
2076		     esp->event != ESP_EVENT_STATUS &&
2077		     esp->event != ESP_EVENT_DATA_DONE) ||
2078		    (esp->ireg & ESP_INTR_RSEL)) {
2079			esp->sreg2 = esp_read8(ESP_STATUS2);
2080			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
2081			    (esp->sreg2 & ESP_STAT2_F1BYTE))
2082				hme_read_fifo(esp);
2083		}
2084	}
2085
2086	esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
2087		     "sreg2[%02x] ireg[%02x]\n",
2088		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
2089
2090	intr_done = 0;
2091
2092	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
2093		printk("ESP: unexpected IREG %02x\n", esp->ireg);
2094		if (esp->ireg & ESP_INTR_IC)
2095			esp_dump_cmd_log(esp);
2096
2097		esp_schedule_reset(esp);
2098	} else {
2099		if (!(esp->ireg & ESP_INTR_RSEL)) {
2100			/* Some combination of FDONE, BSERV, DC.  */
2101			if (esp->select_state != ESP_SELECT_NONE)
2102				intr_done = esp_finish_select(esp);
2103		} else if (esp->ireg & ESP_INTR_RSEL) {
2104			if (esp->active_cmd)
2105				(void) esp_finish_select(esp);
2106			intr_done = esp_reconnect(esp);
2107		}
2108	}
2109	while (!intr_done)
2110		intr_done = esp_process_event(esp);
2111}
2112
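/* Top-level interrupt handler, registered by the bus glue drivers.  When
 * the state machine has just issued something that normally completes
 * almost immediately (ESP_FLAG_QUICKIRQ_CHECK), poll briefly for the
 * follow-up interrupt instead of paying for a full exit and re-entry of
 * the handler.
 */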
2113irqreturn_t scsi_esp_intr(int irq, void *dev_id)
2114{
2115	struct esp *esp = dev_id;
2116	unsigned long flags;
2117	irqreturn_t ret;
2118
2119	spin_lock_irqsave(esp->host->host_lock, flags);
2120	ret = IRQ_NONE;
2121	if (esp->ops->irq_pending(esp)) {
2122		ret = IRQ_HANDLED;
2123		for (;;) {
2124			int i;
2125
2126			__esp_interrupt(esp);
2127			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
2128				break;
2129			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
2130
2131			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
2132				if (esp->ops->irq_pending(esp))
2133					break;
2134			}
2135			if (i == ESP_QUICKIRQ_LIMIT)
2136				break;
2137		}
2138	}
2139	spin_unlock_irqrestore(esp->host->host_lock, flags);
2140
2141	return ret;
2142}
2143EXPORT_SYMBOL(scsi_esp_intr);
2144
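/* Detect the chip revision by writing config registers and seeing which
 * of them read back: no cfg2 readback means a plain ESP100, cfg2 but no
 * cfg3 means an ESP100A, and if all three respond this is a 236/FAS
 * part, told apart below by the clock conversion factor.
 */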
2145static void esp_get_revision(struct esp *esp)
2146{
2147	u8 val;
2148
2149	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
2150	esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
2151	esp_write8(esp->config2, ESP_CFG2);
2152
2153	val = esp_read8(ESP_CFG2);
2154	val &= ~ESP_CONFIG2_MAGIC;
2155	if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
2156		/* If what we write to cfg2 does not come back, cfg2 is not
2157		 * implemented, therefore this must be a plain esp100.
2158		 */
2159		esp->rev = ESP100;
2160	} else {
2161		esp->config2 = 0;
2162		esp_set_all_config3(esp, 5);
2163		esp->prev_cfg3 = 5;
2164		esp_write8(esp->config2, ESP_CFG2);
2165		esp_write8(0, ESP_CFG3);
2166		esp_write8(esp->prev_cfg3, ESP_CFG3);
2167
2168		val = esp_read8(ESP_CFG3);
2169		if (val != 5) {
2170			/* The cfg2 register is implemented but
2171			 * cfg3 is not, so this must be an esp100a.
2172			 */
2173			esp->rev = ESP100A;
2174		} else {
2175			esp_set_all_config3(esp, 0);
2176			esp->prev_cfg3 = 0;
2177			esp_write8(esp->prev_cfg3, ESP_CFG3);
2178
2179			/* All of cfg{1,2,3} are implemented, so this must be
2180			 * one of the fas variants; figure out which one.
2181			 */
2182			if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2183				esp->rev = FAST;
2184				esp->sync_defp = SYNC_DEFP_FAST;
2185			} else {
2186				esp->rev = ESP236;
2187			}
2188			esp->config2 = 0;
2189			esp_write8(esp->config2, ESP_CFG2);
2190		}
2191	}
2192}
2193
2194static void esp_init_swstate(struct esp *esp)
2195{
2196	int i;
2197
2198	INIT_LIST_HEAD(&esp->queued_cmds);
2199	INIT_LIST_HEAD(&esp->active_cmds);
2200	INIT_LIST_HEAD(&esp->esp_cmd_pool);
2201
2202	/* Start with a clear state; domain validation (via ->slave_configure,
2203	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2204	 * commands.
2205	 */
2206	for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2207		esp->target[i].flags = 0;
2208		esp->target[i].nego_goal_period = 0;
2209		esp->target[i].nego_goal_offset = 0;
2210		esp->target[i].nego_goal_width = 0;
2211		esp->target[i].nego_goal_tags = 0;
2212	}
2213}
2214
2215/* This places the ESP into a known state at boot time. */
2216static void esp_bootup_reset(struct esp *esp)
2217{
2218	u8 val;
2219
2220	/* Reset the DMA */
2221	esp->ops->reset_dma(esp);
2222
2223	/* Reset the ESP */
2224	esp_reset_esp(esp);
2225
2226	/* Reset the SCSI bus, but tell ESP not to generate an irq */
2227	val = esp_read8(ESP_CFG1);
2228	val |= ESP_CONFIG1_SRRDISAB;
2229	esp_write8(val, ESP_CFG1);
2230
2231	scsi_esp_cmd(esp, ESP_CMD_RS);
2232	udelay(400);
2233
2234	esp_write8(esp->config1, ESP_CFG1);
2235
2236	/* Eat any bitrot in the chip and we are done... */
2237	esp_read8(ESP_INTRPT);
2238}
2239
2240static void esp_set_clock_params(struct esp *esp)
2241{
2242	int fhz;
2243	u8 ccf;
2244
2245	/* This is getting messy but it has to be done correctly or else
2246	 * you get weird behavior all over the place.  We are trying to
2247	 * basically figure out three pieces of information.
2248	 *
2249	 * a) Clock Conversion Factor
2250	 *
2251	 *    This is a representation of the input crystal clock frequency
2252	 *    going into the ESP on this machine.  Any operation whose timing
2253	 *    is longer than 400ns depends on this value being correct.  For
2254	 *    example, you'll get blips for arbitration/selection during high
2255	 *    load or with multiple targets if this is not set correctly.
2256	 *
2257	 * b) Selection Time-Out
2258	 *
2259	 *    The ESP isn't very bright and will arbitrate for the bus and try
2260	 *    to select a target forever if you let it.  This value tells the
2261	 *    ESP when it has taken too long to negotiate and that it should
2262	 *    interrupt the CPU so we can see what happened.  The value is
2263	 *    computed as follows (from NCR/Symbios chip docs).
2264	 *
2265	 *          (Time Out Period) *  (Input Clock)
2266	 *    STO = ----------------------------------
2267	 *          (8192) * (Clock Conversion Factor)
2268	 *
2269	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
2270	 *
2271	 * c) Empirical constants for synchronous offset and transfer period
2272	 *    register values
2273	 *
2274	 *    This entails the smallest and largest sync period we could ever
2275	 *    handle on this ESP.
2276	 */
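	/* For example (illustrative numbers, not taken from any particular
	 * board): a 40MHz crystal gives ccf = ((40 + 4) / 5) = 8, and with
	 * the 250ms time out period that works out to
	 *
	 *          (0.25 * 40000000)
	 *    STO = ----------------- = ~153
	 *              (8192 * 8)
	 *
	 * which is the value ESP_NEG_DEFP should compute for these inputs.
	 */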
2277	fhz = esp->cfreq;
2278
2279	ccf = ((fhz / 1000000) + 4) / 5;
2280	if (ccf == 1)
2281		ccf = 2;
2282
2283	/* If we can't find anything reasonable, just assume 20MHz.
2284	 * This is the clock frequency of the older sun4c machines, where
2285	 * I've been unable to find the clock-frequency PROM property.  All
2286	 * other machines provide useful values it seems.
2287	 */
2288	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
2289		fhz = 20000000;
2290		ccf = 4;
2291	}
2292
2293	esp->cfact = (ccf == 8 ? 0 : ccf);
2294	esp->cfreq = fhz;
2295	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
2296	esp->ctick = ESP_TICK(ccf, esp->ccycle);
2297	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
2298	esp->sync_defp = SYNC_DEFP_SLOW;
2299}
2300
2301static const char *esp_chip_names[] = {
2302	"ESP100",
2303	"ESP100A",
2304	"ESP236",
2305	"FAS236",
2306	"FAS100A",
2307	"FAST",
2308	"FASHME",
2309};
2310
2311static struct scsi_transport_template *esp_transport_template;
2312
2313int scsi_esp_register(struct esp *esp, struct device *dev)
2314{
2315	static int instance;
2316	int err;
2317
2318	esp->host->transportt = esp_transport_template;
2319	esp->host->max_lun = ESP_MAX_LUN;
2320	esp->host->cmd_per_lun = 2;
2321	esp->host->unique_id = instance;
2322
2323	esp_set_clock_params(esp);
2324
2325	esp_get_revision(esp);
2326
2327	esp_init_swstate(esp);
2328
2329	esp_bootup_reset(esp);
2330
2331	printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
2332	       esp->host->unique_id, esp->regs, esp->dma_regs,
2333	       esp->host->irq);
2334	printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2335	       esp->host->unique_id, esp_chip_names[esp->rev],
2336	       esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
2337
2338	/* Let the SCSI bus reset settle. */
2339	ssleep(esp_bus_reset_settle);
2340
2341	err = scsi_add_host(esp->host, dev);
2342	if (err)
2343		return err;
2344
2345	instance++;
2346
2347	scsi_scan_host(esp->host);
2348
2349	return 0;
2350}
2351EXPORT_SYMBOL(scsi_esp_register);
2352
2353void scsi_esp_unregister(struct esp *esp)
2354{
2355	scsi_remove_host(esp->host);
2356}
2357EXPORT_SYMBOL(scsi_esp_unregister);
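
/* A bus glue driver allocates its Scsi_Host with scsi_esp_template,
 * fills in esp->ops plus the register mappings and clock frequency,
 * hooks up scsi_esp_intr, and only then calls scsi_esp_register().
 * A minimal sketch, where my_esp_ops, pdev and the register setup are
 * purely illustrative:
 *
 *	struct Scsi_Host *host;
 *	struct esp *esp;
 *
 *	host = scsi_host_alloc(&scsi_esp_template, sizeof(struct esp));
 *	esp = shost_priv(host);
 *	esp->host = host;
 *	esp->dev = pdev;
 *	esp->ops = &my_esp_ops;
 *	... ioremap the chip, set esp->regs, esp->dma_regs, esp->cfreq ...
 *	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
 *	if (!err)
 *		err = scsi_esp_register(esp, &pdev->dev);
 */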
2358
2359static int esp_target_alloc(struct scsi_target *starget)
2360{
2361	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2362	struct esp_target_data *tp = &esp->target[starget->id];
2363
2364	tp->starget = starget;
2365
2366	return 0;
2367}
2368
2369static void esp_target_destroy(struct scsi_target *starget)
2370{
2371	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2372	struct esp_target_data *tp = &esp->target[starget->id];
2373
2374	tp->starget = NULL;
2375}
2376
2377static int esp_slave_alloc(struct scsi_device *dev)
2378{
2379	struct esp *esp = shost_priv(dev->host);
2380	struct esp_target_data *tp = &esp->target[dev->id];
2381	struct esp_lun_data *lp;
2382
2383	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2384	if (!lp)
2385		return -ENOMEM;
2386	dev->hostdata = lp;
2387
2388	spi_min_period(tp->starget) = esp->min_period;
2389	spi_max_offset(tp->starget) = 15;
2390
2391	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2392		spi_max_width(tp->starget) = 1;
2393	else
2394		spi_max_width(tp->starget) = 0;
2395
2396	return 0;
2397}
2398
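/* Invoked by the midlayer after slave_alloc: pick a queue depth, enable
 * ordered-tag queueing when the device supports it, and kick off spi
 * domain validation, which in turn feeds the sync/wide negotiation
 * goals.
 */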
2399static int esp_slave_configure(struct scsi_device *dev)
2400{
2401	struct esp *esp = shost_priv(dev->host);
2402	struct esp_target_data *tp = &esp->target[dev->id];
2403	int goal_tags, queue_depth;
2404
2405	goal_tags = 0;
2406
2407	if (dev->tagged_supported) {
2408		/* XXX make this configurable somehow XXX */
2409		goal_tags = ESP_DEFAULT_TAGS;
2410
2411		if (goal_tags > ESP_MAX_TAG)
2412			goal_tags = ESP_MAX_TAG;
2413	}
2414
2415	queue_depth = goal_tags;
2416	if (queue_depth < dev->host->cmd_per_lun)
2417		queue_depth = dev->host->cmd_per_lun;
2418
2419	if (goal_tags) {
2420		scsi_set_tag_type(dev, MSG_ORDERED_TAG);
2421		scsi_activate_tcq(dev, queue_depth);
2422	} else {
2423		scsi_deactivate_tcq(dev, queue_depth);
2424	}
2425	tp->flags |= ESP_TGT_DISCONNECT;
2426
2427	if (!spi_initial_dv(dev->sdev_target))
2428		spi_dv_device(dev);
2429
2430	return 0;
2431}
2432
2433static void esp_slave_destroy(struct scsi_device *dev)
2434{
2435	struct esp_lun_data *lp = dev->hostdata;
2436
2437	kfree(lp);
2438	dev->hostdata = NULL;
2439}
2440
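/* Abort strategy: a command still sitting on queued_cmds is simply
 * completed with DID_ABORT; the currently active command gets an
 * ABORT_TASK_SET message queued with ATN asserted, and we then wait for
 * it to finish; a disconnected command cannot be aborted here, so we
 * return FAILED and let the midlayer escalate to a bus or host reset.
 */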
2441static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
2442{
2443	struct esp *esp = shost_priv(cmd->device->host);
2444	struct esp_cmd_entry *ent, *tmp;
2445	struct completion eh_done;
2446	unsigned long flags;
2447
2448	/* XXX This helps a lot with debugging but might be a bit
2449	 * XXX much for the final driver.
2450	 */
2451	spin_lock_irqsave(esp->host->host_lock, flags);
2452	printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
2453	       esp->host->unique_id, cmd, cmd->cmnd[0]);
2454	ent = esp->active_cmd;
2455	if (ent)
2456		printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
2457		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
2458	list_for_each_entry(ent, &esp->queued_cmds, list) {
2459		printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
2460		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
2461	}
2462	list_for_each_entry(ent, &esp->active_cmds, list) {
2463		printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
2464		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
2465	}
2466	esp_dump_cmd_log(esp);
2467	spin_unlock_irqrestore(esp->host->host_lock, flags);
2468
2469	spin_lock_irqsave(esp->host->host_lock, flags);
2470
2471	ent = NULL;
2472	list_for_each_entry(tmp, &esp->queued_cmds, list) {
2473		if (tmp->cmd == cmd) {
2474			ent = tmp;
2475			break;
2476		}
2477	}
2478
2479	if (ent) {
2480		/* Easiest case, we didn't even issue the command
2481		 * yet so it is trivial to abort.
2482		 */
2483		list_del(&ent->list);
2484
2485		cmd->result = DID_ABORT << 16;
2486		cmd->scsi_done(cmd);
2487
2488		esp_put_ent(esp, ent);
2489
2490		goto out_success;
2491	}
2492
2493	init_completion(&eh_done);
2494
2495	ent = esp->active_cmd;
2496	if (ent && ent->cmd == cmd) {
2497		/* Command is the currently active command on
2498		 * the bus.  If we already have an output message
2499		 * pending, no dice.
2500		 */
2501		if (esp->msg_out_len)
2502			goto out_failure;
2503
2504		/* Send out an abort, encouraging the target to
2505		 * go to MSGOUT phase by asserting ATN.
2506		 */
2507		esp->msg_out[0] = ABORT_TASK_SET;
2508		esp->msg_out_len = 1;
2509		ent->eh_done = &eh_done;
2510
2511		scsi_esp_cmd(esp, ESP_CMD_SATN);
2512	} else {
2513		/* The command is disconnected.  This is not easy to
2514		 * abort.  For now we fail and let the scsi error
2515		 * handling layer go try a scsi bus reset or host
2516		 * reset.
2517		 *
2518		 * What we could do is put together a scsi command
2519		 * solely for the purpose of sending an abort message
2520		 * to the target.  Coming up with all the code to
2521		 * cook up scsi commands, special case them everywhere,
2522		 * etc. is for questionable gain and it would be better
2523		 * if the generic scsi error handling layer could do at
2524		 * least some of that for us.
2525		 *
2526		 * Anyway, this is an area for potential future improvement
2527		 * in this driver.
2528		 */
2529		goto out_failure;
2530	}
2531
2532	spin_unlock_irqrestore(esp->host->host_lock, flags);
2533
2534	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
2535		spin_lock_irqsave(esp->host->host_lock, flags);
2536		ent->eh_done = NULL;
2537		spin_unlock_irqrestore(esp->host->host_lock, flags);
2538
2539		return FAILED;
2540	}
2541
2542	return SUCCESS;
2543
2544out_success:
2545	spin_unlock_irqrestore(esp->host->host_lock, flags);
2546	return SUCCESS;
2547
2548out_failure:
2549	/* XXX This might be a good location to set ESP_TGT_BROKEN
2550	 * XXX since we know which target/lun in particular is
2551	 * XXX causing trouble.
2552	 */
2553	spin_unlock_irqrestore(esp->host->host_lock, flags);
2554	return FAILED;
2555}
2556
2557static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
2558{
2559	struct esp *esp = shost_priv(cmd->device->host);
2560	struct completion eh_reset;
2561	unsigned long flags;
2562
2563	init_completion(&eh_reset);
2564
2565	spin_lock_irqsave(esp->host->host_lock, flags);
2566
2567	esp->eh_reset = &eh_reset;
2568
2569	/* XXX This is too simple... We should add lots of
2570	 * XXX checks here so that if we find that the chip is
2571	 * XXX very wedged we return failure immediately so
2572	 * XXX that we can perform a full chip reset.
2573	 */
2574	esp->flags |= ESP_FLAG_RESETTING;
2575	scsi_esp_cmd(esp, ESP_CMD_RS);
2576
2577	spin_unlock_irqrestore(esp->host->host_lock, flags);
2578
2579	ssleep(esp_bus_reset_settle);
2580
2581	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
2582		spin_lock_irqsave(esp->host->host_lock, flags);
2583		esp->eh_reset = NULL;
2584		spin_unlock_irqrestore(esp->host->host_lock, flags);
2585
2586		return FAILED;
2587	}
2588
2589	return SUCCESS;
2590}
2591
2592/* All bets are off, reset the entire device.  */
2593static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
2594{
2595	struct esp *esp = shost_priv(cmd->device->host);
2596	unsigned long flags;
2597
2598	spin_lock_irqsave(esp->host->host_lock, flags);
2599	esp_bootup_reset(esp);
2600	esp_reset_cleanup(esp);
2601	spin_unlock_irqrestore(esp->host->host_lock, flags);
2602
2603	ssleep(esp_bus_reset_settle);
2604
2605	return SUCCESS;
2606}
2607
2608static const char *esp_info(struct Scsi_Host *host)
2609{
2610	return "esp";
2611}
2612
2613struct scsi_host_template scsi_esp_template = {
2614	.module			= THIS_MODULE,
2615	.name			= "esp",
2616	.info			= esp_info,
2617	.queuecommand		= esp_queuecommand,
2618	.target_alloc		= esp_target_alloc,
2619	.target_destroy		= esp_target_destroy,
2620	.slave_alloc		= esp_slave_alloc,
2621	.slave_configure	= esp_slave_configure,
2622	.slave_destroy		= esp_slave_destroy,
2623	.eh_abort_handler	= esp_eh_abort_handler,
2624	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
2625	.eh_host_reset_handler	= esp_eh_host_reset_handler,
2626	.can_queue		= 7,
2627	.this_id		= 7,
2628	.sg_tablesize		= SG_ALL,
2629	.use_clustering		= ENABLE_CLUSTERING,
2630	.max_sectors		= 0xffff,
2631	.skip_settle_delay	= 1,
2632};
2633EXPORT_SYMBOL(scsi_esp_template);
2634
2635static void esp_get_signalling(struct Scsi_Host *host)
2636{
2637	struct esp *esp = shost_priv(host);
2638	enum spi_signal_type type;
2639
2640	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2641		type = SPI_SIGNAL_HVD;
2642	else
2643		type = SPI_SIGNAL_SE;
2644
2645	spi_signalling(host) = type;
2646}
2647
2648static void esp_set_offset(struct scsi_target *target, int offset)
2649{
2650	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2651	struct esp *esp = shost_priv(host);
2652	struct esp_target_data *tp = &esp->target[target->id];
2653
2654	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2655		tp->nego_goal_offset = 0;
2656	else
2657		tp->nego_goal_offset = offset;
2658	tp->flags |= ESP_TGT_CHECK_NEGO;
2659}
2660
2661static void esp_set_period(struct scsi_target *target, int period)
2662{
2663	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2664	struct esp *esp = shost_priv(host);
2665	struct esp_target_data *tp = &esp->target[target->id];
2666
2667	tp->nego_goal_period = period;
2668	tp->flags |= ESP_TGT_CHECK_NEGO;
2669}
2670
2671static void esp_set_width(struct scsi_target *target, int width)
2672{
2673	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2674	struct esp *esp = shost_priv(host);
2675	struct esp_target_data *tp = &esp->target[target->id];
2676
2677	tp->nego_goal_width = (width ? 1 : 0);
2678	tp->flags |= ESP_TGT_CHECK_NEGO;
2679}
2680
2681static struct spi_function_template esp_transport_ops = {
2682	.set_offset		= esp_set_offset,
2683	.show_offset		= 1,
2684	.set_period		= esp_set_period,
2685	.show_period		= 1,
2686	.set_width		= esp_set_width,
2687	.show_width		= 1,
2688	.get_signalling		= esp_get_signalling,
2689};
2690
2691static int __init esp_init(void)
2692{
2693	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2694		     sizeof(struct esp_cmd_priv));
2695
2696	esp_transport_template = spi_attach_transport(&esp_transport_ops);
2697	if (!esp_transport_template)
2698		return -ENODEV;
2699
2700	return 0;
2701}
2702
2703static void __exit esp_exit(void)
2704{
2705	spi_release_transport(esp_transport_template);
2706}
2707
2708MODULE_DESCRIPTION("ESP SCSI driver core");
2709MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2710MODULE_LICENSE("GPL");
2711MODULE_VERSION(DRV_VERSION);
2712
2713module_param(esp_bus_reset_settle, int, 0);
2714MODULE_PARM_DESC(esp_bus_reset_settle,
2715		 "ESP scsi bus reset delay in seconds");
2716
2717module_param(esp_debug, int, 0);
2718MODULE_PARM_DESC(esp_debug,
2719"ESP bitmapped debugging message enable value:\n"
2720"	0x00000001	Log interrupt events\n"
2721"	0x00000002	Log scsi commands\n"
2722"	0x00000004	Log resets\n"
2723"	0x00000008	Log message in events\n"
2724"	0x00000010	Log message out events\n"
2725"	0x00000020	Log command completion\n"
2726"	0x00000040	Log disconnects\n"
2727"	0x00000080	Log data start\n"
2728"	0x00000100	Log data done\n"
2729"	0x00000200	Log reconnects\n"
2730"	0x00000400	Log auto-sense data\n"
2731);
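
/* For example, loading the driver with esp_debug=0x205 enables interrupt
 * (0x1), reset (0x4), and reconnect (0x200) logging.
 */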
2732
2733module_init(esp_init);
2734module_exit(esp_exit);
v3.5.6
   1/* esp_scsi.c: ESP SCSI driver.
   2 *
   3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/types.h>
   8#include <linux/slab.h>
   9#include <linux/delay.h>
  10#include <linux/list.h>
  11#include <linux/completion.h>
  12#include <linux/kallsyms.h>
  13#include <linux/module.h>
  14#include <linux/moduleparam.h>
  15#include <linux/init.h>
  16#include <linux/irqreturn.h>
  17
  18#include <asm/irq.h>
  19#include <asm/io.h>
  20#include <asm/dma.h>
  21
  22#include <scsi/scsi.h>
  23#include <scsi/scsi_host.h>
  24#include <scsi/scsi_cmnd.h>
  25#include <scsi/scsi_device.h>
  26#include <scsi/scsi_tcq.h>
  27#include <scsi/scsi_dbg.h>
  28#include <scsi/scsi_transport_spi.h>
  29
  30#include "esp_scsi.h"
  31
  32#define DRV_MODULE_NAME		"esp"
  33#define PFX DRV_MODULE_NAME	": "
  34#define DRV_VERSION		"2.000"
  35#define DRV_MODULE_RELDATE	"April 19, 2007"
  36
  37/* SCSI bus reset settle time in seconds.  */
  38static int esp_bus_reset_settle = 3;
  39
  40static u32 esp_debug;
  41#define ESP_DEBUG_INTR		0x00000001
  42#define ESP_DEBUG_SCSICMD	0x00000002
  43#define ESP_DEBUG_RESET		0x00000004
  44#define ESP_DEBUG_MSGIN		0x00000008
  45#define ESP_DEBUG_MSGOUT	0x00000010
  46#define ESP_DEBUG_CMDDONE	0x00000020
  47#define ESP_DEBUG_DISCONNECT	0x00000040
  48#define ESP_DEBUG_DATASTART	0x00000080
  49#define ESP_DEBUG_DATADONE	0x00000100
  50#define ESP_DEBUG_RECONNECT	0x00000200
  51#define ESP_DEBUG_AUTOSENSE	0x00000400
  52
  53#define esp_log_intr(f, a...) \
  54do {	if (esp_debug & ESP_DEBUG_INTR) \
  55		printk(f, ## a); \
  56} while (0)
  57
  58#define esp_log_reset(f, a...) \
  59do {	if (esp_debug & ESP_DEBUG_RESET) \
  60		printk(f, ## a); \
  61} while (0)
  62
  63#define esp_log_msgin(f, a...) \
  64do {	if (esp_debug & ESP_DEBUG_MSGIN) \
  65		printk(f, ## a); \
  66} while (0)
  67
  68#define esp_log_msgout(f, a...) \
  69do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
  70		printk(f, ## a); \
  71} while (0)
  72
  73#define esp_log_cmddone(f, a...) \
  74do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
  75		printk(f, ## a); \
  76} while (0)
  77
  78#define esp_log_disconnect(f, a...) \
  79do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
  80		printk(f, ## a); \
  81} while (0)
  82
  83#define esp_log_datastart(f, a...) \
  84do {	if (esp_debug & ESP_DEBUG_DATASTART) \
  85		printk(f, ## a); \
  86} while (0)
  87
  88#define esp_log_datadone(f, a...) \
  89do {	if (esp_debug & ESP_DEBUG_DATADONE) \
  90		printk(f, ## a); \
  91} while (0)
  92
  93#define esp_log_reconnect(f, a...) \
  94do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
  95		printk(f, ## a); \
  96} while (0)
  97
  98#define esp_log_autosense(f, a...) \
  99do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
 100		printk(f, ## a); \
 101} while (0)
 102
 103#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
 104#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)
 105
 106static void esp_log_fill_regs(struct esp *esp,
 107			      struct esp_event_ent *p)
 108{
 109	p->sreg = esp->sreg;
 110	p->seqreg = esp->seqreg;
 111	p->sreg2 = esp->sreg2;
 112	p->ireg = esp->ireg;
 113	p->select_state = esp->select_state;
 114	p->event = esp->event;
 115}
 116
 117void scsi_esp_cmd(struct esp *esp, u8 val)
 118{
 119	struct esp_event_ent *p;
 120	int idx = esp->esp_event_cur;
 121
 122	p = &esp->esp_event_log[idx];
 123	p->type = ESP_EVENT_TYPE_CMD;
 124	p->val = val;
 125	esp_log_fill_regs(esp, p);
 126
 127	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
 128
 129	esp_write8(val, ESP_CMD);
 130}
 131EXPORT_SYMBOL(scsi_esp_cmd);
 132
 133static void esp_event(struct esp *esp, u8 val)
 134{
 135	struct esp_event_ent *p;
 136	int idx = esp->esp_event_cur;
 137
 138	p = &esp->esp_event_log[idx];
 139	p->type = ESP_EVENT_TYPE_EVENT;
 140	p->val = val;
 141	esp_log_fill_regs(esp, p);
 142
 143	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
 144
 145	esp->event = val;
 146}
 147
 148static void esp_dump_cmd_log(struct esp *esp)
 149{
 150	int idx = esp->esp_event_cur;
 151	int stop = idx;
 152
 153	printk(KERN_INFO PFX "esp%d: Dumping command log\n",
 154	       esp->host->unique_id);
 155	do {
 156		struct esp_event_ent *p = &esp->esp_event_log[idx];
 157
 158		printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
 159		       esp->host->unique_id, idx,
 160		       p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");
 161
 162		printk("val[%02x] sreg[%02x] seqreg[%02x] "
 163		       "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
 164		       p->val, p->sreg, p->seqreg,
 165		       p->sreg2, p->ireg, p->select_state, p->event);
 166
 167		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
 168	} while (idx != stop);
 169}
 170
 171static void esp_flush_fifo(struct esp *esp)
 172{
 173	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
 174	if (esp->rev == ESP236) {
 175		int lim = 1000;
 176
 177		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
 178			if (--lim == 0) {
 179				printk(KERN_ALERT PFX "esp%d: ESP_FF_BYTES "
 180				       "will not clear!\n",
 181				       esp->host->unique_id);
 182				break;
 183			}
 184			udelay(1);
 185		}
 186	}
 187}
 188
 189static void hme_read_fifo(struct esp *esp)
 190{
 191	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
 192	int idx = 0;
 193
 194	while (fcnt--) {
 195		esp->fifo[idx++] = esp_read8(ESP_FDATA);
 196		esp->fifo[idx++] = esp_read8(ESP_FDATA);
 197	}
 198	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
 199		esp_write8(0, ESP_FDATA);
 200		esp->fifo[idx++] = esp_read8(ESP_FDATA);
 201		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
 202	}
 203	esp->fifo_cnt = idx;
 204}
 205
 206static void esp_set_all_config3(struct esp *esp, u8 val)
 207{
 208	int i;
 209
 210	for (i = 0; i < ESP_MAX_TARGET; i++)
 211		esp->target[i].esp_config3 = val;
 212}
 213
 214/* Reset the ESP chip, _not_ the SCSI bus. */
 215static void esp_reset_esp(struct esp *esp)
 216{
 217	u8 family_code, version;
 218
 219	/* Now reset the ESP chip */
 220	scsi_esp_cmd(esp, ESP_CMD_RC);
 221	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
 222	if (esp->rev == FAST)
 223		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
 224	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
 225
 226	/* This is the only point at which it is reliable to read
 227	 * the ID-code for a fast ESP chip variants.
 228	 */
 229	esp->max_period = ((35 * esp->ccycle) / 1000);
 230	if (esp->rev == FAST) {
 231		version = esp_read8(ESP_UID);
 232		family_code = (version & 0xf8) >> 3;
 233		if (family_code == 0x02)
 234			esp->rev = FAS236;
 235		else if (family_code == 0x0a)
 236			esp->rev = FASHME; /* Version is usually '5'. */
 237		else
 238			esp->rev = FAS100A;
 239		esp->min_period = ((4 * esp->ccycle) / 1000);
 240	} else {
 241		esp->min_period = ((5 * esp->ccycle) / 1000);
 242	}
 243	esp->max_period = (esp->max_period + 3)>>2;
 244	esp->min_period = (esp->min_period + 3)>>2;
 245
 246	esp_write8(esp->config1, ESP_CFG1);
 247	switch (esp->rev) {
 248	case ESP100:
 249		/* nothing to do */
 250		break;
 251
 252	case ESP100A:
 253		esp_write8(esp->config2, ESP_CFG2);
 254		break;
 255
 256	case ESP236:
 257		/* Slow 236 */
 258		esp_write8(esp->config2, ESP_CFG2);
 259		esp->prev_cfg3 = esp->target[0].esp_config3;
 260		esp_write8(esp->prev_cfg3, ESP_CFG3);
 261		break;
 262
 263	case FASHME:
 264		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
 265		/* fallthrough... */
 266
 267	case FAS236:
 268		/* Fast 236 or HME */
 269		esp_write8(esp->config2, ESP_CFG2);
 270		if (esp->rev == FASHME) {
 271			u8 cfg3 = esp->target[0].esp_config3;
 272
 273			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
 274			if (esp->scsi_id >= 8)
 275				cfg3 |= ESP_CONFIG3_IDBIT3;
 276			esp_set_all_config3(esp, cfg3);
 277		} else {
 278			u32 cfg3 = esp->target[0].esp_config3;
 279
 280			cfg3 |= ESP_CONFIG3_FCLK;
 281			esp_set_all_config3(esp, cfg3);
 282		}
 283		esp->prev_cfg3 = esp->target[0].esp_config3;
 284		esp_write8(esp->prev_cfg3, ESP_CFG3);
 285		if (esp->rev == FASHME) {
 286			esp->radelay = 80;
 287		} else {
 288			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
 289				esp->radelay = 0;
 290			else
 291				esp->radelay = 96;
 292		}
 293		break;
 294
 295	case FAS100A:
 296		/* Fast 100a */
 297		esp_write8(esp->config2, ESP_CFG2);
 298		esp_set_all_config3(esp,
 299				    (esp->target[0].esp_config3 |
 300				     ESP_CONFIG3_FCLOCK));
 301		esp->prev_cfg3 = esp->target[0].esp_config3;
 302		esp_write8(esp->prev_cfg3, ESP_CFG3);
 303		esp->radelay = 32;
 304		break;
 305
 306	default:
 307		break;
 308	}
 309
 310	/* Reload the configuration registers */
 311	esp_write8(esp->cfact, ESP_CFACT);
 312
 313	esp->prev_stp = 0;
 314	esp_write8(esp->prev_stp, ESP_STP);
 315
 316	esp->prev_soff = 0;
 317	esp_write8(esp->prev_soff, ESP_SOFF);
 318
 319	esp_write8(esp->neg_defp, ESP_TIMEO);
 320
 321	/* Eat any bitrot in the chip */
 322	esp_read8(ESP_INTRPT);
 323	udelay(100);
 324}
 325
 326static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
 327{
 328	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 329	struct scatterlist *sg = scsi_sglist(cmd);
 330	int dir = cmd->sc_data_direction;
 331	int total, i;
 332
 333	if (dir == DMA_NONE)
 334		return;
 335
 336	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
 337	spriv->cur_residue = sg_dma_len(sg);
 338	spriv->cur_sg = sg;
 339
 340	total = 0;
 341	for (i = 0; i < spriv->u.num_sg; i++)
 342		total += sg_dma_len(&sg[i]);
 343	spriv->tot_residue = total;
 344}
 345
 346static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
 347				   struct scsi_cmnd *cmd)
 348{
 349	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 350
 351	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 352		return ent->sense_dma +
 353			(ent->sense_ptr - cmd->sense_buffer);
 354	}
 355
 356	return sg_dma_address(p->cur_sg) +
 357		(sg_dma_len(p->cur_sg) -
 358		 p->cur_residue);
 359}
 360
 361static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
 362				    struct scsi_cmnd *cmd)
 363{
 364	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 365
 366	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 367		return SCSI_SENSE_BUFFERSIZE -
 368			(ent->sense_ptr - cmd->sense_buffer);
 369	}
 370	return p->cur_residue;
 371}
 372
 373static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
 374			    struct scsi_cmnd *cmd, unsigned int len)
 375{
 376	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 377
 378	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 379		ent->sense_ptr += len;
 380		return;
 381	}
 382
 383	p->cur_residue -= len;
 384	p->tot_residue -= len;
 385	if (p->cur_residue < 0 || p->tot_residue < 0) {
 386		printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
 387		       esp->host->unique_id);
 388		printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
 389		       "len[%u]\n",
 390		       esp->host->unique_id,
 391		       p->cur_residue, p->tot_residue, len);
 392		p->cur_residue = 0;
 393		p->tot_residue = 0;
 394	}
 395	if (!p->cur_residue && p->tot_residue) {
 396		p->cur_sg++;
 397		p->cur_residue = sg_dma_len(p->cur_sg);
 398	}
 399}
 400
 401static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
 402{
 403	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 404	int dir = cmd->sc_data_direction;
 405
 406	if (dir == DMA_NONE)
 407		return;
 408
 409	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
 410}
 411
 412static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 413{
 414	struct scsi_cmnd *cmd = ent->cmd;
 415	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 416
 417	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 418		ent->saved_sense_ptr = ent->sense_ptr;
 419		return;
 420	}
 421	ent->saved_cur_residue = spriv->cur_residue;
 422	ent->saved_cur_sg = spriv->cur_sg;
 423	ent->saved_tot_residue = spriv->tot_residue;
 424}
 425
 426static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 427{
 428	struct scsi_cmnd *cmd = ent->cmd;
 429	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 430
 431	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 432		ent->sense_ptr = ent->saved_sense_ptr;
 433		return;
 434	}
 435	spriv->cur_residue = ent->saved_cur_residue;
 436	spriv->cur_sg = ent->saved_cur_sg;
 437	spriv->tot_residue = ent->saved_tot_residue;
 438}
 439
 440static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
 441{
 442	if (cmd->cmd_len == 6 ||
 443	    cmd->cmd_len == 10 ||
 444	    cmd->cmd_len == 12) {
 445		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
 446	} else {
 447		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
 448	}
 449}
 450
 451static void esp_write_tgt_config3(struct esp *esp, int tgt)
 452{
 453	if (esp->rev > ESP100A) {
 454		u8 val = esp->target[tgt].esp_config3;
 455
 456		if (val != esp->prev_cfg3) {
 457			esp->prev_cfg3 = val;
 458			esp_write8(val, ESP_CFG3);
 459		}
 460	}
 461}
 462
 463static void esp_write_tgt_sync(struct esp *esp, int tgt)
 464{
 465	u8 off = esp->target[tgt].esp_offset;
 466	u8 per = esp->target[tgt].esp_period;
 467
 468	if (off != esp->prev_soff) {
 469		esp->prev_soff = off;
 470		esp_write8(off, ESP_SOFF);
 471	}
 472	if (per != esp->prev_stp) {
 473		esp->prev_stp = per;
 474		esp_write8(per, ESP_STP);
 475	}
 476}
 477
 478static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
 479{
 480	if (esp->rev == FASHME) {
 481		/* Arbitrary segment boundaries, 24-bit counts.  */
 482		if (dma_len > (1U << 24))
 483			dma_len = (1U << 24);
 484	} else {
 485		u32 base, end;
 486
 487		/* ESP chip limits other variants by 16-bits of transfer
 488		 * count.  Actually on FAS100A and FAS236 we could get
 489		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
 490		 * in the ESP_CFG2 register but that causes other unwanted
 491		 * changes so we don't use it currently.
 492		 */
 493		if (dma_len > (1U << 16))
 494			dma_len = (1U << 16);
 495
 496		/* All of the DMA variants hooked up to these chips
 497		 * cannot handle crossing a 24-bit address boundary.
 498		 */
 499		base = dma_addr & ((1U << 24) - 1U);
 500		end = base + dma_len;
 501		if (end > (1U << 24))
 502			end = (1U <<24);
 503		dma_len = end - base;
 504	}
 505	return dma_len;
 506}
 507
 508static int esp_need_to_nego_wide(struct esp_target_data *tp)
 509{
 510	struct scsi_target *target = tp->starget;
 511
 512	return spi_width(target) != tp->nego_goal_width;
 513}
 514
 515static int esp_need_to_nego_sync(struct esp_target_data *tp)
 516{
 517	struct scsi_target *target = tp->starget;
 518
 519	/* When offset is zero, period is "don't care".  */
 520	if (!spi_offset(target) && !tp->nego_goal_offset)
 521		return 0;
 522
 523	if (spi_offset(target) == tp->nego_goal_offset &&
 524	    spi_period(target) == tp->nego_goal_period)
 525		return 0;
 526
 527	return 1;
 528}
 529
 530static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
 531			     struct esp_lun_data *lp)
 532{
 533	if (!ent->tag[0]) {
 534		/* Non-tagged, slot already taken?  */
 535		if (lp->non_tagged_cmd)
 536			return -EBUSY;
 537
 538		if (lp->hold) {
 539			/* We are being held by active tagged
 540			 * commands.
 541			 */
 542			if (lp->num_tagged)
 543				return -EBUSY;
 544
 545			/* Tagged commands completed, we can unplug
 546			 * the queue and run this untagged command.
 547			 */
 548			lp->hold = 0;
 549		} else if (lp->num_tagged) {
 550			/* Plug the queue until num_tagged decreases
 551			 * to zero in esp_free_lun_tag.
 552			 */
 553			lp->hold = 1;
 554			return -EBUSY;
 555		}
 556
 557		lp->non_tagged_cmd = ent;
 558		return 0;
 559	} else {
 560		/* Tagged command, see if blocked by a
 561		 * non-tagged one.
 562		 */
 563		if (lp->non_tagged_cmd || lp->hold)
 564			return -EBUSY;
 565	}
 566
 567	BUG_ON(lp->tagged_cmds[ent->tag[1]]);
 568
 569	lp->tagged_cmds[ent->tag[1]] = ent;
 570	lp->num_tagged++;
 571
 572	return 0;
 573}
 574
 575static void esp_free_lun_tag(struct esp_cmd_entry *ent,
 576			     struct esp_lun_data *lp)
 577{
 578	if (ent->tag[0]) {
 579		BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
 580		lp->tagged_cmds[ent->tag[1]] = NULL;
 581		lp->num_tagged--;
 582	} else {
 583		BUG_ON(lp->non_tagged_cmd != ent);
 584		lp->non_tagged_cmd = NULL;
 585	}
 586}
 587
 588/* When a contingent allegiance conditon is created, we force feed a
 589 * REQUEST_SENSE command to the device to fetch the sense data.  I
 590 * tried many other schemes, relying on the scsi error handling layer
 591 * to send out the REQUEST_SENSE automatically, but this was difficult
 592 * to get right especially in the presence of applications like smartd
 593 * which use SG_IO to send out their own REQUEST_SENSE commands.
 594 */
 595static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
 596{
 597	struct scsi_cmnd *cmd = ent->cmd;
 598	struct scsi_device *dev = cmd->device;
 599	int tgt, lun;
 600	u8 *p, val;
 601
 602	tgt = dev->id;
 603	lun = dev->lun;
 604
 605
 606	if (!ent->sense_ptr) {
 607		esp_log_autosense("esp%d: Doing auto-sense for "
 608				  "tgt[%d] lun[%d]\n",
 609				  esp->host->unique_id, tgt, lun);
 610
 611		ent->sense_ptr = cmd->sense_buffer;
 612		ent->sense_dma = esp->ops->map_single(esp,
 613						      ent->sense_ptr,
 614						      SCSI_SENSE_BUFFERSIZE,
 615						      DMA_FROM_DEVICE);
 616	}
 617	ent->saved_sense_ptr = ent->sense_ptr;
 618
 619	esp->active_cmd = ent;
 620
 621	p = esp->command_block;
 622	esp->msg_out_len = 0;
 623
 624	*p++ = IDENTIFY(0, lun);
 625	*p++ = REQUEST_SENSE;
 626	*p++ = ((dev->scsi_level <= SCSI_2) ?
 627		(lun << 5) : 0);
 628	*p++ = 0;
 629	*p++ = 0;
 630	*p++ = SCSI_SENSE_BUFFERSIZE;
 631	*p++ = 0;
 632
 633	esp->select_state = ESP_SELECT_BASIC;
 634
 635	val = tgt;
 636	if (esp->rev == FASHME)
 637		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
 638	esp_write8(val, ESP_BUSID);
 639
 640	esp_write_tgt_sync(esp, tgt);
 641	esp_write_tgt_config3(esp, tgt);
 642
 643	val = (p - esp->command_block);
 644
 645	if (esp->rev == FASHME)
 646		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
 647	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
 648			       val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
 649}
 650
 651static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
 652{
 653	struct esp_cmd_entry *ent;
 654
 655	list_for_each_entry(ent, &esp->queued_cmds, list) {
 656		struct scsi_cmnd *cmd = ent->cmd;
 657		struct scsi_device *dev = cmd->device;
 658		struct esp_lun_data *lp = dev->hostdata;
 659
 660		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 661			ent->tag[0] = 0;
 662			ent->tag[1] = 0;
 663			return ent;
 664		}
 665
 666		if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
 667			ent->tag[0] = 0;
 668			ent->tag[1] = 0;
 669		}
 670
 671		if (esp_alloc_lun_tag(ent, lp) < 0)
 672			continue;
 673
 674		return ent;
 675	}
 676
 677	return NULL;
 678}
 679
 680static void esp_maybe_execute_command(struct esp *esp)
 681{
 682	struct esp_target_data *tp;
 683	struct esp_lun_data *lp;
 684	struct scsi_device *dev;
 685	struct scsi_cmnd *cmd;
 686	struct esp_cmd_entry *ent;
 687	int tgt, lun, i;
 688	u32 val, start_cmd;
 689	u8 *p;
 690
 691	if (esp->active_cmd ||
 692	    (esp->flags & ESP_FLAG_RESETTING))
 693		return;
 694
 695	ent = find_and_prep_issuable_command(esp);
 696	if (!ent)
 697		return;
 698
 699	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 700		esp_autosense(esp, ent);
 701		return;
 702	}
 703
 704	cmd = ent->cmd;
 705	dev = cmd->device;
 706	tgt = dev->id;
 707	lun = dev->lun;
 708	tp = &esp->target[tgt];
 709	lp = dev->hostdata;
 710
 711	list_move(&ent->list, &esp->active_cmds);
 712
 713	esp->active_cmd = ent;
 714
 715	esp_map_dma(esp, cmd);
 716	esp_save_pointers(esp, ent);
 717
 718	esp_check_command_len(esp, cmd);
 719
 720	p = esp->command_block;
 721
 722	esp->msg_out_len = 0;
 723	if (tp->flags & ESP_TGT_CHECK_NEGO) {
 724		/* Need to negotiate.  If the target is broken
 725		 * go for synchronous transfers and non-wide.
 726		 */
 727		if (tp->flags & ESP_TGT_BROKEN) {
 728			tp->flags &= ~ESP_TGT_DISCONNECT;
 729			tp->nego_goal_period = 0;
 730			tp->nego_goal_offset = 0;
 731			tp->nego_goal_width = 0;
 732			tp->nego_goal_tags = 0;
 733		}
 734
 735		/* If the settings are not changing, skip this.  */
 736		if (spi_width(tp->starget) == tp->nego_goal_width &&
 737		    spi_period(tp->starget) == tp->nego_goal_period &&
 738		    spi_offset(tp->starget) == tp->nego_goal_offset) {
 739			tp->flags &= ~ESP_TGT_CHECK_NEGO;
 740			goto build_identify;
 741		}
 742
 743		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
 744			esp->msg_out_len =
 745				spi_populate_width_msg(&esp->msg_out[0],
 746						       (tp->nego_goal_width ?
 747							1 : 0));
 748			tp->flags |= ESP_TGT_NEGO_WIDE;
 749		} else if (esp_need_to_nego_sync(tp)) {
 750			esp->msg_out_len =
 751				spi_populate_sync_msg(&esp->msg_out[0],
 752						      tp->nego_goal_period,
 753						      tp->nego_goal_offset);
 754			tp->flags |= ESP_TGT_NEGO_SYNC;
 755		} else {
 756			tp->flags &= ~ESP_TGT_CHECK_NEGO;
 757		}
 758
 759		/* Process it like a slow command.  */
 760		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
 761			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
 762	}
 763
 764build_identify:
 765	/* If we don't have a lun-data struct yet, we're probing
 766	 * so do not disconnect.  Also, do not disconnect unless
 767	 * we have a tag on this command.
 768	 */
 769	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
 770		*p++ = IDENTIFY(1, lun);
 771	else
 772		*p++ = IDENTIFY(0, lun);
 773
 774	if (ent->tag[0] && esp->rev == ESP100) {
 775		/* ESP100 lacks select w/atn3 command, use select
 776		 * and stop instead.
 777		 */
 778		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
 779	}
 780
 781	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
 782		start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
 783		if (ent->tag[0]) {
 784			*p++ = ent->tag[0];
 785			*p++ = ent->tag[1];
 786
 787			start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
 788		}
 789
 790		for (i = 0; i < cmd->cmd_len; i++)
 791			*p++ = cmd->cmnd[i];
 792
 793		esp->select_state = ESP_SELECT_BASIC;
 794	} else {
 795		esp->cmd_bytes_left = cmd->cmd_len;
 796		esp->cmd_bytes_ptr = &cmd->cmnd[0];
 797
 798		if (ent->tag[0]) {
 799			for (i = esp->msg_out_len - 1;
 800			     i >= 0; i--)
 801				esp->msg_out[i + 2] = esp->msg_out[i];
 802			esp->msg_out[0] = ent->tag[0];
 803			esp->msg_out[1] = ent->tag[1];
 804			esp->msg_out_len += 2;
 805		}
 806
 807		start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
 808		esp->select_state = ESP_SELECT_MSGOUT;
 809	}
 810	val = tgt;
 811	if (esp->rev == FASHME)
 812		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
 813	esp_write8(val, ESP_BUSID);
 814
 815	esp_write_tgt_sync(esp, tgt);
 816	esp_write_tgt_config3(esp, tgt);
 817
 818	val = (p - esp->command_block);
 819
 820	if (esp_debug & ESP_DEBUG_SCSICMD) {
 821		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
 822		for (i = 0; i < cmd->cmd_len; i++)
 823			printk("%02x ", cmd->cmnd[i]);
 824		printk("]\n");
 825	}
 826
 827	if (esp->rev == FASHME)
 828		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
 829	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
 830			       val, 16, 0, start_cmd);
 831}
 832
 833static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
 834{
 835	struct list_head *head = &esp->esp_cmd_pool;
 836	struct esp_cmd_entry *ret;
 837
 838	if (list_empty(head)) {
 839		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
 840	} else {
 841		ret = list_entry(head->next, struct esp_cmd_entry, list);
 842		list_del(&ret->list);
 843		memset(ret, 0, sizeof(*ret));
 844	}
 845	return ret;
 846}
 847
 848static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
 849{
 850	list_add(&ent->list, &esp->esp_cmd_pool);
 851}
 852
 853static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
 854			    struct scsi_cmnd *cmd, unsigned int result)
 855{
 856	struct scsi_device *dev = cmd->device;
 857	int tgt = dev->id;
 858	int lun = dev->lun;
 859
 860	esp->active_cmd = NULL;
 861	esp_unmap_dma(esp, cmd);
 862	esp_free_lun_tag(ent, dev->hostdata);
 863	cmd->result = result;
 864
 865	if (ent->eh_done) {
 866		complete(ent->eh_done);
 867		ent->eh_done = NULL;
 868	}
 869
 870	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 871		esp->ops->unmap_single(esp, ent->sense_dma,
 872				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 873		ent->sense_ptr = NULL;
 874
 875		/* Restore the message/status bytes to what we actually
 876		 * saw originally.  Also, report that we are providing
 877		 * the sense data.
 878		 */
 879		cmd->result = ((DRIVER_SENSE << 24) |
 880			       (DID_OK << 16) |
 881			       (COMMAND_COMPLETE << 8) |
 882			       (SAM_STAT_CHECK_CONDITION << 0));
 883
 884		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
 885		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
 886			int i;
 887
 888			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
 889			       esp->host->unique_id, tgt, lun);
 890			for (i = 0; i < 18; i++)
 891				printk("%02x ", cmd->sense_buffer[i]);
 892			printk("]\n");
 893		}
 894	}
 895
 896	cmd->scsi_done(cmd);
 897
 898	list_del(&ent->list);
 899	esp_put_ent(esp, ent);
 900
 901	esp_maybe_execute_command(esp);
 902}
 903
 904static unsigned int compose_result(unsigned int status, unsigned int message,
 905				   unsigned int driver_code)
 906{
 907	return (status | (message << 8) | (driver_code << 16));
 908}
 909
 910static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
 911{
 912	struct scsi_device *dev = ent->cmd->device;
 913	struct esp_lun_data *lp = dev->hostdata;
 914
 915	scsi_track_queue_full(dev, lp->num_tagged - 1);
 916}
 917
 918static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 919{
 920	struct scsi_device *dev = cmd->device;
 921	struct esp *esp = shost_priv(dev->host);
 922	struct esp_cmd_priv *spriv;
 923	struct esp_cmd_entry *ent;
 924
 925	ent = esp_get_ent(esp);
 926	if (!ent)
 927		return SCSI_MLQUEUE_HOST_BUSY;
 928
 929	ent->cmd = cmd;
 930
 931	cmd->scsi_done = done;
 932
 933	spriv = ESP_CMD_PRIV(cmd);
 934	spriv->u.dma_addr = ~(dma_addr_t)0x0;
 935
 936	list_add_tail(&ent->list, &esp->queued_cmds);
 937
 938	esp_maybe_execute_command(esp);
 939
 940	return 0;
 941}
 942
 943static DEF_SCSI_QCMD(esp_queuecommand)
 944
 945static int esp_check_gross_error(struct esp *esp)
 946{
 947	if (esp->sreg & ESP_STAT_SPAM) {
 948		/* Gross Error, could be one of:
 949		 * - top of fifo overwritten
 950		 * - top of command register overwritten
 951		 * - DMA programmed with wrong direction
 952		 * - improper phase change
 953		 */
 954		printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
 955		       esp->host->unique_id, esp->sreg);
 956		/* XXX Reset the chip. XXX */
 957		return 1;
 958	}
 959	return 0;
 960}
 961
 962static int esp_check_spur_intr(struct esp *esp)
 963{
 964	switch (esp->rev) {
 965	case ESP100:
 966	case ESP100A:
 967		/* The interrupt pending bit of the status register cannot
 968		 * be trusted on these revisions.
 969		 */
 970		esp->sreg &= ~ESP_STAT_INTR;
 971		break;
 972
 973	default:
 974		if (!(esp->sreg & ESP_STAT_INTR)) {
 975			esp->ireg = esp_read8(ESP_INTRPT);
 976			if (esp->ireg & ESP_INTR_SR)
 977				return 1;
 978
 979			/* If the DMA is indicating interrupt pending and the
 980			 * ESP is not, the only possibility is a DMA error.
 981			 */
 982			if (!esp->ops->dma_error(esp)) {
 983				printk(KERN_ERR PFX "esp%d: Spurious irq, "
 984				       "sreg=%02x.\n",
 985				       esp->host->unique_id, esp->sreg);
 986				return -1;
 987			}
 988
 989			printk(KERN_ERR PFX "esp%d: DMA error\n",
 990			       esp->host->unique_id);
 991
 992			/* XXX Reset the chip. XXX */
 993			return -1;
 994		}
 995		break;
 996	}
 997
 998	return 0;
 999}
1000
1001static void esp_schedule_reset(struct esp *esp)
1002{
1003	esp_log_reset("ESP: esp_schedule_reset() from %pf\n",
1004		      __builtin_return_address(0));
1005	esp->flags |= ESP_FLAG_RESETTING;
1006	esp_event(esp, ESP_EVENT_RESET);
1007}
1008
1009/* In order to avoid having to add a special half-reconnected state
1010 * into the driver we just sit here and poll through the rest of
1011 * the reselection process to get the tag message bytes.
1012 */
1013static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1014						    struct esp_lun_data *lp)
1015{
1016	struct esp_cmd_entry *ent;
1017	int i;
1018
1019	if (!lp->num_tagged) {
1020		printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
1021		       esp->host->unique_id);
1022		return NULL;
1023	}
1024
1025	esp_log_reconnect("ESP: reconnect tag, ");
1026
1027	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
1028		if (esp->ops->irq_pending(esp))
1029			break;
1030	}
1031	if (i == ESP_QUICKIRQ_LIMIT) {
1032		printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
1033		       esp->host->unique_id);
1034		return NULL;
1035	}
1036
1037	esp->sreg = esp_read8(ESP_STATUS);
1038	esp->ireg = esp_read8(ESP_INTRPT);
1039
1040	esp_log_reconnect("IRQ(%d:%x:%x), ",
1041			  i, esp->ireg, esp->sreg);
1042
1043	if (esp->ireg & ESP_INTR_DC) {
1044		printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
1045		       esp->host->unique_id);
1046		return NULL;
1047	}
1048
1049	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
1050		printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
1051		       esp->host->unique_id, esp->sreg);
1052		return NULL;
1053	}
1054
1055	/* DMA in the tag bytes... */
1056	esp->command_block[0] = 0xff;
1057	esp->command_block[1] = 0xff;
1058	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1059			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);
1060
1061	/* ACK the message.  */
1062	scsi_esp_cmd(esp, ESP_CMD_MOK);
1063
1064	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
1065		if (esp->ops->irq_pending(esp)) {
1066			esp->sreg = esp_read8(ESP_STATUS);
1067			esp->ireg = esp_read8(ESP_INTRPT);
1068			if (esp->ireg & ESP_INTR_FDONE)
1069				break;
1070		}
1071		udelay(1);
1072	}
1073	if (i == ESP_RESELECT_TAG_LIMIT) {
1074		printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
1075		       esp->host->unique_id);
1076		return NULL;
1077	}
1078	esp->ops->dma_drain(esp);
1079	esp->ops->dma_invalidate(esp);
1080
1081	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
1082			  i, esp->ireg, esp->sreg,
1083			  esp->command_block[0],
1084			  esp->command_block[1]);
1085
1086	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
1087	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
1088		printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
1089		       "type %02x.\n",
1090		       esp->host->unique_id, esp->command_block[0]);
1091		return NULL;
1092	}
1093
1094	ent = lp->tagged_cmds[esp->command_block[1]];
1095	if (!ent) {
1096		printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
1097		       "tag %02x.\n",
1098		       esp->host->unique_id, esp->command_block[1]);
1099		return NULL;
1100	}
1101
1102	return ent;
1103}
1104
1105static int esp_reconnect(struct esp *esp)
1106{
1107	struct esp_cmd_entry *ent;
1108	struct esp_target_data *tp;
1109	struct esp_lun_data *lp;
1110	struct scsi_device *dev;
1111	int target, lun;
1112
1113	BUG_ON(esp->active_cmd);
1114	if (esp->rev == FASHME) {
1115		/* FASHME puts the target and lun numbers directly
1116		 * into the fifo.
1117		 */
1118		target = esp->fifo[0];
1119		lun = esp->fifo[1] & 0x7;
1120	} else {
1121		u8 bits = esp_read8(ESP_FDATA);
1122
1123		/* Older chips put the lun directly into the fifo, but
1124		 * the target is given as a sample of the arbitration
1125		 * lines on the bus at reselection time.  So we should
1126		 * see the ID of the ESP and the one reconnecting target
1127		 * set in the bitmap.
1128		 */
1129		if (!(bits & esp->scsi_id_mask))
1130			goto do_reset;
1131		bits &= ~esp->scsi_id_mask;
1132		if (!bits || (bits & (bits - 1)))
1133			goto do_reset;
1134
1135		target = ffs(bits) - 1;
1136		lun = (esp_read8(ESP_FDATA) & 0x7);
1137
1138		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1139		if (esp->rev == ESP100) {
1140			u8 ireg = esp_read8(ESP_INTRPT);
1141			/* This chip has a bug during reselection that can
1142			 * cause a spurious illegal-command interrupt, which
1143			 * we simply ACK here.  Another possibility is a bus
1144			 * reset so we must check for that.
1145			 */
1146			if (ireg & ESP_INTR_SR)
1147				goto do_reset;
1148		}
1149		scsi_esp_cmd(esp, ESP_CMD_NULL);
1150	}
1151
1152	esp_write_tgt_sync(esp, target);
1153	esp_write_tgt_config3(esp, target);
1154
1155	scsi_esp_cmd(esp, ESP_CMD_MOK);
1156
1157	if (esp->rev == FASHME)
1158		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
1159			   ESP_BUSID);
1160
1161	tp = &esp->target[target];
1162	dev = __scsi_device_lookup_by_target(tp->starget, lun);
1163	if (!dev) {
1164		printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
1165		       "tgt[%u] lun[%u]\n",
1166		       esp->host->unique_id, target, lun);
1167		goto do_reset;
1168	}
1169	lp = dev->hostdata;
1170
1171	ent = lp->non_tagged_cmd;
1172	if (!ent) {
1173		ent = esp_reconnect_with_tag(esp, lp);
1174		if (!ent)
1175			goto do_reset;
1176	}
1177
1178	esp->active_cmd = ent;
1179
1180	if (ent->flags & ESP_CMD_FLAG_ABORT) {
1181		esp->msg_out[0] = ABORT_TASK_SET;
1182		esp->msg_out_len = 1;
1183		scsi_esp_cmd(esp, ESP_CMD_SATN);
1184	}
1185
1186	esp_event(esp, ESP_EVENT_CHECK_PHASE);
1187	esp_restore_pointers(esp, ent);
1188	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1189	return 1;
1190
1191do_reset:
1192	esp_schedule_reset(esp);
1193	return 0;
1194}
1195
1196static int esp_finish_select(struct esp *esp)
1197{
1198	struct esp_cmd_entry *ent;
1199	struct scsi_cmnd *cmd;
1200	u8 orig_select_state;
1201
1202	orig_select_state = esp->select_state;
1203
1204	/* No longer selecting.  */
1205	esp->select_state = ESP_SELECT_NONE;
1206
1207	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
1208	ent = esp->active_cmd;
1209	cmd = ent->cmd;
1210
1211	if (esp->ops->dma_error(esp)) {
1212		/* If we see a DMA error during or as a result of selection,
1213		 * all bets are off.
1214		 */
1215		esp_schedule_reset(esp);
1216		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
1217		return 0;
1218	}
1219
1220	esp->ops->dma_invalidate(esp);
1221
1222	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
1223		struct esp_target_data *tp = &esp->target[cmd->device->id];
1224
1225		/* Carefully back out of the selection attempt.  Release
1226		 * resources (such as DMA mapping & TAG) and reset state (such
1227		 * as message out and command delivery variables).
1228		 */
1229		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1230			esp_unmap_dma(esp, cmd);
1231			esp_free_lun_tag(ent, cmd->device->hostdata);
1232			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
1233			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
1234			esp->cmd_bytes_ptr = NULL;
1235			esp->cmd_bytes_left = 0;
1236		} else {
1237			esp->ops->unmap_single(esp, ent->sense_dma,
1238					       SCSI_SENSE_BUFFERSIZE,
1239					       DMA_FROM_DEVICE);
1240			ent->sense_ptr = NULL;
1241		}
1242
1243		/* Now that the state is unwound properly, put back onto
1244		 * the issue queue.  This command is no longer active.
1245		 */
1246		list_move(&ent->list, &esp->queued_cmds);
1247		esp->active_cmd = NULL;
1248
1249		/* Return value ignored by caller, it directly invokes
1250		 * esp_reconnect().
1251		 */
1252		return 0;
1253	}
1254
1255	if (esp->ireg == ESP_INTR_DC) {
1256		struct scsi_device *dev = cmd->device;
1257
1258		/* Disconnect.  Make sure we re-negotiate sync and
1259		 * wide parameters if this target starts responding
1260		 * again in the future.
1261		 */
1262		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
1263
1264		scsi_esp_cmd(esp, ESP_CMD_ESEL);
1265		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
1266		return 1;
1267	}
1268
1269	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
1270		/* Selection successful.  On pre-FAST chips we have
1271		 * to do a NOP and possibly clean out the FIFO.
1272		 */
1273		if (esp->rev <= ESP236) {
1274			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
1275
1276			scsi_esp_cmd(esp, ESP_CMD_NULL);
1277
1278			if (!fcnt &&
1279			    (!esp->prev_soff ||
1280			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
1281				esp_flush_fifo(esp);
1282		}
1283
1284		/* If we are doing a slow command, negotiation, etc.
1285		 * we'll do the right thing as we transition to the
1286		 * next phase.
1287		 */
1288		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1289		return 0;
1290	}
1291
1292	printk("ESP: Unexpected selection completion ireg[%x].\n",
1293	       esp->ireg);
1294	esp_schedule_reset(esp);
1295	return 0;
1296}
1297
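/* Work out how many bytes of the current DMA transfer actually made it
 * across the bus.  The chip's transfer counter holds the residue, and
 * for data-out any bytes still sitting in the FIFO were counted but
 * never reached the target, so they are subtracted as well:
 *
 *   bytes_sent = data_dma_len - transfer_count_residue - fifo_residue
 *
 * For example (illustrative numbers only): a 512 byte data-out that
 * stops with a transfer count of 16 and 2 bytes left in the FIFO has
 * really sent 512 - 16 - 2 = 494 bytes.  A negative return value
 * signals the ESP100 synchronous transfer bug handled below.
 */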
1298static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
1299			       struct scsi_cmnd *cmd)
1300{
1301	int fifo_cnt, ecount, bytes_sent, flush_fifo;
1302
1303	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
1304	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
1305		fifo_cnt <<= 1;
1306
1307	ecount = 0;
1308	if (!(esp->sreg & ESP_STAT_TCNT)) {
1309		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
1310			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
1311		if (esp->rev == FASHME)
1312			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
1313	}
1314
1315	bytes_sent = esp->data_dma_len;
1316	bytes_sent -= ecount;
1317
1318	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
1319		bytes_sent -= fifo_cnt;
1320
1321	flush_fifo = 0;
1322	if (!esp->prev_soff) {
1323		/* Synchronous data transfer, always flush fifo. */
1324		flush_fifo = 1;
1325	} else {
1326		if (esp->rev == ESP100) {
1327			u32 fflags, phase;
1328
1329			/* ESP100 has a chip bug where in the synchronous data
1330			 * phase it can mistake a final long REQ pulse from the
1331			 * target as an extra data byte.  Fun.
1332			 *
1333			 * To detect this case we resample the status register
1334			 * and fifo flags.  If we're still in a data phase and
1335			 * we see spurious chunks in the fifo, we return error
1336			 * to the caller which should reset and set things up
1337			 * such that we only try future transfers to this
1338			 * target in synchronous mode.
1339			 */
1340			esp->sreg = esp_read8(ESP_STATUS);
1341			phase = esp->sreg & ESP_STAT_PMASK;
1342			fflags = esp_read8(ESP_FFLAGS);
1343
1344			if ((phase == ESP_DOP &&
1345			     (fflags & ESP_FF_ONOTZERO)) ||
1346			    (phase == ESP_DIP &&
1347			     (fflags & ESP_FF_FBYTES)))
1348				return -1;
1349		}
1350		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
1351			flush_fifo = 1;
1352	}
1353
1354	if (flush_fifo)
1355		esp_flush_fifo(esp);
1356
1357	return bytes_sent;
1358}
1359
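/* Commit a negotiated (or reset) sync agreement: record it in the SPI
 * transport class, program the chip's synchronous offset and transfer
 * period registers, and on FAS chips set or clear the fast-SCSI
 * config3 bit.  SDTR period factors are in 4ns units, so a value
 * below 50 means faster than 200ns, i.e. fast SCSI.  Calling this
 * with all zeros drops the target back to asynchronous transfers.
 */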
1360static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
1361			u8 scsi_period, u8 scsi_offset,
1362			u8 esp_stp, u8 esp_soff)
1363{
1364	spi_period(tp->starget) = scsi_period;
1365	spi_offset(tp->starget) = scsi_offset;
1366	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;
1367
1368	if (esp_soff) {
1369		esp_stp &= 0x1f;
1370		esp_soff |= esp->radelay;
1371		if (esp->rev >= FAS236) {
1372			u8 bit = ESP_CONFIG3_FSCSI;
1373			if (esp->rev >= FAS100A)
1374				bit = ESP_CONFIG3_FAST;
1375
1376			if (scsi_period < 50) {
1377				if (esp->rev == FASHME)
1378					esp_soff &= ~esp->radelay;
1379				tp->esp_config3 |= bit;
1380			} else {
1381				tp->esp_config3 &= ~bit;
1382			}
1383			esp->prev_cfg3 = tp->esp_config3;
1384			esp_write8(esp->prev_cfg3, ESP_CFG3);
1385		}
1386	}
1387
1388	tp->esp_period = esp->prev_stp = esp_stp;
1389	tp->esp_offset = esp->prev_soff = esp_soff;
1390
1391	esp_write8(esp_soff, ESP_SOFF);
1392	esp_write8(esp_stp, ESP_STP);
1393
1394	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
1395
1396	spi_display_xfer_agreement(tp->starget);
1397}
1398
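/* The target rejected our last message.  Fall back gracefully: a
 * rejected WDTR drops to narrow and retries sync negotiation if it is
 * still needed; a rejected SDTR drops to asynchronous transfers.  A
 * reject in any other context is unexpected, so we abort the task set.
 */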
1399static void esp_msgin_reject(struct esp *esp)
1400{
1401	struct esp_cmd_entry *ent = esp->active_cmd;
1402	struct scsi_cmnd *cmd = ent->cmd;
1403	struct esp_target_data *tp;
1404	int tgt;
1405
1406	tgt = cmd->device->id;
1407	tp = &esp->target[tgt];
1408
1409	if (tp->flags & ESP_TGT_NEGO_WIDE) {
1410		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);
1411
1412		if (!esp_need_to_nego_sync(tp)) {
1413			tp->flags &= ~ESP_TGT_CHECK_NEGO;
1414			scsi_esp_cmd(esp, ESP_CMD_RATN);
1415		} else {
1416			esp->msg_out_len =
1417				spi_populate_sync_msg(&esp->msg_out[0],
1418						      tp->nego_goal_period,
1419						      tp->nego_goal_offset);
1420			tp->flags |= ESP_TGT_NEGO_SYNC;
1421			scsi_esp_cmd(esp, ESP_CMD_SATN);
1422		}
1423		return;
1424	}
1425
1426	if (tp->flags & ESP_TGT_NEGO_SYNC) {
1427		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
1428		tp->esp_period = 0;
1429		tp->esp_offset = 0;
1430		esp_setsync(esp, tp, 0, 0, 0, 0);
1431		scsi_esp_cmd(esp, ESP_CMD_RATN);
1432		return;
1433	}
1434
1435	esp->msg_out[0] = ABORT_TASK_SET;
1436	esp->msg_out_len = 1;
1437	scsi_esp_cmd(esp, ESP_CMD_SATN);
1438}
1439
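/* Handle an incoming SDTR: validate the target's period/offset and
 * convert the period into chip clock ticks for the transfer period
 * register.  With ccycle apparently expressed in picoseconds,
 * one_clock = ccycle / 1000 is the clock period in ns, and
 * stp = DIV_ROUND_UP(period * 4, one_clock) since the SDTR period
 * factor is in 4ns units.  Illustrative arithmetic, assuming a 20MHz
 * clock: one_clock = 50ns, so a period factor of 50 (200ns) yields
 * stp = DIV_ROUND_UP(200, 50) = 4.
 */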
1440static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
1441{
1442	u8 period = esp->msg_in[3];
1443	u8 offset = esp->msg_in[4];
1444	u8 stp;
1445
1446	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
1447		goto do_reject;
1448
1449	if (offset > 15)
1450		goto do_reject;
1451
1452	if (offset) {
1453		int one_clock;
1454
1455		if (period > esp->max_period) {
1456			period = offset = 0;
1457			goto do_sdtr;
1458		}
1459		if (period < esp->min_period)
1460			goto do_reject;
1461
1462		one_clock = esp->ccycle / 1000;
1463		stp = DIV_ROUND_UP(period << 2, one_clock);
1464		if (stp && esp->rev >= FAS236) {
1465			if (stp >= 50)
1466				stp--;
1467		}
1468	} else {
1469		stp = 0;
1470	}
1471
1472	esp_setsync(esp, tp, period, offset, stp, offset);
1473	return;
1474
1475do_reject:
1476	esp->msg_out[0] = MESSAGE_REJECT;
1477	esp->msg_out_len = 1;
1478	scsi_esp_cmd(esp, ESP_CMD_SATN);
1479	return;
1480
1481do_sdtr:
1482	tp->nego_goal_period = period;
1483	tp->nego_goal_offset = offset;
1484	esp->msg_out_len =
1485		spi_populate_sync_msg(&esp->msg_out[0],
1486				      tp->nego_goal_period,
1487				      tp->nego_goal_offset);
1488	scsi_esp_cmd(esp, ESP_CMD_SATN);
1489}
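/* Handle an incoming WDTR.  Only the FASHME part here is wide capable,
 * and only 8 or 16 bit widths are acceptable; anything else is
 * rejected.  On success the EWIDE config3 bit is updated and, if sync
 * still needs negotiating, an SDTR is queued up next.
 */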
1490
1491static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
1492{
1493	int size = 8 << esp->msg_in[3];
1494	u8 cfg3;
1495
1496	if (esp->rev != FASHME)
1497		goto do_reject;
1498
1499	if (size != 8 && size != 16)
1500		goto do_reject;
1501
1502	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
1503		goto do_reject;
1504
1505	cfg3 = tp->esp_config3;
1506	if (size == 16) {
1507		tp->flags |= ESP_TGT_WIDE;
1508		cfg3 |= ESP_CONFIG3_EWIDE;
1509	} else {
1510		tp->flags &= ~ESP_TGT_WIDE;
1511		cfg3 &= ~ESP_CONFIG3_EWIDE;
1512	}
1513	tp->esp_config3 = cfg3;
1514	esp->prev_cfg3 = cfg3;
1515	esp_write8(cfg3, ESP_CFG3);
1516
1517	tp->flags &= ~ESP_TGT_NEGO_WIDE;
1518
1519	spi_period(tp->starget) = 0;
1520	spi_offset(tp->starget) = 0;
1521	if (!esp_need_to_nego_sync(tp)) {
1522		tp->flags &= ~ESP_TGT_CHECK_NEGO;
1523		scsi_esp_cmd(esp, ESP_CMD_RATN);
1524	} else {
1525		esp->msg_out_len =
1526			spi_populate_sync_msg(&esp->msg_out[0],
1527					      tp->nego_goal_period,
1528					      tp->nego_goal_offset);
1529		tp->flags |= ESP_TGT_NEGO_SYNC;
1530		scsi_esp_cmd(esp, ESP_CMD_SATN);
1531	}
1532	return;
1533
1534do_reject:
1535	esp->msg_out[0] = MESSAGE_REJECT;
1536	esp->msg_out_len = 1;
1537	scsi_esp_cmd(esp, ESP_CMD_SATN);
1538}
1539
1540static void esp_msgin_extended(struct esp *esp)
1541{
1542	struct esp_cmd_entry *ent = esp->active_cmd;
1543	struct scsi_cmnd *cmd = ent->cmd;
1544	struct esp_target_data *tp;
1545	int tgt = cmd->device->id;
1546
1547	tp = &esp->target[tgt];
1548	if (esp->msg_in[2] == EXTENDED_SDTR) {
1549		esp_msgin_sdtr(esp, tp);
1550		return;
1551	}
1552	if (esp->msg_in[2] == EXTENDED_WDTR) {
1553		esp_msgin_wdtr(esp, tp);
1554		return;
1555	}
1556
1557	printk("ESP: Unexpected extended msg type %x\n",
1558	       esp->msg_in[2]);
1559
1560	esp->msg_out[0] = ABORT_TASK_SET;
1561	esp->msg_out_len = 1;
1562	scsi_esp_cmd(esp, ESP_CMD_SATN);
1563}
1564
1565/* Analyze msgin bytes received from target so far.  Return non-zero
1566 * if there are more bytes needed to complete the message.
1567 */
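/* Extended messages are framed as { EXTENDED_MESSAGE, length, code,
 * arguments... }, so for example an SDTR arrives as the five bytes
 * { 0x01, 0x03, 0x01, period, offset }; we keep asking for more bytes
 * until msg_in_len covers msg_in[1] + 2.
 */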
1568static int esp_msgin_process(struct esp *esp)
1569{
1570	u8 msg0 = esp->msg_in[0];
1571	int len = esp->msg_in_len;
1572
1573	if (msg0 & 0x80) {
1574		/* Identify */
1575		printk("ESP: Unexpected msgin identify\n");
1576		return 0;
1577	}
1578
1579	switch (msg0) {
1580	case EXTENDED_MESSAGE:
1581		if (len == 1)
1582			return 1;
1583		if (len < esp->msg_in[1] + 2)
1584			return 1;
1585		esp_msgin_extended(esp);
1586		return 0;
1587
1588	case IGNORE_WIDE_RESIDUE: {
1589		struct esp_cmd_entry *ent;
1590		struct esp_cmd_priv *spriv;
1591		if (len == 1)
1592			return 1;
1593
1594		if (esp->msg_in[1] != 1)
1595			goto do_reject;
1596
1597		ent = esp->active_cmd;
1598		spriv = ESP_CMD_PRIV(ent->cmd);
1599
1600		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
1601			spriv->cur_sg--;
1602			spriv->cur_residue = 1;
1603		} else
1604			spriv->cur_residue++;
1605		spriv->tot_residue++;
1606		return 0;
1607	}
1608	case NOP:
1609		return 0;
1610	case RESTORE_POINTERS:
1611		esp_restore_pointers(esp, esp->active_cmd);
1612		return 0;
1613	case SAVE_POINTERS:
1614		esp_save_pointers(esp, esp->active_cmd);
1615		return 0;
1616
1617	case COMMAND_COMPLETE:
1618	case DISCONNECT: {
1619		struct esp_cmd_entry *ent = esp->active_cmd;
1620
1621		ent->message = msg0;
1622		esp_event(esp, ESP_EVENT_FREE_BUS);
1623		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1624		return 0;
1625	}
1626	case MESSAGE_REJECT:
1627		esp_msgin_reject(esp);
1628		return 0;
1629
1630	default:
1631	do_reject:
1632		esp->msg_out[0] = MESSAGE_REJECT;
1633		esp->msg_out_len = 1;
1634		scsi_esp_cmd(esp, ESP_CMD_SATN);
1635		return 0;
1636	}
1637}
1638
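/* The heart of the driver: a state machine keyed off esp->event.
 * ESP_EVENT_CHECK_PHASE samples the bus phase bits in the status
 * register and dispatches to the matching data/status/message/command
 * handler.  The return value feeds the loop in __esp_interrupt():
 * 0 means more event processing is needed, 1 means interrupt handling
 * is complete for now.
 */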
1639static int esp_process_event(struct esp *esp)
1640{
1641	int write;
1642
1643again:
1644	write = 0;
1645	switch (esp->event) {
1646	case ESP_EVENT_CHECK_PHASE:
1647		switch (esp->sreg & ESP_STAT_PMASK) {
1648		case ESP_DOP:
1649			esp_event(esp, ESP_EVENT_DATA_OUT);
1650			break;
1651		case ESP_DIP:
1652			esp_event(esp, ESP_EVENT_DATA_IN);
1653			break;
1654		case ESP_STATP:
1655			esp_flush_fifo(esp);
1656			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1657			esp_event(esp, ESP_EVENT_STATUS);
1658			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1659			return 1;
1660
1661		case ESP_MOP:
1662			esp_event(esp, ESP_EVENT_MSGOUT);
1663			break;
1664
1665		case ESP_MIP:
1666			esp_event(esp, ESP_EVENT_MSGIN);
1667			break;
1668
1669		case ESP_CMDP:
1670			esp_event(esp, ESP_EVENT_CMD_START);
1671			break;
1672
1673		default:
1674			printk("ESP: Unexpected phase, sreg=%02x\n",
1675			       esp->sreg);
1676			esp_schedule_reset(esp);
1677			return 0;
1678		}
1679		goto again;
1681
1682	case ESP_EVENT_DATA_IN:
1683		write = 1;
1684		/* fallthru */
1685
1686	case ESP_EVENT_DATA_OUT: {
1687		struct esp_cmd_entry *ent = esp->active_cmd;
1688		struct scsi_cmnd *cmd = ent->cmd;
1689		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1690		unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1691
1692		if (esp->rev == ESP100)
1693			scsi_esp_cmd(esp, ESP_CMD_NULL);
1694
1695		if (write)
1696			ent->flags |= ESP_CMD_FLAG_WRITE;
1697		else
1698			ent->flags &= ~ESP_CMD_FLAG_WRITE;
1699
1700		if (esp->ops->dma_length_limit)
1701			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1702							     dma_len);
1703		else
1704			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1705
1706		esp->data_dma_len = dma_len;
1707
1708		if (!dma_len) {
1709			printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
1710			       esp->host->unique_id);
1711			printk(KERN_ERR PFX "esp%d: cur addr[%08llx] len[%08x]\n",
1712			       esp->host->unique_id,
1713			       (unsigned long long)esp_cur_dma_addr(ent, cmd),
1714			       esp_cur_dma_len(ent, cmd));
1715			esp_schedule_reset(esp);
1716			return 0;
1717		}
1718
1719		esp_log_datastart("ESP: start data addr[%08llx] len[%u] "
1720				  "write(%d)\n",
1721				  (unsigned long long)dma_addr, dma_len, write);
1722
1723		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1724				       write, ESP_CMD_DMA | ESP_CMD_TI);
1725		esp_event(esp, ESP_EVENT_DATA_DONE);
1726		break;
1727	}
1728	case ESP_EVENT_DATA_DONE: {
1729		struct esp_cmd_entry *ent = esp->active_cmd;
1730		struct scsi_cmnd *cmd = ent->cmd;
1731		int bytes_sent;
1732
1733		if (esp->ops->dma_error(esp)) {
1734			printk("ESP: data done, DMA error, resetting\n");
1735			esp_schedule_reset(esp);
1736			return 0;
1737		}
1738
1739		if (ent->flags & ESP_CMD_FLAG_WRITE) {
1740			/* XXX parity errors, etc. XXX */
1741
1742			esp->ops->dma_drain(esp);
1743		}
1744		esp->ops->dma_invalidate(esp);
1745
1746		if (esp->ireg != ESP_INTR_BSERV) {
1747			/* We should always see exactly a bus-service
1748			 * interrupt at the end of a successful transfer.
1749			 */
1750			printk("ESP: data done, not BSERV, resetting\n");
1751			esp_schedule_reset(esp);
1752			return 0;
1753		}
1754
1755		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1756
1757		esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
1758				 ent->flags, bytes_sent);
1759
1760		if (bytes_sent < 0) {
1761			/* XXX force sync mode for this target XXX */
1762			esp_schedule_reset(esp);
1763			return 0;
1764		}
1765
1766		esp_advance_dma(esp, ent, cmd, bytes_sent);
1767		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1768		goto again;
1769	}
1770
1771	case ESP_EVENT_STATUS: {
1772		struct esp_cmd_entry *ent = esp->active_cmd;
1773
1774		if (esp->ireg & ESP_INTR_FDONE) {
1775			ent->status = esp_read8(ESP_FDATA);
1776			ent->message = esp_read8(ESP_FDATA);
1777			scsi_esp_cmd(esp, ESP_CMD_MOK);
1778		} else if (esp->ireg == ESP_INTR_BSERV) {
1779			ent->status = esp_read8(ESP_FDATA);
1780			ent->message = 0xff;
1781			esp_event(esp, ESP_EVENT_MSGIN);
1782			return 0;
1783		}
1784
1785		if (ent->message != COMMAND_COMPLETE) {
1786			printk("ESP: Unexpected message %x in status\n",
1787			       ent->message);
1788			esp_schedule_reset(esp);
1789			return 0;
1790		}
1791
1792		esp_event(esp, ESP_EVENT_FREE_BUS);
1793		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1794		break;
1795	}
1796	case ESP_EVENT_FREE_BUS: {
1797		struct esp_cmd_entry *ent = esp->active_cmd;
1798		struct scsi_cmnd *cmd = ent->cmd;
1799
1800		if (ent->message == COMMAND_COMPLETE ||
1801		    ent->message == DISCONNECT)
1802			scsi_esp_cmd(esp, ESP_CMD_ESEL);
1803
1804		if (ent->message == COMMAND_COMPLETE) {
1805			esp_log_cmddone("ESP: Command done status[%x] "
1806					"message[%x]\n",
1807					ent->status, ent->message);
1808			if (ent->status == SAM_STAT_TASK_SET_FULL)
1809				esp_event_queue_full(esp, ent);
1810
1811			if (ent->status == SAM_STAT_CHECK_CONDITION &&
1812			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1813				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1814				esp_autosense(esp, ent);
1815			} else {
1816				esp_cmd_is_done(esp, ent, cmd,
1817						compose_result(ent->status,
1818							       ent->message,
1819							       DID_OK));
1820			}
1821		} else if (ent->message == DISCONNECT) {
1822			esp_log_disconnect("ESP: Disconnecting tgt[%d] "
1823					   "tag[%x:%x]\n",
1824					   cmd->device->id,
1825					   ent->tag[0], ent->tag[1]);
1826
1827			esp->active_cmd = NULL;
1828			esp_maybe_execute_command(esp);
1829		} else {
1830			printk("ESP: Unexpected message %x in freebus\n",
1831			       ent->message);
1832			esp_schedule_reset(esp);
1833			return 0;
1834		}
1835		if (esp->active_cmd)
1836			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1837		break;
1838	}
1839	case ESP_EVENT_MSGOUT: {
1840		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1841
1842		if (esp_debug & ESP_DEBUG_MSGOUT) {
1843			int i;
1844			printk("ESP: Sending message [ ");
1845			for (i = 0; i < esp->msg_out_len; i++)
1846				printk("%02x ", esp->msg_out[i]);
1847			printk("]\n");
1848		}
1849
1850		if (esp->rev == FASHME) {
1851			int i;
1852
1853			/* Always use the fifo.  */
1854			for (i = 0; i < esp->msg_out_len; i++) {
1855				esp_write8(esp->msg_out[i], ESP_FDATA);
1856				esp_write8(0, ESP_FDATA);
1857			}
1858			scsi_esp_cmd(esp, ESP_CMD_TI);
1859		} else {
1860			if (esp->msg_out_len == 1) {
1861				esp_write8(esp->msg_out[0], ESP_FDATA);
1862				scsi_esp_cmd(esp, ESP_CMD_TI);
1863			} else {
1864				/* Use DMA. */
1865				memcpy(esp->command_block,
1866				       esp->msg_out,
1867				       esp->msg_out_len);
1868
1869				esp->ops->send_dma_cmd(esp,
1870						       esp->command_block_dma,
1871						       esp->msg_out_len,
1872						       esp->msg_out_len,
1873						       0,
1874						       ESP_CMD_DMA|ESP_CMD_TI);
1875			}
1876		}
1877		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1878		break;
1879	}
1880	case ESP_EVENT_MSGOUT_DONE:
1881		if (esp->rev == FASHME) {
1882			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1883		} else {
1884			if (esp->msg_out_len > 1)
1885				esp->ops->dma_invalidate(esp);
1886		}
1887
1888		if (!(esp->ireg & ESP_INTR_DC)) {
1889			if (esp->rev != FASHME)
1890				scsi_esp_cmd(esp, ESP_CMD_NULL);
1891		}
1892		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1893		goto again;
1894	case ESP_EVENT_MSGIN:
1895		if (esp->ireg & ESP_INTR_BSERV) {
1896			if (esp->rev == FASHME) {
1897				if (!(esp_read8(ESP_STATUS2) &
1898				      ESP_STAT2_FEMPTY))
1899					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1900			} else {
1901				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1902				if (esp->rev == ESP100)
1903					scsi_esp_cmd(esp, ESP_CMD_NULL);
1904			}
1905			scsi_esp_cmd(esp, ESP_CMD_TI);
1906			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1907			return 1;
1908		}
1909		if (esp->ireg & ESP_INTR_FDONE) {
1910			u8 val;
1911
1912			if (esp->rev == FASHME)
1913				val = esp->fifo[0];
1914			else
1915				val = esp_read8(ESP_FDATA);
1916			esp->msg_in[esp->msg_in_len++] = val;
1917
1918			esp_log_msgin("ESP: Got msgin byte %x\n", val);
1919
1920			if (!esp_msgin_process(esp))
1921				esp->msg_in_len = 0;
1922
1923			if (esp->rev == FASHME)
1924				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1925
1926			scsi_esp_cmd(esp, ESP_CMD_MOK);
1927
1928			if (esp->event != ESP_EVENT_FREE_BUS)
1929				esp_event(esp, ESP_EVENT_CHECK_PHASE);
1930		} else {
1931			printk("ESP: MSGIN neither BSERV nor FDONE, resetting\n");
1932			esp_schedule_reset(esp);
1933			return 0;
1934		}
1935		break;
1936	case ESP_EVENT_CMD_START:
1937		memcpy(esp->command_block, esp->cmd_bytes_ptr,
1938		       esp->cmd_bytes_left);
1939		if (esp->rev == FASHME)
1940			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1941		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1942				       esp->cmd_bytes_left, 16, 0,
1943				       ESP_CMD_DMA | ESP_CMD_TI);
1944		esp_event(esp, ESP_EVENT_CMD_DONE);
1945		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1946		break;
1947	case ESP_EVENT_CMD_DONE:
1948		esp->ops->dma_invalidate(esp);
1949		if (esp->ireg & ESP_INTR_BSERV) {
1950			esp_event(esp, ESP_EVENT_CHECK_PHASE);
1951			goto again;
1952		}
1953		esp_schedule_reset(esp);
1954		return 0;
1956
1957	case ESP_EVENT_RESET:
1958		scsi_esp_cmd(esp, ESP_CMD_RS);
1959		break;
1960
1961	default:
1962		printk("ESP: Unexpected event %x, resetting\n",
1963		       esp->event);
1964		esp_schedule_reset(esp);
1965		return 0;
1967	}
1968	return 1;
1969}
1970
1971static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
1972{
1973	struct scsi_cmnd *cmd = ent->cmd;
1974
1975	esp_unmap_dma(esp, cmd);
1976	esp_free_lun_tag(ent, cmd->device->hostdata);
1977	cmd->result = DID_RESET << 16;
1978
1979	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
1980		esp->ops->unmap_single(esp, ent->sense_dma,
1981				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1982		ent->sense_ptr = NULL;
1983	}
1984
1985	cmd->scsi_done(cmd);
1986	list_del(&ent->list);
1987	esp_put_ent(esp, ent);
1988}
1989
1990static void esp_clear_hold(struct scsi_device *dev, void *data)
1991{
1992	struct esp_lun_data *lp = dev->hostdata;
1993
1994	BUG_ON(lp->num_tagged);
1995	lp->hold = 0;
1996}
1997
1998static void esp_reset_cleanup(struct esp *esp)
1999{
2000	struct esp_cmd_entry *ent, *tmp;
2001	int i;
2002
2003	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
2004		struct scsi_cmnd *cmd = ent->cmd;
2005
2006		list_del(&ent->list);
2007		cmd->result = DID_RESET << 16;
2008		cmd->scsi_done(cmd);
2009		esp_put_ent(esp, ent);
2010	}
2011
2012	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
2013		if (ent == esp->active_cmd)
2014			esp->active_cmd = NULL;
2015		esp_reset_cleanup_one(esp, ent);
2016	}
2017
2018	BUG_ON(esp->active_cmd != NULL);
2019
2020	/* Force renegotiation of sync/wide transfers.  */
2021	for (i = 0; i < ESP_MAX_TARGET; i++) {
2022		struct esp_target_data *tp = &esp->target[i];
2023
2024		tp->esp_period = 0;
2025		tp->esp_offset = 0;
2026		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
2027				     ESP_CONFIG3_FSCSI |
2028				     ESP_CONFIG3_FAST);
2029		tp->flags &= ~ESP_TGT_WIDE;
2030		tp->flags |= ESP_TGT_CHECK_NEGO;
2031
2032		if (tp->starget)
2033			__starget_for_each_device(tp->starget, NULL,
2034						  esp_clear_hold);
2035	}
2036	esp->flags &= ~ESP_FLAG_RESETTING;
2037}
2038
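/* Main interrupt service: sample the status register, finish any
 * pending bus reset, then read and decode the interrupt register.
 * A selection in progress is completed via esp_finish_select(), a
 * reselection via esp_reconnect(), and the event state machine is
 * then run until it reports completion.
 */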
2039/* Runs under host->lock */
2040static void __esp_interrupt(struct esp *esp)
2041{
2042	int finish_reset, intr_done;
2043	u8 phase;
2044
2045	esp->sreg = esp_read8(ESP_STATUS);
2046
2047	if (esp->flags & ESP_FLAG_RESETTING) {
2048		finish_reset = 1;
2049	} else {
2050		if (esp_check_gross_error(esp))
2051			return;
2052
2053		finish_reset = esp_check_spur_intr(esp);
2054		if (finish_reset < 0)
2055			return;
2056	}
2057
2058	esp->ireg = esp_read8(ESP_INTRPT);
2059
2060	if (esp->ireg & ESP_INTR_SR)
2061		finish_reset = 1;
2062
2063	if (finish_reset) {
2064		esp_reset_cleanup(esp);
2065		if (esp->eh_reset) {
2066			complete(esp->eh_reset);
2067			esp->eh_reset = NULL;
2068		}
2069		return;
2070	}
2071
2072	phase = (esp->sreg & ESP_STAT_PMASK);
2073	if (esp->rev == FASHME) {
2074		if (((phase != ESP_DIP && phase != ESP_DOP) &&
2075		     esp->select_state == ESP_SELECT_NONE &&
2076		     esp->event != ESP_EVENT_STATUS &&
2077		     esp->event != ESP_EVENT_DATA_DONE) ||
2078		    (esp->ireg & ESP_INTR_RSEL)) {
2079			esp->sreg2 = esp_read8(ESP_STATUS2);
2080			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
2081			    (esp->sreg2 & ESP_STAT2_F1BYTE))
2082				hme_read_fifo(esp);
2083		}
2084	}
2085
2086	esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
2087		     "sreg2[%02x] ireg[%02x]\n",
2088		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
2089
2090	intr_done = 0;
2091
2092	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
2093		printk("ESP: unexpected IREG %02x\n", esp->ireg);
2094		if (esp->ireg & ESP_INTR_IC)
2095			esp_dump_cmd_log(esp);
2096
2097		esp_schedule_reset(esp);
2098	} else {
2099		if (!(esp->ireg & ESP_INTR_RSEL)) {
2100			/* Some combination of FDONE, BSERV, DC.  */
2101			if (esp->select_state != ESP_SELECT_NONE)
2102				intr_done = esp_finish_select(esp);
2103		} else if (esp->ireg & ESP_INTR_RSEL) {
2104			if (esp->active_cmd)
2105				(void) esp_finish_select(esp);
2106			intr_done = esp_reconnect(esp);
2107		}
2108	}
2109	while (!intr_done)
2110		intr_done = esp_process_event(esp);
2111}
2112
2113irqreturn_t scsi_esp_intr(int irq, void *dev_id)
2114{
2115	struct esp *esp = dev_id;
2116	unsigned long flags;
2117	irqreturn_t ret;
2118
2119	spin_lock_irqsave(esp->host->host_lock, flags);
2120	ret = IRQ_NONE;
2121	if (esp->ops->irq_pending(esp)) {
2122		ret = IRQ_HANDLED;
2123		for (;;) {
2124			int i;
2125
2126			__esp_interrupt(esp);
2127			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
2128				break;
2129			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
2130
2131			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
2132				if (esp->ops->irq_pending(esp))
2133					break;
2134			}
2135			if (i == ESP_QUICKIRQ_LIMIT)
2136				break;
2137		}
2138	}
2139	spin_unlock_irqrestore(esp->host->host_lock, flags);
2140
2141	return ret;
2142}
2143EXPORT_SYMBOL(scsi_esp_intr);
2144
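/* Probe the chip revision by poking the config registers and seeing
 * what sticks: a chip that doesn't read back cfg2 is a plain ESP100,
 * one with cfg2 but no cfg3 is an ESP100A, and anything implementing
 * all three config registers is some FAS variant, distinguished by
 * the clock conversion factor computed earlier.
 */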
2145static void esp_get_revision(struct esp *esp)
2146{
2147	u8 val;
2148
2149	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
2150	esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
2151	esp_write8(esp->config2, ESP_CFG2);
2152
2153	val = esp_read8(ESP_CFG2);
2154	val &= ~ESP_CONFIG2_MAGIC;
2155	if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
2156		/* If what we write to cfg2 does not come back, cfg2 is not
2157		 * implemented, therefore this must be a plain esp100.
2158		 */
2159		esp->rev = ESP100;
2160	} else {
2161		esp->config2 = 0;
2162		esp_set_all_config3(esp, 5);
2163		esp->prev_cfg3 = 5;
2164		esp_write8(esp->config2, ESP_CFG2);
2165		esp_write8(0, ESP_CFG3);
2166		esp_write8(esp->prev_cfg3, ESP_CFG3);
2167
2168		val = esp_read8(ESP_CFG3);
2169		if (val != 5) {
2170			/* The cfg2 register is implemented but cfg3
2171			 * is not, so this must be an ESP100A.
2172			 */
2173			esp->rev = ESP100A;
2174		} else {
2175			esp_set_all_config3(esp, 0);
2176			esp->prev_cfg3 = 0;
2177			esp_write8(esp->prev_cfg3, ESP_CFG3);
2178
2179			/* All of cfg{1,2,3} implemented, must be one of
2180			 * the fas variants, figure out which one.
2181			 */
2182			if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2183				esp->rev = FAST;
2184				esp->sync_defp = SYNC_DEFP_FAST;
2185			} else {
2186				esp->rev = ESP236;
2187			}
2188			esp->config2 = 0;
2189			esp_write8(esp->config2, ESP_CFG2);
2190		}
2191	}
2192}
2193
2194static void esp_init_swstate(struct esp *esp)
2195{
2196	int i;
2197
2198	INIT_LIST_HEAD(&esp->queued_cmds);
2199	INIT_LIST_HEAD(&esp->active_cmds);
2200	INIT_LIST_HEAD(&esp->esp_cmd_pool);
2201
2202	/* Start with a clear state, domain validation (via ->slave_configure,
2203	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2204	 * commands.
2205	 */
2206	for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2207		esp->target[i].flags = 0;
2208		esp->target[i].nego_goal_period = 0;
2209		esp->target[i].nego_goal_offset = 0;
2210		esp->target[i].nego_goal_width = 0;
2211		esp->target[i].nego_goal_tags = 0;
2212	}
2213}
2214
2215/* This places the ESP into a known state at boot time. */
2216static void esp_bootup_reset(struct esp *esp)
2217{
2218	u8 val;
2219
2220	/* Reset the DMA */
2221	esp->ops->reset_dma(esp);
2222
2223	/* Reset the ESP */
2224	esp_reset_esp(esp);
2225
2226	/* Reset the SCSI bus, but tell ESP not to generate an irq */
2227	val = esp_read8(ESP_CFG1);
2228	val |= ESP_CONFIG1_SRRDISAB;
2229	esp_write8(val, ESP_CFG1);
2230
2231	scsi_esp_cmd(esp, ESP_CMD_RS);
2232	udelay(400);
2233
2234	esp_write8(esp->config1, ESP_CFG1);
2235
2236	/* Eat any bitrot in the chip and we are done... */
2237	esp_read8(ESP_INTRPT);
2238}
2239
2240static void esp_set_clock_params(struct esp *esp)
2241{
2242	int fhz;
2243	u8 ccf;
2244
2245	/* This is getting messy but it has to be done correctly or else
2246	 * you get weird behavior all over the place.  We are trying to
2247	 * basically figure out three pieces of information.
2248	 *
2249	 * a) Clock Conversion Factor
2250	 *
2251	 *    This is a representation of the input crystal clock frequency
2252	 *    going into the ESP on this machine.  Any operation whose timing
2253	 *    is longer than 400ns depends on this value being correct.  For
2254	 *    example, you'll get blips for arbitration/selection during high
2255	 *    load or with multiple targets if this is not set correctly.
2256	 *
2257	 * b) Selection Time-Out
2258	 *
2259	 *    The ESP isn't very bright and will arbitrate for the bus and try
2260	 *    to select a target forever if you let it.  This value tells the
2261	 *    ESP when it has taken too long to negotiate and that it should
2262	 *    interrupt the CPU so we can see what happened.  The value is
2263	 *    computed as follows (from NCR/Symbios chip docs).
2264	 *
2265	 *          (Time Out Period) *  (Input Clock)
2266	 *    STO = ----------------------------------
2267	 *          (8192) * (Clock Conversion Factor)
2268	 *
2269	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
2270	 *
2271	 * c) Empirical constants for synchronous offset and transfer period
2272	 *    register values
2273	 *
2274	 *    This entails the smallest and largest sync period we could ever
2275	 *    handle on this ESP.
2276	 */
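	/* Illustrative arithmetic, assuming a 20MHz crystal: the code
	 * below computes ccf = (20 + 4) / 5 = 4, and the STO formula
	 * above gives (0.250 * 20000000) / (8192 * 4) ~= 153 for the
	 * 250ms timeout.
	 */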
2277	fhz = esp->cfreq;
2278
2279	ccf = ((fhz / 1000000) + 4) / 5;
2280	if (ccf == 1)
2281		ccf = 2;
2282
2283	/* If we can't find anything reasonable, just assume 20MHz.
2284	 * This is the clock frequency of the older sun4c's where I've
2285	 * been unable to find the clock-frequency PROM property.  All
2286	 * other machines provide useful values it seems.
2287	 */
2288	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
2289		fhz = 20000000;
2290		ccf = 4;
2291	}
2292
2293	esp->cfact = (ccf == 8 ? 0 : ccf);
2294	esp->cfreq = fhz;
2295	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
2296	esp->ctick = ESP_TICK(ccf, esp->ccycle);
2297	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
2298	esp->sync_defp = SYNC_DEFP_SLOW;
2299}
2300
2301static const char *esp_chip_names[] = {
2302	"ESP100",
2303	"ESP100A",
2304	"ESP236",
2305	"FAS236",
2306	"FAS100A",
2307	"FAST",
2308	"FASHME",
2309};
2310
2311static struct scsi_transport_template *esp_transport_template;
2312
2313int scsi_esp_register(struct esp *esp, struct device *dev)
2314{
2315	static int instance;
2316	int err;
2317
2318	esp->host->transportt = esp_transport_template;
2319	esp->host->max_lun = ESP_MAX_LUN;
2320	esp->host->cmd_per_lun = 2;
2321	esp->host->unique_id = instance;
2322
2323	esp_set_clock_params(esp);
2324
2325	esp_get_revision(esp);
2326
2327	esp_init_swstate(esp);
2328
2329	esp_bootup_reset(esp);
2330
2331	printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
2332	       esp->host->unique_id, esp->regs, esp->dma_regs,
2333	       esp->host->irq);
2334	printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2335	       esp->host->unique_id, esp_chip_names[esp->rev],
2336	       esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
2337
2338	/* Let the SCSI bus reset settle. */
2339	ssleep(esp_bus_reset_settle);
2340
2341	err = scsi_add_host(esp->host, dev);
2342	if (err)
2343		return err;
2344
2345	instance++;
2346
2347	scsi_scan_host(esp->host);
2348
2349	return 0;
2350}
2351EXPORT_SYMBOL(scsi_esp_register);
2352
2353void scsi_esp_unregister(struct esp *esp)
2354{
2355	scsi_remove_host(esp->host);
2356}
2357EXPORT_SYMBOL(scsi_esp_unregister);
2358
2359static int esp_target_alloc(struct scsi_target *starget)
2360{
2361	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2362	struct esp_target_data *tp = &esp->target[starget->id];
2363
2364	tp->starget = starget;
2365
2366	return 0;
2367}
2368
2369static void esp_target_destroy(struct scsi_target *starget)
2370{
2371	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2372	struct esp_target_data *tp = &esp->target[starget->id];
2373
2374	tp->starget = NULL;
2375}
2376
2377static int esp_slave_alloc(struct scsi_device *dev)
2378{
2379	struct esp *esp = shost_priv(dev->host);
2380	struct esp_target_data *tp = &esp->target[dev->id];
2381	struct esp_lun_data *lp;
2382
2383	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2384	if (!lp)
2385		return -ENOMEM;
2386	dev->hostdata = lp;
2387
2388	spi_min_period(tp->starget) = esp->min_period;
2389	spi_max_offset(tp->starget) = 15;
2390
2391	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2392		spi_max_width(tp->starget) = 1;
2393	else
2394		spi_max_width(tp->starget) = 0;
2395
2396	return 0;
2397}
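/* Per-device setup: pick a tagged queue depth (ESP_DEFAULT_TAGS capped
 * at ESP_MAX_TAG) when the device supports tagging, but never less
 * than the host's cmd_per_lun, then kick off domain validation once
 * so sync/wide parameters get negotiated.
 */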
2398
2399static int esp_slave_configure(struct scsi_device *dev)
2400{
2401	struct esp *esp = shost_priv(dev->host);
2402	struct esp_target_data *tp = &esp->target[dev->id];
2403	int goal_tags, queue_depth;
2404
2405	goal_tags = 0;
2406
2407	if (dev->tagged_supported) {
2408		/* XXX make this configurable somehow XXX */
2409		goal_tags = ESP_DEFAULT_TAGS;
2410
2411		if (goal_tags > ESP_MAX_TAG)
2412			goal_tags = ESP_MAX_TAG;
2413	}
2414
2415	queue_depth = goal_tags;
2416	if (queue_depth < dev->host->cmd_per_lun)
2417		queue_depth = dev->host->cmd_per_lun;
2418
2419	if (goal_tags) {
2420		scsi_set_tag_type(dev, MSG_ORDERED_TAG);
2421		scsi_activate_tcq(dev, queue_depth);
2422	} else {
2423		scsi_deactivate_tcq(dev, queue_depth);
2424	}
2425	tp->flags |= ESP_TGT_DISCONNECT;
2426
2427	if (!spi_initial_dv(dev->sdev_target))
2428		spi_dv_device(dev);
2429
2430	return 0;
2431}
2432
2433static void esp_slave_destroy(struct scsi_device *dev)
2434{
2435	struct esp_lun_data *lp = dev->hostdata;
2436
2437	kfree(lp);
2438	dev->hostdata = NULL;
2439}
2440
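/* Abort handling, from easiest to hardest case: a command still on
 * the issue queue is simply completed with DID_ABORT; the currently
 * active command gets an ABORT_TASK_SET message pushed out with ATN
 * asserted, and we wait up to five seconds for it to complete; a
 * disconnected command cannot be aborted here and is failed up to the
 * midlayer, which will escalate to a bus or host reset.
 */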
2441static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
2442{
2443	struct esp *esp = shost_priv(cmd->device->host);
2444	struct esp_cmd_entry *ent, *tmp;
2445	struct completion eh_done;
2446	unsigned long flags;
2447
2448	/* XXX This helps a lot with debugging but might be a bit
2449	 * XXX much for the final driver.
2450	 */
2451	spin_lock_irqsave(esp->host->host_lock, flags);
2452	printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
2453	       esp->host->unique_id, cmd, cmd->cmnd[0]);
2454	ent = esp->active_cmd;
2455	if (ent)
2456		printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
2457		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
2458	list_for_each_entry(ent, &esp->queued_cmds, list) {
2459		printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
2460		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
2461	}
2462	list_for_each_entry(ent, &esp->active_cmds, list) {
2463		printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
2464		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
2465	}
2466	esp_dump_cmd_log(esp);
2467	spin_unlock_irqrestore(esp->host->host_lock, flags);
2468
2469	spin_lock_irqsave(esp->host->host_lock, flags);
2470
2471	ent = NULL;
2472	list_for_each_entry(tmp, &esp->queued_cmds, list) {
2473		if (tmp->cmd == cmd) {
2474			ent = tmp;
2475			break;
2476		}
2477	}
2478
2479	if (ent) {
2480		/* Easiest case, we didn't even issue the command
2481		 * yet so it is trivial to abort.
2482		 */
2483		list_del(&ent->list);
2484
2485		cmd->result = DID_ABORT << 16;
2486		cmd->scsi_done(cmd);
2487
2488		esp_put_ent(esp, ent);
2489
2490		goto out_success;
2491	}
2492
2493	init_completion(&eh_done);
2494
2495	ent = esp->active_cmd;
2496	if (ent && ent->cmd == cmd) {
2497		/* Command is the currently active command on
2498		 * the bus.  If we already have an output message
2499		 * pending, no dice.
2500		 */
2501		if (esp->msg_out_len)
2502			goto out_failure;
2503
2504		/* Send out an abort, encouraging the target to
2505		 * go to MSGOUT phase by asserting ATN.
2506		 */
2507		esp->msg_out[0] = ABORT_TASK_SET;
2508		esp->msg_out_len = 1;
2509		ent->eh_done = &eh_done;
2510
2511		scsi_esp_cmd(esp, ESP_CMD_SATN);
2512	} else {
2513		/* The command is disconnected.  This is not easy to
2514		 * abort.  For now we fail and let the scsi error
2515		 * handling layer go try a scsi bus reset or host
2516		 * reset.
2517		 *
2518		 * What we could do is put together a scsi command
2519		 * solely for the purpose of sending an abort message
2520		 * to the target.  Coming up with all the code to
2521		 * cook up scsi commands, special case them everywhere,
2522		 * etc. is for questionable gain and it would be better
2523		 * if the generic scsi error handling layer could do at
2524		 * least some of that for us.
2525		 *
2526		 * Anyway, this is an area for potential future improvement
2527		 * in this driver.
2528		 */
2529		goto out_failure;
2530	}
2531
2532	spin_unlock_irqrestore(esp->host->host_lock, flags);
2533
2534	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
2535		spin_lock_irqsave(esp->host->host_lock, flags);
2536		ent->eh_done = NULL;
2537		spin_unlock_irqrestore(esp->host->host_lock, flags);
2538
2539		return FAILED;
2540	}
2541
2542	return SUCCESS;
2543
2544out_success:
2545	spin_unlock_irqrestore(esp->host->host_lock, flags);
2546	return SUCCESS;
2547
2548out_failure:
2549	/* XXX This might be a good location to set ESP_TGT_BROKEN
2550	 * XXX since we know which target/lun in particular is
2551	 * XXX causing trouble.
2552	 */
2553	spin_unlock_irqrestore(esp->host->host_lock, flags);
2554	return FAILED;
2555}
2556
2557static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
2558{
2559	struct esp *esp = shost_priv(cmd->device->host);
2560	struct completion eh_reset;
2561	unsigned long flags;
2562
2563	init_completion(&eh_reset);
2564
2565	spin_lock_irqsave(esp->host->host_lock, flags);
2566
2567	esp->eh_reset = &eh_reset;
2568
2569	/* XXX This is too simple... We should add lots of
2570	 * XXX checks here so that if we find that the chip is
2571	 * XXX very wedged we return failure immediately so
2572	 * XXX that we can perform a full chip reset.
2573	 */
2574	esp->flags |= ESP_FLAG_RESETTING;
2575	scsi_esp_cmd(esp, ESP_CMD_RS);
2576
2577	spin_unlock_irqrestore(esp->host->host_lock, flags);
2578
2579	ssleep(esp_bus_reset_settle);
2580
2581	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
2582		spin_lock_irqsave(esp->host->host_lock, flags);
2583		esp->eh_reset = NULL;
2584		spin_unlock_irqrestore(esp->host->host_lock, flags);
2585
2586		return FAILED;
2587	}
2588
2589	return SUCCESS;
2590}
2591
2592/* All bets are off, reset the entire device.  */
2593static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
2594{
2595	struct esp *esp = shost_priv(cmd->device->host);
2596	unsigned long flags;
2597
2598	spin_lock_irqsave(esp->host->host_lock, flags);
2599	esp_bootup_reset(esp);
2600	esp_reset_cleanup(esp);
2601	spin_unlock_irqrestore(esp->host->host_lock, flags);
2602
2603	ssleep(esp_bus_reset_settle);
2604
2605	return SUCCESS;
2606}
2607
2608static const char *esp_info(struct Scsi_Host *host)
2609{
2610	return "esp";
2611}
2612
2613struct scsi_host_template scsi_esp_template = {
2614	.module			= THIS_MODULE,
2615	.name			= "esp",
2616	.info			= esp_info,
2617	.queuecommand		= esp_queuecommand,
2618	.target_alloc		= esp_target_alloc,
2619	.target_destroy		= esp_target_destroy,
2620	.slave_alloc		= esp_slave_alloc,
2621	.slave_configure	= esp_slave_configure,
2622	.slave_destroy		= esp_slave_destroy,
2623	.eh_abort_handler	= esp_eh_abort_handler,
2624	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
2625	.eh_host_reset_handler	= esp_eh_host_reset_handler,
2626	.can_queue		= 7,
2627	.this_id		= 7,
2628	.sg_tablesize		= SG_ALL,
2629	.use_clustering		= ENABLE_CLUSTERING,
2630	.max_sectors		= 0xffff,
2631	.skip_settle_delay	= 1,
2632};
2633EXPORT_SYMBOL(scsi_esp_template);
2634
2635static void esp_get_signalling(struct Scsi_Host *host)
2636{
2637	struct esp *esp = shost_priv(host);
2638	enum spi_signal_type type;
2639
2640	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2641		type = SPI_SIGNAL_HVD;
2642	else
2643		type = SPI_SIGNAL_SE;
2644
2645	spi_signalling(host) = type;
2646}
2647
2648static void esp_set_offset(struct scsi_target *target, int offset)
2649{
2650	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2651	struct esp *esp = shost_priv(host);
2652	struct esp_target_data *tp = &esp->target[target->id];
2653
2654	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2655		tp->nego_goal_offset = 0;
2656	else
2657		tp->nego_goal_offset = offset;
2658	tp->flags |= ESP_TGT_CHECK_NEGO;
2659}
2660
2661static void esp_set_period(struct scsi_target *target, int period)
2662{
2663	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2664	struct esp *esp = shost_priv(host);
2665	struct esp_target_data *tp = &esp->target[target->id];
2666
2667	tp->nego_goal_period = period;
2668	tp->flags |= ESP_TGT_CHECK_NEGO;
2669}
2670
2671static void esp_set_width(struct scsi_target *target, int width)
2672{
2673	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2674	struct esp *esp = shost_priv(host);
2675	struct esp_target_data *tp = &esp->target[target->id];
2676
2677	tp->nego_goal_width = (width ? 1 : 0);
2678	tp->flags |= ESP_TGT_CHECK_NEGO;
2679}
2680
2681static struct spi_function_template esp_transport_ops = {
2682	.set_offset		= esp_set_offset,
2683	.show_offset		= 1,
2684	.set_period		= esp_set_period,
2685	.show_period		= 1,
2686	.set_width		= esp_set_width,
2687	.show_width		= 1,
2688	.get_signalling		= esp_get_signalling,
2689};
2690
2691static int __init esp_init(void)
2692{
2693	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2694		     sizeof(struct esp_cmd_priv));
2695
2696	esp_transport_template = spi_attach_transport(&esp_transport_ops);
2697	if (!esp_transport_template)
2698		return -ENODEV;
2699
2700	return 0;
2701}
2702
2703static void __exit esp_exit(void)
2704{
2705	spi_release_transport(esp_transport_template);
2706}
2707
2708MODULE_DESCRIPTION("ESP SCSI driver core");
2709MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2710MODULE_LICENSE("GPL");
2711MODULE_VERSION(DRV_VERSION);
2712
2713module_param(esp_bus_reset_settle, int, 0);
2714MODULE_PARM_DESC(esp_bus_reset_settle,
2715		 "ESP scsi bus reset delay in seconds");
2716
2717module_param(esp_debug, int, 0);
2718MODULE_PARM_DESC(esp_debug,
2719"ESP bitmapped debugging message enable value:\n"
2720"	0x00000001	Log interrupt events\n"
2721"	0x00000002	Log scsi commands\n"
2722"	0x00000004	Log resets\n"
2723"	0x00000008	Log message in events\n"
2724"	0x00000010	Log message out events\n"
2725"	0x00000020	Log command completion\n"
2726"	0x00000040	Log disconnects\n"
2727"	0x00000080	Log data start\n"
2728"	0x00000100	Log data done\n"
2729"	0x00000200	Log reconnects\n"
2730"	0x00000400	Log auto-sense data\n"
2731);
2732
2733module_init(esp_init);
2734module_exit(esp_exit);