/* Extracted from the Linux kernel source tree (v3.15): drivers/scsi/esp_scsi.c */
   1/* esp_scsi.c: ESP SCSI driver.
   2 *
   3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/types.h>
   8#include <linux/slab.h>
   9#include <linux/delay.h>
  10#include <linux/list.h>
  11#include <linux/completion.h>
  12#include <linux/kallsyms.h>
  13#include <linux/module.h>
  14#include <linux/moduleparam.h>
  15#include <linux/init.h>
  16#include <linux/irqreturn.h>
  17
  18#include <asm/irq.h>
  19#include <asm/io.h>
  20#include <asm/dma.h>
  21
  22#include <scsi/scsi.h>
  23#include <scsi/scsi_host.h>
  24#include <scsi/scsi_cmnd.h>
  25#include <scsi/scsi_device.h>
  26#include <scsi/scsi_tcq.h>
  27#include <scsi/scsi_dbg.h>
  28#include <scsi/scsi_transport_spi.h>
  29
  30#include "esp_scsi.h"
  31
  32#define DRV_MODULE_NAME		"esp"
  33#define PFX DRV_MODULE_NAME	": "
  34#define DRV_VERSION		"2.000"
  35#define DRV_MODULE_RELDATE	"April 19, 2007"
  36
  37/* SCSI bus reset settle time in seconds.  */
  38static int esp_bus_reset_settle = 3;
  39
  40static u32 esp_debug;
  41#define ESP_DEBUG_INTR		0x00000001
  42#define ESP_DEBUG_SCSICMD	0x00000002
  43#define ESP_DEBUG_RESET		0x00000004
  44#define ESP_DEBUG_MSGIN		0x00000008
  45#define ESP_DEBUG_MSGOUT	0x00000010
  46#define ESP_DEBUG_CMDDONE	0x00000020
  47#define ESP_DEBUG_DISCONNECT	0x00000040
  48#define ESP_DEBUG_DATASTART	0x00000080
  49#define ESP_DEBUG_DATADONE	0x00000100
  50#define ESP_DEBUG_RECONNECT	0x00000200
  51#define ESP_DEBUG_AUTOSENSE	0x00000400
 
 
  52
/* Conditional trace helpers.  Each macro emits its printk() only when the
 * matching ESP_DEBUG_* bit is set in the module-wide esp_debug mask; the
 * do { } while (0) wrapper keeps every macro safe as a single statement
 * (e.g. in an un-braced if/else).
 */
#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		printk(f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		printk(f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		printk(f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		printk(f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		printk(f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		printk(f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		printk(f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		printk(f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		printk(f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		printk(f, ## a); \
} while (0)

/* Register accessors go through the bus-glue ops vector so this core
 * works regardless of how the chip is wired up (PIO, MMIO, ...).
 * NOTE: both macros expect a local variable named 'esp' in scope.
 */
#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)
 105
/* Snapshot the most recently latched chip registers and driver state
 * into an event-log entry; shared by the command and event trace paths.
 */
static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}
 116
 117void scsi_esp_cmd(struct esp *esp, u8 val)
 118{
 119	struct esp_event_ent *p;
 120	int idx = esp->esp_event_cur;
 121
 122	p = &esp->esp_event_log[idx];
 123	p->type = ESP_EVENT_TYPE_CMD;
 124	p->val = val;
 125	esp_log_fill_regs(esp, p);
 126
 127	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
 128
 
 129	esp_write8(val, ESP_CMD);
 130}
 131EXPORT_SYMBOL(scsi_esp_cmd);
 132
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 133static void esp_event(struct esp *esp, u8 val)
 134{
 135	struct esp_event_ent *p;
 136	int idx = esp->esp_event_cur;
 137
 138	p = &esp->esp_event_log[idx];
 139	p->type = ESP_EVENT_TYPE_EVENT;
 140	p->val = val;
 141	esp_log_fill_regs(esp, p);
 142
 143	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
 144
 145	esp->event = val;
 146}
 147
 148static void esp_dump_cmd_log(struct esp *esp)
 149{
 150	int idx = esp->esp_event_cur;
 151	int stop = idx;
 152
 153	printk(KERN_INFO PFX "esp%d: Dumping command log\n",
 154	       esp->host->unique_id);
 155	do {
 156		struct esp_event_ent *p = &esp->esp_event_log[idx];
 157
 158		printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
 159		       esp->host->unique_id, idx,
 160		       p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");
 161
 162		printk("val[%02x] sreg[%02x] seqreg[%02x] "
 163		       "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
 164		       p->val, p->sreg, p->seqreg,
 165		       p->sreg2, p->ireg, p->select_state, p->event);
 166
 167		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
 168	} while (idx != stop);
 169}
 170
/* Empty the chip FIFO.  On ESP236 the flush is additionally polled to
 * completion via the FIFO-flags byte count; other revisions get no poll
 * loop (NOTE(review): only ESP236 is special-cased here -- presumably a
 * chip erratum, confirm against the datasheet).
 */
static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		/* Wait up to ~1ms (1000 x udelay(1)) for the count to clear. */
		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: ESP_FF_BYTES "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}
	}
}
 188
/* Drain the chip FIFO into esp->fifo[] on FASHME.  Two data-register
 * reads are performed per FIFO-flags count (NOTE(review): consistent
 * with a 16-bit-wide HME FIFO -- confirm against the datasheet).  The
 * ESP_STAT2_F1BYTE status bit signals a trailing odd byte, which is
 * pushed out by writing a pad byte first, then the FIFO is flushed.
 * The number of bytes captured is left in esp->fifo_cnt.
 */
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}
 205
 206static void esp_set_all_config3(struct esp *esp, u8 val)
 207{
 208	int i;
 209
 210	for (i = 0; i < ESP_MAX_TARGET; i++)
 211		esp->target[i].esp_config3 = val;
 212}
 213
/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	/* Fast variants need CONFIG2_FENAB set before the ID code can be
	 * read below.
	 */
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		/* Refine the generic FAST revision into the concrete chip
		 * (FAS236 / FASHME / FAS100A) from the unique-ID register.
		 */
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	/* Round the ns-based periods up to 4ns clock-tick units. */
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	esp_write8(esp->config1, ESP_CFG1);
	/* Per-revision CONFIG2/CONFIG3 programming and REQ/ACK delay
	 * (radelay) selection.
	 */
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
		/* Fast 236 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
 325
/* Map the command's scatterlist for DMA and initialize per-command
 * bookkeeping: cur_sg/cur_residue track the active segment,
 * tot_residue the total bytes left to transfer.  No-op when the
 * command has no data phase.
 *
 * NOTE(review): segments are walked with &sg[i] here and cur_sg++ in
 * esp_advance_dma, i.e. plain array arithmetic -- this assumes a flat
 * (non-chained) scatterlist; verify callers never submit chained
 * sg tables.
 */
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int dir = cmd->sc_data_direction;
	int total, i;

	if (dir == DMA_NONE)
		return;

	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
	spriv->cur_residue = sg_dma_len(sg);
	spriv->cur_sg = sg;

	total = 0;
	for (i = 0; i < spriv->u.num_sg; i++)
		total += sg_dma_len(&sg[i]);
	spriv->tot_residue = total;
}
 345
 346static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
 347				   struct scsi_cmnd *cmd)
 348{
 349	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 350
 351	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 352		return ent->sense_dma +
 353			(ent->sense_ptr - cmd->sense_buffer);
 354	}
 355
 356	return sg_dma_address(p->cur_sg) +
 357		(sg_dma_len(p->cur_sg) -
 358		 p->cur_residue);
 359}
 360
 361static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
 362				    struct scsi_cmnd *cmd)
 363{
 364	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 365
 366	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 367		return SCSI_SENSE_BUFFERSIZE -
 368			(ent->sense_ptr - cmd->sense_buffer);
 369	}
 370	return p->cur_residue;
 371}
 372
 373static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
 374			    struct scsi_cmnd *cmd, unsigned int len)
 375{
 376	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 377
 378	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 379		ent->sense_ptr += len;
 380		return;
 381	}
 382
 383	p->cur_residue -= len;
 384	p->tot_residue -= len;
 385	if (p->cur_residue < 0 || p->tot_residue < 0) {
 386		printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
 387		       esp->host->unique_id);
 388		printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
 389		       "len[%u]\n",
 390		       esp->host->unique_id,
 391		       p->cur_residue, p->tot_residue, len);
 392		p->cur_residue = 0;
 393		p->tot_residue = 0;
 394	}
 395	if (!p->cur_residue && p->tot_residue) {
 396		p->cur_sg++;
 397		p->cur_residue = sg_dma_len(p->cur_sg);
 398	}
 399}
 400
 401static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
 402{
 403	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 404	int dir = cmd->sc_data_direction;
 405
 406	if (dir == DMA_NONE)
 407		return;
 408
 409	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
 410}
 411
 412static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 413{
 414	struct scsi_cmnd *cmd = ent->cmd;
 415	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 416
 417	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 418		ent->saved_sense_ptr = ent->sense_ptr;
 419		return;
 420	}
 421	ent->saved_cur_residue = spriv->cur_residue;
 422	ent->saved_cur_sg = spriv->cur_sg;
 423	ent->saved_tot_residue = spriv->tot_residue;
 424}
 425
 426static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 427{
 428	struct scsi_cmnd *cmd = ent->cmd;
 429	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 430
 431	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 432		ent->sense_ptr = ent->saved_sense_ptr;
 433		return;
 434	}
 435	spriv->cur_residue = ent->saved_cur_residue;
 436	spriv->cur_sg = ent->saved_cur_sg;
 437	spriv->tot_residue = ent->saved_tot_residue;
 438}
 439
 440static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
 441{
 442	if (cmd->cmd_len == 6 ||
 443	    cmd->cmd_len == 10 ||
 444	    cmd->cmd_len == 12) {
 445		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
 446	} else {
 447		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
 448	}
 449}
 450
 451static void esp_write_tgt_config3(struct esp *esp, int tgt)
 452{
 453	if (esp->rev > ESP100A) {
 454		u8 val = esp->target[tgt].esp_config3;
 455
 456		if (val != esp->prev_cfg3) {
 457			esp->prev_cfg3 = val;
 458			esp_write8(val, ESP_CFG3);
 459		}
 460	}
 461}
 462
 463static void esp_write_tgt_sync(struct esp *esp, int tgt)
 464{
 465	u8 off = esp->target[tgt].esp_offset;
 466	u8 per = esp->target[tgt].esp_period;
 467
 468	if (off != esp->prev_soff) {
 469		esp->prev_soff = off;
 470		esp_write8(off, ESP_SOFF);
 471	}
 472	if (per != esp->prev_stp) {
 473		esp->prev_stp = per;
 474		esp_write8(per, ESP_STP);
 475	}
 476}
 477
 478static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
 479{
 480	if (esp->rev == FASHME) {
 481		/* Arbitrary segment boundaries, 24-bit counts.  */
 482		if (dma_len > (1U << 24))
 483			dma_len = (1U << 24);
 484	} else {
 485		u32 base, end;
 486
 487		/* ESP chip limits other variants by 16-bits of transfer
 488		 * count.  Actually on FAS100A and FAS236 we could get
 489		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
 490		 * in the ESP_CFG2 register but that causes other unwanted
 491		 * changes so we don't use it currently.
 492		 */
 493		if (dma_len > (1U << 16))
 494			dma_len = (1U << 16);
 495
 496		/* All of the DMA variants hooked up to these chips
 497		 * cannot handle crossing a 24-bit address boundary.
 498		 */
 499		base = dma_addr & ((1U << 24) - 1U);
 500		end = base + dma_len;
 501		if (end > (1U << 24))
 502			end = (1U <<24);
 503		dma_len = end - base;
 504	}
 505	return dma_len;
 506}
 507
 508static int esp_need_to_nego_wide(struct esp_target_data *tp)
 509{
 510	struct scsi_target *target = tp->starget;
 511
 512	return spi_width(target) != tp->nego_goal_width;
 513}
 514
 515static int esp_need_to_nego_sync(struct esp_target_data *tp)
 516{
 517	struct scsi_target *target = tp->starget;
 518
 519	/* When offset is zero, period is "don't care".  */
 520	if (!spi_offset(target) && !tp->nego_goal_offset)
 521		return 0;
 522
 523	if (spi_offset(target) == tp->nego_goal_offset &&
 524	    spi_period(target) == tp->nego_goal_period)
 525		return 0;
 526
 527	return 1;
 528}
 529
/* Claim the per-lun slot this command needs before it may be issued.
 * Returns 0 on success, -EBUSY if the command must wait.
 *
 * Untagged and tagged commands are mutually exclusive on a lun: an
 * untagged command needs the lun to itself, so while tagged commands
 * are in flight the queue is "plugged" (lp->hold) until they drain.
 */
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken?  */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	} else {
		/* Tagged command, see if blocked by a
		 * non-tagged one.
		 */
		if (lp->non_tagged_cmd || lp->hold)
			return -EBUSY;
	}

	/* orig_tag[1] is the tag value; each live tag owns one slot. */
	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}
 574
 575static void esp_free_lun_tag(struct esp_cmd_entry *ent,
 576			     struct esp_lun_data *lp)
 577{
 578	if (ent->orig_tag[0]) {
 579		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
 580		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
 581		lp->num_tagged--;
 582	} else {
 583		BUG_ON(lp->non_tagged_cmd != ent);
 584		lp->non_tagged_cmd = NULL;
 585	}
 586}
 587
/* When a contingent allegiance conditon is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;


	/* First pass for this entry: map the command's sense buffer for
	 * DMA.  (sense_ptr doubles as the "already mapped" marker.)
	 */
	if (!ent->sense_ptr) {
		esp_log_autosense("esp%d: Doing auto-sense for "
				  "tgt[%d] lun[%d]\n",
				  esp->host->unique_id, tgt, lun);

		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	/* Build IDENTIFY + a 6-byte REQUEST_SENSE CDB in the command
	 * block buffer.
	 */
	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	/* Pre-SCSI-3 devices want the lun encoded in CDB byte 1. */
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	/* DMA the identify+CDB out and start selection. */
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
}
 650
/* Scan the queued-commands list for the first entry that can be issued
 * right now, allocating its lun tag slot in the process.  Autosense
 * entries always win and are issued untagged.  Returns NULL when
 * nothing is currently issuable.
 */
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		/* tag[0]/tag[1] = tag message type / tag value; zeroed
		 * when the midlayer gives us no tag.
		 */
		if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		/* Lun busy (plugged or slot taken): try the next entry. */
		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}
 681
/* Issue engine: if the chip is idle (no active command, not resetting),
 * pick the next issuable command, build the identify/tag/CDB bytes in
 * the command block, and start selection.  Commands needing negotiation,
 * non-standard CDB lengths, or tagged selection on ESP100 go through
 * the "slow" select-with-ATN-and-stop path instead.
 */
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	/* Autosense entries have their own dedicated issue path. */
	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this.  */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		/* Wide nego only on FASHME; wide must settle before sync. */
		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command.  */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		/* Fast path: identify [+ tag message] + CDB go out in one
		 * DMA'd select sequence.
		 */
		start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		/* Slow path: select with ATN and stop; the CDB is fed
		 * later during the message-out/command phases.
		 */
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		/* Tag message bytes must precede any queued nego message. */
		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, start_cmd);
}
 834
 835static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
 836{
 837	struct list_head *head = &esp->esp_cmd_pool;
 838	struct esp_cmd_entry *ret;
 839
 840	if (list_empty(head)) {
 841		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
 842	} else {
 843		ret = list_entry(head->next, struct esp_cmd_entry, list);
 844		list_del(&ret->list);
 845		memset(ret, 0, sizeof(*ret));
 846	}
 847	return ret;
 848}
 849
/* Return a command entry to the free pool for reuse by esp_get_ent(). */
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}
 854
/* Complete a command: tear down its DMA mapping and lun tag slot, set
 * the result, notify any error-handler waiter, finalize autosense
 * bookkeeping, hand the command back to the midlayer, recycle the
 * entry, and try to issue the next queued command.
 */
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	/* Wake an eh_abort/eh_bus_reset handler waiting on this command. */
	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}
 905
 906static unsigned int compose_result(unsigned int status, unsigned int message,
 907				   unsigned int driver_code)
 908{
 909	return (status | (message << 8) | (driver_code << 16));
 910}
 911
 912static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
 913{
 914	struct scsi_device *dev = ent->cmd->device;
 915	struct esp_lun_data *lp = dev->hostdata;
 916
 917	scsi_track_queue_full(dev, lp->num_tagged - 1);
 918}
 919
/* ->queuecommand implementation, called with the host lock held via the
 * DEF_SCSI_QCMD wrapper below.  Allocates a command entry, queues it,
 * and kicks the issue engine; selection proceeds asynchronously.
 * Returns SCSI_MLQUEUE_HOST_BUSY when no entry can be allocated.
 */
static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	/* All-ones marks the single-buffer DMA address as unused. */
	spriv->u.dma_addr = ~(dma_addr_t)0x0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)
 946
 947static int esp_check_gross_error(struct esp *esp)
 948{
 949	if (esp->sreg & ESP_STAT_SPAM) {
 950		/* Gross Error, could be one of:
 951		 * - top of fifo overwritten
 952		 * - top of command register overwritten
 953		 * - DMA programmed with wrong direction
 954		 * - improper phase change
 955		 */
 956		printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
 957		       esp->host->unique_id, esp->sreg);
 958		/* XXX Reset the chip. XXX */
 959		return 1;
 960	}
 961	return 0;
 962}
 963
/* Sanity-check an interrupt against the latched status register.
 * Returns 0 to continue normal processing, 1 when the interrupt was a
 * bus reset (handled via ireg), and -1 for a spurious interrupt or DMA
 * error that should abort processing.
 */
static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				printk(KERN_ERR PFX "esp%d: Spurious irq, "
				       "sreg=%02x.\n",
				       esp->host->unique_id, esp->sreg);
				return -1;
			}

			printk(KERN_ERR PFX "esp%d: DMA error\n",
			       esp->host->unique_id);

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}
1002
/* Arrange for a reset: flag the driver as resetting and move the state
 * machine to ESP_EVENT_RESET.  The reset itself is carried out later by
 * the event-processing code, not here.
 */
static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("ESP: esp_schedule_reset() from %pf\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}
1010
/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 *
 * Returns the command entry matching the reselecting tag, or NULL on
 * any failure (caller then schedules a reset).
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp_log_reconnect("ESP: reconnect tag, ");

	/* Busy-wait (bounded) for the interrupt that follows reselection. */
	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
		       esp->host->unique_id);
		return NULL;
	}

	/* The target must now be in message-in phase with the tag bytes. */
	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
		       esp->host->unique_id, esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message.  */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	/* Poll (with udelay) for function-done marking tag-byte arrival. */
	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	/* Byte 0 is the tag message type, byte 1 the tag value. */
	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
		       "type %02x.\n",
		       esp->host->unique_id, esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
		       "tag %02x.\n",
		       esp->host->unique_id, esp->command_block[1]);
		return NULL;
	}

	return ent;
}
1106
/* Handle a reselection by a disconnected target: decode which target
 * and lun are reconnecting, find the matching command entry (by tag if
 * necessary), and make it the active command.  Returns 1 on success,
 * 0 after scheduling a reset on any inconsistency.
 */
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		/* Exactly one other bit must remain set (power-of-two test). */
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	/* Reinstate this target's sync/config settings in the chip. */
	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
		       "tgt[%u] lun[%u]\n",
		       esp->host->unique_id, target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	/* No untagged command outstanding: must be a tagged reconnect. */
	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	/* A pending abort for this command goes out as the first message. */
	if (ent->flags & ESP_CMD_FLAG_ABORT) {
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}
1197
/* Process the interrupt that terminates a selection attempt.  Depending
 * on the interrupt-register contents this is a reselection collision,
 * a selection timeout (disconnect), a successful selection, or garbage.
 * Returns non-zero when interrupt handling is complete, zero when the
 * caller must continue event processing (or a reset was scheduled).
 */
static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;
	u8 orig_select_state;

	orig_select_state = esp->select_state;

	/* No longer selecting.  */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		/* We were reselected while trying to select. */
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			/* Autosense commands only carry the sense buffer
			 * mapping, nothing else to unwind.
			 */
			esp->ops->unmap_single(esp, ent->sense_dma,
					       SCSI_SENSE_BUFFERSIZE,
					       DMA_FROM_DEVICE);
			ent->sense_ptr = NULL;
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a slow command, negotiation, etc.
		 * we'll do the right thing as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	printk("ESP: Unexpected selection completion ireg[%x].\n",
	       esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}
1299
/* Work out how many bytes of the current data phase actually made it
 * to/from the target, from the chip's transfer counter and the FIFO
 * residue.  Returns the byte count, or -1 when the ESP100
 * spurious-REQ hardware bug is detected (caller must reset and avoid
 * sync transfers to this target).
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	/* In wide mode each FIFO slot holds two bytes. */
	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	/* Unless the counter expired (STAT_TCNT set), read back how many
	 * bytes the chip still had left to transfer.  FASHME has a third
	 * (high) counter byte.
	 */
	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	/* On transfers toward the target (FLAG_WRITE clear), bytes still
	 * sitting in the FIFO never made it onto the bus.
	 */
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
1361
/* Commit a synchronous transfer agreement: record it in the SPI
 * transport attributes, program the chip's offset/period (and fast
 * config3 bit where supported) and cache the values for use on
 * reconnect.  scsi_period/scsi_offset are the SCSI-level values for
 * the transport layer; esp_stp/esp_soff are the raw register
 * encodings.  Passing all zeroes drops back to asynchronous.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		/* Merge the req/ack delay bits into the offset register. */
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			/* Periods below 50 require the fast-SCSI config3
			 * bit; FASHME must then drop the req/ack delay.
			 */
			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	/* Cache the register values so reconnects can restore them. */
	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	/* Sync negotiation with this target is now settled. */
	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}
1400
/* The target rejected our last message.  If we were negotiating wide,
 * fall back to narrow and move on to sync negotiation if still needed;
 * if we were negotiating sync, fall back to asynchronous transfers.
 * Any other rejection is unexpected and aborts the task set.
 */
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		/* WDTR rejected: stay narrow. */
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			/* Continue with a sync negotiation attempt. */
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		/* SDTR rejected: go fully asynchronous. */
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1441
/* Handle an incoming SDTR message: msg_in[3] is the transfer period,
 * msg_in[4] the REQ/ACK offset.  Accept by programming the chip via
 * esp_setsync(), counter-propose asynchronous when the period is too
 * long, or reject the message outright.
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	/* Only accept SDTR while we are actually negotiating sync. */
	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	/* Offsets beyond 15 are not supported by the chip. */
	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			/* Too slow for us: counter with async. */
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		/* Convert the SCSI period (period << 2, i.e. 4ns units)
		 * into chip clock ticks for the transfer period register.
		 */
		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	/* Send back an SDTR with the parameters we can support. */
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1492
/* Handle an incoming WDTR message: msg_in[3] encodes the transfer
 * width exponent (8 << n bits).  Only FASHME supports wide transfers;
 * accept 8 or 16 bit widths, update config3 accordingly, then continue
 * with sync negotiation if one is still required.
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	/* Only the FASHME chip can do wide transfers. */
	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	/* Only accept WDTR while we are negotiating wide. */
	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	/* Wide settled; clear any stale sync agreement and either
	 * finish negotiation or move on to SDTR.
	 */
	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1541
1542static void esp_msgin_extended(struct esp *esp)
1543{
1544	struct esp_cmd_entry *ent = esp->active_cmd;
1545	struct scsi_cmnd *cmd = ent->cmd;
1546	struct esp_target_data *tp;
1547	int tgt = cmd->device->id;
1548
1549	tp = &esp->target[tgt];
1550	if (esp->msg_in[2] == EXTENDED_SDTR) {
1551		esp_msgin_sdtr(esp, tp);
1552		return;
1553	}
1554	if (esp->msg_in[2] == EXTENDED_WDTR) {
1555		esp_msgin_wdtr(esp, tp);
1556		return;
1557	}
1558
1559	printk("ESP: Unexpected extended msg type %x\n",
1560	       esp->msg_in[2]);
1561
1562	esp->msg_out[0] = ABORT_TASK_SET;
1563	esp->msg_out_len = 1;
1564	scsi_esp_cmd(esp, ESP_CMD_SATN);
1565}
1566
/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message, zero when
 * the message has been fully consumed (or rejected).
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		printk("ESP: Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		/* Byte 1 gives the length of the rest of the message;
		 * keep collecting until all of it has arrived.
		 */
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		/* Only a single-byte residue is meaningful here. */
		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		/* Back the data pointer up by one byte, stepping back a
		 * scatterlist entry if we sat exactly on a boundary.
		 */
		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		/* Remember which message arrived; ESP_EVENT_FREE_BUS
		 * acts on it once the bus goes free.
		 */
		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
1640
1641static int esp_process_event(struct esp *esp)
1642{
1643	int write;
1644
1645again:
1646	write = 0;
 
 
1647	switch (esp->event) {
1648	case ESP_EVENT_CHECK_PHASE:
1649		switch (esp->sreg & ESP_STAT_PMASK) {
1650		case ESP_DOP:
1651			esp_event(esp, ESP_EVENT_DATA_OUT);
1652			break;
1653		case ESP_DIP:
1654			esp_event(esp, ESP_EVENT_DATA_IN);
1655			break;
1656		case ESP_STATP:
1657			esp_flush_fifo(esp);
1658			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1659			esp_event(esp, ESP_EVENT_STATUS);
1660			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1661			return 1;
1662
1663		case ESP_MOP:
1664			esp_event(esp, ESP_EVENT_MSGOUT);
1665			break;
1666
1667		case ESP_MIP:
1668			esp_event(esp, ESP_EVENT_MSGIN);
1669			break;
1670
1671		case ESP_CMDP:
1672			esp_event(esp, ESP_EVENT_CMD_START);
1673			break;
1674
1675		default:
1676			printk("ESP: Unexpected phase, sreg=%02x\n",
1677			       esp->sreg);
 
1678			esp_schedule_reset(esp);
1679			return 0;
1680		}
1681		goto again;
1682		break;
1683
1684	case ESP_EVENT_DATA_IN:
1685		write = 1;
1686		/* fallthru */
1687
1688	case ESP_EVENT_DATA_OUT: {
1689		struct esp_cmd_entry *ent = esp->active_cmd;
1690		struct scsi_cmnd *cmd = ent->cmd;
1691		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1692		unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1693
1694		if (esp->rev == ESP100)
1695			scsi_esp_cmd(esp, ESP_CMD_NULL);
1696
1697		if (write)
1698			ent->flags |= ESP_CMD_FLAG_WRITE;
1699		else
1700			ent->flags &= ~ESP_CMD_FLAG_WRITE;
1701
1702		if (esp->ops->dma_length_limit)
1703			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1704							     dma_len);
1705		else
1706			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1707
1708		esp->data_dma_len = dma_len;
1709
1710		if (!dma_len) {
1711			printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
1712			       esp->host->unique_id);
1713			printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n",
1714			       esp->host->unique_id,
1715			       (unsigned long long)esp_cur_dma_addr(ent, cmd),
1716			       esp_cur_dma_len(ent, cmd));
1717			esp_schedule_reset(esp);
1718			return 0;
1719		}
1720
1721		esp_log_datastart("ESP: start data addr[%08llx] len[%u] "
1722				  "write(%d)\n",
1723				  (unsigned long long)dma_addr, dma_len, write);
1724
1725		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1726				       write, ESP_CMD_DMA | ESP_CMD_TI);
1727		esp_event(esp, ESP_EVENT_DATA_DONE);
1728		break;
1729	}
1730	case ESP_EVENT_DATA_DONE: {
1731		struct esp_cmd_entry *ent = esp->active_cmd;
1732		struct scsi_cmnd *cmd = ent->cmd;
1733		int bytes_sent;
1734
1735		if (esp->ops->dma_error(esp)) {
1736			printk("ESP: data done, DMA error, resetting\n");
 
1737			esp_schedule_reset(esp);
1738			return 0;
1739		}
1740
1741		if (ent->flags & ESP_CMD_FLAG_WRITE) {
1742			/* XXX parity errors, etc. XXX */
1743
1744			esp->ops->dma_drain(esp);
1745		}
1746		esp->ops->dma_invalidate(esp);
1747
1748		if (esp->ireg != ESP_INTR_BSERV) {
1749			/* We should always see exactly a bus-service
1750			 * interrupt at the end of a successful transfer.
1751			 */
1752			printk("ESP: data done, not BSERV, resetting\n");
 
1753			esp_schedule_reset(esp);
1754			return 0;
1755		}
1756
1757		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1758
1759		esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
1760				 ent->flags, bytes_sent);
1761
1762		if (bytes_sent < 0) {
1763			/* XXX force sync mode for this target XXX */
1764			esp_schedule_reset(esp);
1765			return 0;
1766		}
1767
1768		esp_advance_dma(esp, ent, cmd, bytes_sent);
1769		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1770		goto again;
1771	}
1772
1773	case ESP_EVENT_STATUS: {
1774		struct esp_cmd_entry *ent = esp->active_cmd;
1775
1776		if (esp->ireg & ESP_INTR_FDONE) {
1777			ent->status = esp_read8(ESP_FDATA);
1778			ent->message = esp_read8(ESP_FDATA);
1779			scsi_esp_cmd(esp, ESP_CMD_MOK);
1780		} else if (esp->ireg == ESP_INTR_BSERV) {
1781			ent->status = esp_read8(ESP_FDATA);
1782			ent->message = 0xff;
1783			esp_event(esp, ESP_EVENT_MSGIN);
1784			return 0;
1785		}
1786
1787		if (ent->message != COMMAND_COMPLETE) {
1788			printk("ESP: Unexpected message %x in status\n",
1789			       ent->message);
 
1790			esp_schedule_reset(esp);
1791			return 0;
1792		}
1793
1794		esp_event(esp, ESP_EVENT_FREE_BUS);
1795		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1796		break;
1797	}
1798	case ESP_EVENT_FREE_BUS: {
1799		struct esp_cmd_entry *ent = esp->active_cmd;
1800		struct scsi_cmnd *cmd = ent->cmd;
1801
1802		if (ent->message == COMMAND_COMPLETE ||
1803		    ent->message == DISCONNECT)
1804			scsi_esp_cmd(esp, ESP_CMD_ESEL);
1805
1806		if (ent->message == COMMAND_COMPLETE) {
1807			esp_log_cmddone("ESP: Command done status[%x] "
1808					"message[%x]\n",
1809					ent->status, ent->message);
1810			if (ent->status == SAM_STAT_TASK_SET_FULL)
1811				esp_event_queue_full(esp, ent);
1812
1813			if (ent->status == SAM_STAT_CHECK_CONDITION &&
1814			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1815				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1816				esp_autosense(esp, ent);
1817			} else {
1818				esp_cmd_is_done(esp, ent, cmd,
1819						compose_result(ent->status,
1820							       ent->message,
1821							       DID_OK));
1822			}
1823		} else if (ent->message == DISCONNECT) {
1824			esp_log_disconnect("ESP: Disconnecting tgt[%d] "
1825					   "tag[%x:%x]\n",
1826					   cmd->device->id,
1827					   ent->tag[0], ent->tag[1]);
1828
1829			esp->active_cmd = NULL;
1830			esp_maybe_execute_command(esp);
1831		} else {
1832			printk("ESP: Unexpected message %x in freebus\n",
1833			       ent->message);
 
1834			esp_schedule_reset(esp);
1835			return 0;
1836		}
1837		if (esp->active_cmd)
1838			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1839		break;
1840	}
1841	case ESP_EVENT_MSGOUT: {
1842		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1843
1844		if (esp_debug & ESP_DEBUG_MSGOUT) {
1845			int i;
1846			printk("ESP: Sending message [ ");
1847			for (i = 0; i < esp->msg_out_len; i++)
1848				printk("%02x ", esp->msg_out[i]);
1849			printk("]\n");
1850		}
1851
1852		if (esp->rev == FASHME) {
1853			int i;
1854
1855			/* Always use the fifo.  */
1856			for (i = 0; i < esp->msg_out_len; i++) {
1857				esp_write8(esp->msg_out[i], ESP_FDATA);
1858				esp_write8(0, ESP_FDATA);
1859			}
1860			scsi_esp_cmd(esp, ESP_CMD_TI);
1861		} else {
1862			if (esp->msg_out_len == 1) {
1863				esp_write8(esp->msg_out[0], ESP_FDATA);
1864				scsi_esp_cmd(esp, ESP_CMD_TI);
 
 
 
 
1865			} else {
1866				/* Use DMA. */
1867				memcpy(esp->command_block,
1868				       esp->msg_out,
1869				       esp->msg_out_len);
1870
1871				esp->ops->send_dma_cmd(esp,
1872						       esp->command_block_dma,
1873						       esp->msg_out_len,
1874						       esp->msg_out_len,
1875						       0,
1876						       ESP_CMD_DMA|ESP_CMD_TI);
1877			}
1878		}
1879		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1880		break;
1881	}
1882	case ESP_EVENT_MSGOUT_DONE:
1883		if (esp->rev == FASHME) {
1884			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1885		} else {
1886			if (esp->msg_out_len > 1)
1887				esp->ops->dma_invalidate(esp);
1888		}
1889
1890		if (!(esp->ireg & ESP_INTR_DC)) {
1891			if (esp->rev != FASHME)
 
 
1892				scsi_esp_cmd(esp, ESP_CMD_NULL);
1893		}
 
 
 
1894		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1895		goto again;
1896	case ESP_EVENT_MSGIN:
1897		if (esp->ireg & ESP_INTR_BSERV) {
1898			if (esp->rev == FASHME) {
1899				if (!(esp_read8(ESP_STATUS2) &
1900				      ESP_STAT2_FEMPTY))
1901					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1902			} else {
1903				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1904				if (esp->rev == ESP100)
1905					scsi_esp_cmd(esp, ESP_CMD_NULL);
1906			}
1907			scsi_esp_cmd(esp, ESP_CMD_TI);
1908			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1909			return 1;
1910		}
1911		if (esp->ireg & ESP_INTR_FDONE) {
1912			u8 val;
1913
1914			if (esp->rev == FASHME)
1915				val = esp->fifo[0];
1916			else
1917				val = esp_read8(ESP_FDATA);
1918			esp->msg_in[esp->msg_in_len++] = val;
1919
1920			esp_log_msgin("ESP: Got msgin byte %x\n", val);
1921
1922			if (!esp_msgin_process(esp))
1923				esp->msg_in_len = 0;
1924
1925			if (esp->rev == FASHME)
1926				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1927
1928			scsi_esp_cmd(esp, ESP_CMD_MOK);
1929
 
 
 
 
1930			if (esp->event != ESP_EVENT_FREE_BUS)
1931				esp_event(esp, ESP_EVENT_CHECK_PHASE);
1932		} else {
1933			printk("ESP: MSGIN neither BSERV not FDON, resetting");
 
1934			esp_schedule_reset(esp);
1935			return 0;
1936		}
1937		break;
1938	case ESP_EVENT_CMD_START:
1939		memcpy(esp->command_block, esp->cmd_bytes_ptr,
1940		       esp->cmd_bytes_left);
1941		if (esp->rev == FASHME)
1942			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1943		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1944				       esp->cmd_bytes_left, 16, 0,
1945				       ESP_CMD_DMA | ESP_CMD_TI);
1946		esp_event(esp, ESP_EVENT_CMD_DONE);
1947		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1948		break;
1949	case ESP_EVENT_CMD_DONE:
1950		esp->ops->dma_invalidate(esp);
1951		if (esp->ireg & ESP_INTR_BSERV) {
1952			esp_event(esp, ESP_EVENT_CHECK_PHASE);
1953			goto again;
1954		}
1955		esp_schedule_reset(esp);
1956		return 0;
1957		break;
1958
1959	case ESP_EVENT_RESET:
1960		scsi_esp_cmd(esp, ESP_CMD_RS);
1961		break;
1962
1963	default:
1964		printk("ESP: Unexpected event %x, resetting\n",
1965		       esp->event);
1966		esp_schedule_reset(esp);
1967		return 0;
1968		break;
1969	}
1970	return 1;
1971}
1972
/* Fail one outstanding command with DID_RESET as part of bus reset
 * cleanup: release its DMA mappings and lun tag, complete it back to
 * the midlayer and return the entry to the pool.
 */
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	/* An in-flight autosense also holds the sense buffer mapping. */
	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;
	}

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}
1991
1992static void esp_clear_hold(struct scsi_device *dev, void *data)
1993{
1994	struct esp_lun_data *lp = dev->hostdata;
1995
1996	BUG_ON(lp->num_tagged);
1997	lp->hold = 0;
1998}
1999
/* Terminate every queued and active command with DID_RESET after a bus
 * reset, and force renegotiation of sync/wide parameters with all
 * targets.  Called from __esp_interrupt() under the host lock.
 */
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	/* Commands still on the issue queue have no chip state to
	 * unwind; just complete them.
	 */
	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers.  */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		/* Release any luns held back by queue-full handling. */
		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
2040
/* Core interrupt processing; runs under host->lock.  Samples the
 * status and interrupt registers, finishes a pending bus reset if one
 * is in flight, dispatches (re)selection handling, and then drives
 * esp_process_event() until it reports completion.
 */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	esp->sreg = esp_read8(ESP_STATUS);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	esp->ireg = esp_read8(ESP_INTRPT);

	/* A SCSI bus reset interrupt also completes reset handling. */
	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		/* Wake up an error-handler thread waiting on the reset. */
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	/* FASHME latches message/selection bytes in its FIFO; pull them
	 * into esp->fifo[] before they can be clobbered.
	 */
	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		printk("ESP: unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (!(esp->ireg & ESP_INTR_RSEL)) {
			/* Some combination of FDONE, BSERV, DC.  */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		} else if (esp->ireg & ESP_INTR_RSEL) {
			/* Reselection; back out of any selection attempt
			 * first, then attach the reconnecting command.
			 */
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}
2114
/* Shared IRQ handler entry point used by all ESP front-end drivers.
 * Takes the host lock and processes interrupts; when the state machine
 * set ESP_FLAG_QUICKIRQ_CHECK it briefly polls (up to
 * ESP_QUICKIRQ_LIMIT iterations) for a quickly-following interrupt to
 * avoid the latency of another hard interrupt round trip.
 */
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			/* Short poll for the next interrupt. */
			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
2146
/* Determine which ESP/FAS chip variant we are driving by probing which
 * of the config registers are actually implemented: no cfg2 => ESP100,
 * cfg2 but no cfg3 => ESP100A, all three => ESP236 or a FAS part,
 * the latter distinguished by the clock conversion factor.
 */
static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
	esp_write8(esp->config2, ESP_CFG2);

	val = esp_read8(ESP_CFG2);
	val &= ~ESP_CONFIG2_MAGIC;
	if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
		/* If what we write to cfg2 does not come back, cfg2 is not
		 * implemented, therefore this must be a plain esp100.
		 */
		esp->rev = ESP100;
	} else {
		esp->config2 = 0;
		esp_set_all_config3(esp, 5);
		esp->prev_cfg3 = 5;
		esp_write8(esp->config2, ESP_CFG2);
		esp_write8(0, ESP_CFG3);
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		val = esp_read8(ESP_CFG3);
		if (val != 5) {
			/* The cfg2 register is implemented, however
			 * cfg3 is not, must be esp100a.
			 */
			esp->rev = ESP100A;
		} else {
			esp_set_all_config3(esp, 0);
			esp->prev_cfg3 = 0;
			esp_write8(esp->prev_cfg3, ESP_CFG3);

			/* All of cfg{1,2,3} implemented, must be one of
			 * the fas variants, figure out which one.
			 */
			if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
				esp->rev = FAST;
				esp->sync_defp = SYNC_DEFP_FAST;
			} else {
				esp->rev = ESP236;
			}
			esp->config2 = 0;
			esp_write8(esp->config2, ESP_CFG2);
		}
	}
}
2195
2196static void esp_init_swstate(struct esp *esp)
2197{
2198	int i;
2199
2200	INIT_LIST_HEAD(&esp->queued_cmds);
2201	INIT_LIST_HEAD(&esp->active_cmds);
2202	INIT_LIST_HEAD(&esp->esp_cmd_pool);
2203
2204	/* Start with a clear state, domain validation (via ->slave_configure,
2205	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2206	 * commands.
2207	 */
2208	for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2209		esp->target[i].flags = 0;
2210		esp->target[i].nego_goal_period = 0;
2211		esp->target[i].nego_goal_offset = 0;
2212		esp->target[i].nego_goal_width = 0;
2213		esp->target[i].nego_goal_tags = 0;
2214	}
2215}
2216
/* This places the ESP into a known state at boot time: reset the DMA
 * engine and the chip, then reset the SCSI bus with the chip's
 * bus-reset interrupt temporarily suppressed.
 */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	/* Restore the original config1, re-enabling reset reporting. */
	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}
2241
static void esp_set_clock_params(struct esp *esp)
{
	int fhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place.  We are trying to
	 * basically figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 *    This is a representation of the input crystal clock frequency
	 *    going into the ESP on this machine.  Any operation whose timing
	 *    is longer than 400ns depends on this value being correct.  For
	 *    example, you'll get blips for arbitration/selection during high
	 *    load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 *    The ESP isn't very bright and will arbitrate for the bus and try
	 *    to select a target forever if you let it.  This value tells the
	 *    ESP when it has taken too long to negotiate and that it should
	 *    interrupt the CPU so we can see what happened.  The value is
	 *    computed as follows (from NCR/Symbios chip docs).
	 *
	 *          (Time Out Period) *  (Input Clock)
	 *    STO = ----------------------------------
	 *          (8192) * (Clock Conversion Factor)
	 *
	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
	 * c) Empirical constants for synchronous offset and transfer period
	 *    register values
	 *
	 *    This entails the smallest and largest sync period we could ever
	 *    handle on this ESP.
	 */
	fhz = esp->cfreq;

	/* ccf = ceil(MHz / 5); a ccf of 1 is treated as 2. */
	ccf = ((fhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHZ.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property.  All
	 * other machines provide useful values it seems.
	 */
	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
		fhz = 20000000;
		ccf = 4;
	}

	/* A conversion factor of 8 is stored as 0 in cfact. */
	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fhz;
	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}
2302
/* Human-readable chip names, indexed by the esp->rev value. */
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
};
2312
2313static struct scsi_transport_template *esp_transport_template;
2314
2315int scsi_esp_register(struct esp *esp, struct device *dev)
2316{
2317	static int instance;
2318	int err;
2319
 
 
2320	esp->host->transportt = esp_transport_template;
2321	esp->host->max_lun = ESP_MAX_LUN;
2322	esp->host->cmd_per_lun = 2;
2323	esp->host->unique_id = instance;
2324
2325	esp_set_clock_params(esp);
2326
2327	esp_get_revision(esp);
2328
2329	esp_init_swstate(esp);
2330
2331	esp_bootup_reset(esp);
2332
2333	printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
2334	       esp->host->unique_id, esp->regs, esp->dma_regs,
2335	       esp->host->irq);
2336	printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2337	       esp->host->unique_id, esp_chip_names[esp->rev],
2338	       esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
 
2339
2340	/* Let the SCSI bus reset settle. */
2341	ssleep(esp_bus_reset_settle);
2342
2343	err = scsi_add_host(esp->host, dev);
2344	if (err)
2345		return err;
2346
2347	instance++;
2348
2349	scsi_scan_host(esp->host);
2350
2351	return 0;
2352}
2353EXPORT_SYMBOL(scsi_esp_register);
2354
/* Detach an ESP host from the SCSI midlayer.  The bus front-end
 * remains responsible for freeing the host and its resources.
 */
void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);
2360
2361static int esp_target_alloc(struct scsi_target *starget)
2362{
2363	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2364	struct esp_target_data *tp = &esp->target[starget->id];
2365
2366	tp->starget = starget;
2367
2368	return 0;
2369}
2370
2371static void esp_target_destroy(struct scsi_target *starget)
2372{
2373	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2374	struct esp_target_data *tp = &esp->target[starget->id];
2375
2376	tp->starget = NULL;
2377}
2378
/* Per-LUN setup: allocate the esp_lun_data used for tag bookkeeping
 * and seed the SPI transport limits on the owning target.
 */
static int esp_slave_alloc(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	struct esp_lun_data *lp;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	dev->hostdata = lp;

	spi_min_period(tp->starget) = esp->min_period;
	spi_max_offset(tp->starget) = 15;

	/* Only advertise wide transfers when the chip supports them. */
	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
		spi_max_width(tp->starget) = 1;
	else
		spi_max_width(tp->starget) = 0;

	return 0;
}
2400
/* Configure queue depth and tagged queueing for a scsi_device, and
 * kick off SPI domain validation the first time through.
 */
static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	int goal_tags, queue_depth;

	goal_tags = 0;

	if (dev->tagged_supported) {
		/* XXX make this configurable somehow XXX */
		goal_tags = ESP_DEFAULT_TAGS;

		if (goal_tags > ESP_MAX_TAG)
			goal_tags = ESP_MAX_TAG;
	}

	/* Never configure a depth below the host's cmd_per_lun. */
	queue_depth = goal_tags;
	if (queue_depth < dev->host->cmd_per_lun)
		queue_depth = dev->host->cmd_per_lun;

	if (goal_tags) {
		scsi_set_tag_type(dev, MSG_ORDERED_TAG);
		scsi_activate_tcq(dev, queue_depth);
	} else {
		scsi_deactivate_tcq(dev, queue_depth);
	}
	tp->flags |= ESP_TGT_DISCONNECT;

	/* Run domain validation only once per target. */
	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}
2434
2435static void esp_slave_destroy(struct scsi_device *dev)
2436{
2437	struct esp_lun_data *lp = dev->hostdata;
2438
2439	kfree(lp);
2440	dev->hostdata = NULL;
2441}
2442
/* Error handler: try to abort a single command.
 *
 * Three cases: (1) the command is still on queued_cmds and can be
 * completed immediately with DID_ABORT; (2) it is the currently
 * active command, so we queue an ABORT_TASK_SET message and wait for
 * the interrupt path to signal ent->eh_done; (3) it is disconnected,
 * which we cannot abort -- fail and let the midlayer escalate.
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
	       esp->host->unique_id, cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	/* Deliberately drop and re-take the lock so interrupts get a
	 * window between the dump above and the real work below.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	/* Look for the command on the not-yet-issued queue. */
	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected.  This is not easy to
		 * abort.  For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target.  Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyways this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Wait for the interrupt path to complete the abort; on
	 * timeout, detach the on-stack completion before returning.
	 */
	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}
2558
/* Error handler: reset the SCSI bus.  Issues ESP_CMD_RS and then
 * waits, outside the lock, for the interrupt path to complete
 * esp->eh_reset (with a 5 second timeout).
 */
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Give devices time to settle after the bus reset. */
	ssleep(esp_bus_reset_settle);

	/* On timeout, detach the on-stack completion before returning. */
	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}
2593
/* All bets are off, reset the entire device.  */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	/* Full DMA + chip + bus reset under the host lock;
	 * esp_reset_cleanup() presumably disposes of all outstanding
	 * commands -- its body is elsewhere in this file.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Let the SCSI bus settle after the reset. */
	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}
2609
/* scsi_host_template .info hook; returns a short driver identifier. */
static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}
2614
/* SCSI host template shared by all ESP bus front-ends. */
struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	/* The driver sleeps for esp_bus_reset_settle itself, so tell
	 * the midlayer not to add its own settle delay.
	 */
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);
2636
2637static void esp_get_signalling(struct Scsi_Host *host)
2638{
2639	struct esp *esp = shost_priv(host);
2640	enum spi_signal_type type;
2641
2642	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2643		type = SPI_SIGNAL_HVD;
2644	else
2645		type = SPI_SIGNAL_SE;
2646
2647	spi_signalling(host) = type;
2648}
2649
2650static void esp_set_offset(struct scsi_target *target, int offset)
2651{
2652	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2653	struct esp *esp = shost_priv(host);
2654	struct esp_target_data *tp = &esp->target[target->id];
2655
2656	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2657		tp->nego_goal_offset = 0;
2658	else
2659		tp->nego_goal_offset = offset;
2660	tp->flags |= ESP_TGT_CHECK_NEGO;
2661}
2662
2663static void esp_set_period(struct scsi_target *target, int period)
2664{
2665	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2666	struct esp *esp = shost_priv(host);
2667	struct esp_target_data *tp = &esp->target[target->id];
2668
2669	tp->nego_goal_period = period;
2670	tp->flags |= ESP_TGT_CHECK_NEGO;
2671}
2672
2673static void esp_set_width(struct scsi_target *target, int width)
2674{
2675	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2676	struct esp *esp = shost_priv(host);
2677	struct esp_target_data *tp = &esp->target[target->id];
2678
2679	tp->nego_goal_width = (width ? 1 : 0);
2680	tp->flags |= ESP_TGT_CHECK_NEGO;
2681}
2682
/* SPI transport class callbacks wired up in esp_init(). */
static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};
2692
/* Module init: sanity-check private data sizing and attach the SPI
 * transport class.
 */
static int __init esp_init(void)
{
	/* esp_cmd_priv is presumably overlaid on the scsi_cmnd scratch
	 * area (struct scsi_pointer); this enforces that it fits.
	 */
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}
2704
/* Module exit: release the SPI transport class reference. */
static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}
2709
2710MODULE_DESCRIPTION("ESP SCSI driver core");
2711MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2712MODULE_LICENSE("GPL");
2713MODULE_VERSION(DRV_VERSION);
2714
2715module_param(esp_bus_reset_settle, int, 0);
2716MODULE_PARM_DESC(esp_bus_reset_settle,
2717		 "ESP scsi bus reset delay in seconds");
2718
2719module_param(esp_debug, int, 0);
2720MODULE_PARM_DESC(esp_debug,
2721"ESP bitmapped debugging message enable value:\n"
2722"	0x00000001	Log interrupt events\n"
2723"	0x00000002	Log scsi commands\n"
2724"	0x00000004	Log resets\n"
2725"	0x00000008	Log message in events\n"
2726"	0x00000010	Log message out events\n"
2727"	0x00000020	Log command completion\n"
2728"	0x00000040	Log disconnects\n"
2729"	0x00000080	Log data start\n"
2730"	0x00000100	Log data done\n"
2731"	0x00000200	Log reconnects\n"
2732"	0x00000400	Log auto-sense data\n"
2733);
2734
2735module_init(esp_init);
2736module_exit(esp_exit);
v4.17
   1/* esp_scsi.c: ESP SCSI driver.
   2 *
   3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/types.h>
   8#include <linux/slab.h>
   9#include <linux/delay.h>
  10#include <linux/list.h>
  11#include <linux/completion.h>
  12#include <linux/kallsyms.h>
  13#include <linux/module.h>
  14#include <linux/moduleparam.h>
  15#include <linux/init.h>
  16#include <linux/irqreturn.h>
  17
  18#include <asm/irq.h>
  19#include <asm/io.h>
  20#include <asm/dma.h>
  21
  22#include <scsi/scsi.h>
  23#include <scsi/scsi_host.h>
  24#include <scsi/scsi_cmnd.h>
  25#include <scsi/scsi_device.h>
  26#include <scsi/scsi_tcq.h>
  27#include <scsi/scsi_dbg.h>
  28#include <scsi/scsi_transport_spi.h>
  29
  30#include "esp_scsi.h"
  31
  32#define DRV_MODULE_NAME		"esp"
  33#define PFX DRV_MODULE_NAME	": "
  34#define DRV_VERSION		"2.000"
  35#define DRV_MODULE_RELDATE	"April 19, 2007"
  36
  37/* SCSI bus reset settle time in seconds.  */
  38static int esp_bus_reset_settle = 3;
  39
  40static u32 esp_debug;
  41#define ESP_DEBUG_INTR		0x00000001
  42#define ESP_DEBUG_SCSICMD	0x00000002
  43#define ESP_DEBUG_RESET		0x00000004
  44#define ESP_DEBUG_MSGIN		0x00000008
  45#define ESP_DEBUG_MSGOUT	0x00000010
  46#define ESP_DEBUG_CMDDONE	0x00000020
  47#define ESP_DEBUG_DISCONNECT	0x00000040
  48#define ESP_DEBUG_DATASTART	0x00000080
  49#define ESP_DEBUG_DATADONE	0x00000100
  50#define ESP_DEBUG_RECONNECT	0x00000200
  51#define ESP_DEBUG_AUTOSENSE	0x00000400
  52#define ESP_DEBUG_EVENT		0x00000800
  53#define ESP_DEBUG_COMMAND	0x00001000
  54
  55#define esp_log_intr(f, a...) \
  56do {	if (esp_debug & ESP_DEBUG_INTR) \
  57		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  58} while (0)
  59
  60#define esp_log_reset(f, a...) \
  61do {	if (esp_debug & ESP_DEBUG_RESET) \
  62		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  63} while (0)
  64
  65#define esp_log_msgin(f, a...) \
  66do {	if (esp_debug & ESP_DEBUG_MSGIN) \
  67		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  68} while (0)
  69
  70#define esp_log_msgout(f, a...) \
  71do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
  72		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  73} while (0)
  74
  75#define esp_log_cmddone(f, a...) \
  76do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
  77		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  78} while (0)
  79
  80#define esp_log_disconnect(f, a...) \
  81do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
  82		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  83} while (0)
  84
  85#define esp_log_datastart(f, a...) \
  86do {	if (esp_debug & ESP_DEBUG_DATASTART) \
  87		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  88} while (0)
  89
  90#define esp_log_datadone(f, a...) \
  91do {	if (esp_debug & ESP_DEBUG_DATADONE) \
  92		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  93} while (0)
  94
  95#define esp_log_reconnect(f, a...) \
  96do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
  97		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
  98} while (0)
  99
 100#define esp_log_autosense(f, a...) \
 101do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
 102		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
 103} while (0)
 104
 105#define esp_log_event(f, a...) \
 106do {   if (esp_debug & ESP_DEBUG_EVENT)	\
 107		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
 108} while (0)
 109
 110#define esp_log_command(f, a...) \
 111do {   if (esp_debug & ESP_DEBUG_COMMAND)	\
 112		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
 113} while (0)
 114
 115#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
 116#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)
 117
/* Snapshot the cached chip registers and driver state into an event
 * log entry, for later dumping by esp_dump_cmd_log().
 */
static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}
 128
/* Issue a command byte to the ESP command register, recording it in
 * the driver's circular event log first.
 */
void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	/* Advance the ring; the mask relies on ESP_EVENT_LOG_SZ being
	 * a power of two.
	 */
	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);
 145
/* Hand the prepared command block to the chip: either by stuffing the
 * FIFO by hand (ESP_FLAG_USE_FIFO) or by kicking off a DMA transfer
 * of esp->command_block.
 */
static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		/* FASHME gets an explicit FIFO flush first. */
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}
 163
/* Record a state-machine event in the circular event log and make it
 * the current event (esp->event).
 */
static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	/* ESP_EVENT_LOG_SZ must be a power of two for this mask. */
	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}
 178
/* Dump the circular command/event log, oldest entry first. */
static void esp_dump_cmd_log(struct esp *esp)
{
	/* esp_event_cur points at the oldest entry, so start there and
	 * walk the whole ring back around to it.
	 */
	int idx = esp->esp_event_cur;
	int stop = idx;

	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		shost_printk(KERN_INFO, esp->host,
			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
			     idx,
			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
			     p->val, p->sreg, p->seqreg,
			     p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}
 199
/* Flush the chip FIFO.  On ESP236, additionally poll until the FIFO
 * byte count actually reads back as zero (bounded at ~1ms).
 */
static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_BYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}
 216
/* Drain the FASHME FIFO into esp->fifo[].  Two bytes are read per
 * flag-count unit; ESP_STAT2_F1BYTE signals a trailing odd byte,
 * which is coaxed out by writing a pad byte first.
 */
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}
 233
 234static void esp_set_all_config3(struct esp *esp, u8 val)
 235{
 236	int i;
 237
 238	for (i = 0; i < ESP_MAX_TARGET; i++)
 239		esp->target[i].esp_config3 = val;
 240}
 241
/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		/* Refine the generic FAST revision into the concrete
		 * chip family from the unique-ID register.
		 */
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/*
		 * The AM53c974 chip returns the same ID as FAS236;
		 * try to configure glitch eater.
		 */
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		/* If the bit stuck, CFG4 exists: this is an AM53c974. */
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	/* Divide both periods by four, rounding up. */
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	/* Reprogram the configuration registers per chip revision. */
	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
	case PCSCSI:
		/* Fast 236, AM53c974 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
 367
/* Map the command's scatterlist for DMA and initialize the
 * per-command residue tracking in esp_cmd_priv.
 */
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int dir = cmd->sc_data_direction;
	int total, i;

	if (dir == DMA_NONE)
		return;

	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
	spriv->cur_residue = sg_dma_len(sg);
	spriv->cur_sg = sg;

	/* Total residue is the sum of all mapped segment lengths. */
	total = 0;
	for (i = 0; i < spriv->u.num_sg; i++)
		total += sg_dma_len(&sg[i]);
	spriv->tot_residue = total;
}
 387
 388static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
 389				   struct scsi_cmnd *cmd)
 390{
 391	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 392
 393	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 394		return ent->sense_dma +
 395			(ent->sense_ptr - cmd->sense_buffer);
 396	}
 397
 398	return sg_dma_address(p->cur_sg) +
 399		(sg_dma_len(p->cur_sg) -
 400		 p->cur_residue);
 401}
 402
 403static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
 404				    struct scsi_cmnd *cmd)
 405{
 406	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 407
 408	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 409		return SCSI_SENSE_BUFFERSIZE -
 410			(ent->sense_ptr - cmd->sense_buffer);
 411	}
 412	return p->cur_residue;
 413}
 414
/* Account for @len transferred bytes, stepping to the next
 * scatterlist entry once the current one is exhausted.
 */
static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		/* Should never happen; clamp and complain rather than
		 * carry negative residues forward.
		 */
		shost_printk(KERN_ERR, esp->host,
			     "Data transfer overflow.\n");
		shost_printk(KERN_ERR, esp->host,
			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
			     p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->cur_sg++;
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}
 441
 442static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
 443{
 444	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 445	int dir = cmd->sc_data_direction;
 446
 447	if (dir == DMA_NONE)
 448		return;
 449
 450	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
 451}
 452
 453static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 454{
 455	struct scsi_cmnd *cmd = ent->cmd;
 456	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 457
 458	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 459		ent->saved_sense_ptr = ent->sense_ptr;
 460		return;
 461	}
 462	ent->saved_cur_residue = spriv->cur_residue;
 463	ent->saved_cur_sg = spriv->cur_sg;
 464	ent->saved_tot_residue = spriv->tot_residue;
 465}
 466
 467static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 468{
 469	struct scsi_cmnd *cmd = ent->cmd;
 470	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 471
 472	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 473		ent->sense_ptr = ent->saved_sense_ptr;
 474		return;
 475	}
 476	spriv->cur_residue = ent->saved_cur_residue;
 477	spriv->cur_sg = ent->saved_cur_sg;
 478	spriv->tot_residue = ent->saved_tot_residue;
 479}
 480
 481static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
 482{
 483	if (cmd->cmd_len == 6 ||
 484	    cmd->cmd_len == 10 ||
 485	    cmd->cmd_len == 12) {
 486		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
 487	} else {
 488		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
 489	}
 490}
 491
/* Program the per-target config3 value, caching the last value
 * written to avoid redundant register accesses.  Skipped entirely on
 * revisions at or below ESP100A.
 */
static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}
 503
/* Program the target's sync offset/period registers, skipping any
 * write whose value matches what was last programmed.
 */
static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}
 518
 519static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
 520{
 521	if (esp->rev == FASHME) {
 522		/* Arbitrary segment boundaries, 24-bit counts.  */
 523		if (dma_len > (1U << 24))
 524			dma_len = (1U << 24);
 525	} else {
 526		u32 base, end;
 527
 528		/* ESP chip limits other variants by 16-bits of transfer
 529		 * count.  Actually on FAS100A and FAS236 we could get
 530		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
 531		 * in the ESP_CFG2 register but that causes other unwanted
 532		 * changes so we don't use it currently.
 533		 */
 534		if (dma_len > (1U << 16))
 535			dma_len = (1U << 16);
 536
 537		/* All of the DMA variants hooked up to these chips
 538		 * cannot handle crossing a 24-bit address boundary.
 539		 */
 540		base = dma_addr & ((1U << 24) - 1U);
 541		end = base + dma_len;
 542		if (end > (1U << 24))
 543			end = (1U <<24);
 544		dma_len = end - base;
 545	}
 546	return dma_len;
 547}
 548
/* True when the transport's current width disagrees with our
 * negotiation goal for this target.
 */
static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}
 555
 556static int esp_need_to_nego_sync(struct esp_target_data *tp)
 557{
 558	struct scsi_target *target = tp->starget;
 559
 560	/* When offset is zero, period is "don't care".  */
 561	if (!spi_offset(target) && !tp->nego_goal_offset)
 562		return 0;
 563
 564	if (spi_offset(target) == tp->nego_goal_offset &&
 565	    spi_period(target) == tp->nego_goal_period)
 566		return 0;
 567
 568	return 1;
 569}
 570
/* Claim the per-LUN issue slot for a command.  Untagged commands use
 * the single non_tagged_cmd slot; tagged commands occupy the slot
 * indexed by their tag value.  Returns -EBUSY when the command cannot
 * be issued to this LUN right now.
 */
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken?  */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	}

	/* Tagged command. Check that it isn't blocked by a non-tagged one. */
	if (lp->non_tagged_cmd || lp->hold)
		return -EBUSY;

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}
 613
 614static void esp_free_lun_tag(struct esp_cmd_entry *ent,
 615			     struct esp_lun_data *lp)
 616{
 617	if (ent->orig_tag[0]) {
 618		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
 619		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
 620		lp->num_tagged--;
 621	} else {
 622		BUG_ON(lp->non_tagged_cmd != ent);
 623		lp->non_tagged_cmd = NULL;
 624	}
 625}
 626
/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);

		/* Map the midlayer's sense buffer for DMA from the device. */
		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	/* Build IDENTIFY plus a 6-byte REQUEST_SENSE CDB in the
	 * command block.
	 */
	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}
 685
/* Scan the issue queue for a command that can be started now.  Returns
 * the chosen entry with its tag bytes set up, or NULL when every queued
 * command is blocked by the per-LUN tag accounting.
 */
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		/* Autosense commands always run immediately, untagged. */
		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		/* Fill in the tag message bytes; clear them when the
		 * midlayer gave this command no tag.
		 */
		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		/* Skip commands blocked by the per-LUN bookkeeping. */
		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}
 716
/* Start the next issuable command if the chip is idle.  Builds the
 * IDENTIFY/tag/CDB bytes in the command block and kicks off selection,
 * using the "slow command" select-and-stop path when negotiation
 * messages must go out first or the chip lacks select-with-ATN3.
 */
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	/* Only one command may own the bus; also hold off during reset. */
	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	/* Autosense commands use their own fixed REQUEST_SENSE setup. */
	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this.  */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		/* Wide negotiation is only possible on FASHME chips. */
		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command.  */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		/* Fast path: IDENTIFY (+ tag bytes) + CDB go out in a
		 * single DMA'd selection sequence.
		 */
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		/* Slow path: select-and-stop; the CDB bytes are fed
		 * later during the command phase.
		 */
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			/* Prepend the two tag bytes to any queued
			 * negotiation message.
			 */
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}
 866
 867static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
 868{
 869	struct list_head *head = &esp->esp_cmd_pool;
 870	struct esp_cmd_entry *ret;
 871
 872	if (list_empty(head)) {
 873		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
 874	} else {
 875		ret = list_entry(head->next, struct esp_cmd_entry, list);
 876		list_del(&ret->list);
 877		memset(ret, 0, sizeof(*ret));
 878	}
 879	return ret;
 880}
 881
/* Return a command entry to the free pool for reuse by esp_get_ent(). */
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}
 886
/* Complete a command: unwind DMA/tag state, fix up the result for
 * autosense completions, hand the command back to the midlayer, and
 * then try to start the next queued command.
 */
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	/* Wake up an error-handler thread waiting on this command. */
	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}
 937
 938static unsigned int compose_result(unsigned int status, unsigned int message,
 939				   unsigned int driver_code)
 940{
 941	return (status | (message << 8) | (driver_code << 16));
 942}
 943
/* Target reported QUEUE FULL: let the midlayer ramp this device's
 * queue depth down to the number of tags currently outstanding.
 */
static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}
 951
/* Queue a new command from the SCSI midlayer (invoked with the host
 * lock held via DEF_SCSI_QCMD below).  Returns SCSI_MLQUEUE_HOST_BUSY
 * when no command entry can be allocated.
 */
static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	/* All-ones marks the command as not yet DMA-mapped. */
	spriv->u.dma_addr = ~(dma_addr_t)0x0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}
 976
/* Generate esp_queuecommand() as the host-lock wrapper around
 * esp_queuecommand_lck().
 */
static DEF_SCSI_QCMD(esp_queuecommand)
 978
 979static int esp_check_gross_error(struct esp *esp)
 980{
 981	if (esp->sreg & ESP_STAT_SPAM) {
 982		/* Gross Error, could be one of:
 983		 * - top of fifo overwritten
 984		 * - top of command register overwritten
 985		 * - DMA programmed with wrong direction
 986		 * - improper phase change
 987		 */
 988		shost_printk(KERN_ERR, esp->host,
 989			     "Gross error sreg[%02x]\n", esp->sreg);
 990		/* XXX Reset the chip. XXX */
 991		return 1;
 992	}
 993	return 0;
 994}
 995
/* Classify a possibly-spurious interrupt.  Returns 1 for a bus-reset
 * interrupt seen with no chip interrupt pending, -1 for a spurious
 * interrupt or DMA error, and 0 for a genuine chip interrupt.
 */
static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			/* A bus reset can legitimately interrupt while
			 * the chip itself shows nothing pending.
			 */
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}
1032
/* Flag the driver for a full bus/chip reset; the actual reset is
 * carried out later by the event processing machinery.
 */
static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %pf\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}
1040
1041/* In order to avoid having to add a special half-reconnected state
1042 * into the driver we just sit here and poll through the rest of
1043 * the reselection process to get the tag message bytes.
1044 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	/* Busy-wait for the interrupt that follows reselection. */
	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	/* We must be in message-in phase to fetch the tag bytes. */
	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message.  */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	/* Poll for function-done signalling both tag bytes arrived. */
	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	/* Byte 0 is the tag message type, byte 1 the tag value. */
	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}
1135
/* Handle a reselection by a previously-disconnected target.  Decode
 * the reselecting target/lun, look up the disconnected command (via
 * the tag bytes when necessary), and make it the active command.
 * Returns 1 on success; any inconsistency schedules a reset and
 * returns 0.
 */
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		/* Exactly one other bit (the target's ID) must be set. */
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	/* An untagged command is found directly; otherwise pull the
	 * tag bytes off the bus to identify the command.
	 */
	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}
1220
/* Process the interrupt that ends a selection attempt: lost
 * arbitration (reselected), selection timeout (no such target), or
 * successful selection.  Returns 1 when the command was completed
 * here, 0 otherwise.
 */
static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;

	/* No longer selecting.  */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp->ops->unmap_single(esp, ent->sense_dma,
					       SCSI_SENSE_BUFFERSIZE,
					       DMA_FROM_DEVICE);
			ent->sense_ptr = NULL;
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a slow command, negotiation, etc.
		 * we'll do the right thing as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}
1319
/* Compute how many bytes actually transferred during a data phase:
 * the programmed DMA length minus the chip's residual transfer count
 * and any bytes stranded in the FIFO.  Returns -1 when the ESP100
 * sync-data chip bug is detected (caller must reset).
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;	/* each FIFO slot holds two bytes in wide mode */

	/* The transfer counter registers are only meaningful when the
	 * count did not expire (TCNT clear).
	 */
	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	/*
	 * The am53c974 has a DMA 'pecularity'. The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0 '. When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		/* Deposit the residual byte into the sense buffer or the
		 * appropriate spot in the scatterlist.
		 */
		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	/* Data-out: bytes still in the FIFO never reached the target. */
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Asynchronous transfer (sync offset zero), always flush
		 * the fifo.
		 */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
1412
/* Commit a negotiated transfer agreement: update the SPI transport
 * class attributes and program the chip's sync offset/period (and
 * fast-SCSI config bit) registers.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			/* Short periods (< 50 SDTR units) enable the
			 * chip's fast-SCSI mode bit.
			 */
			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}
1451
/* Handle MESSAGE REJECT from the target.  A rejected WDTR falls back
 * to narrow (and retries sync negotiation if still needed); a
 * rejected SDTR falls back to async.  Any other reject is fatal and
 * schedules a reset.
 */
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			/* Still need sync: queue an SDTR message. */
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		/* Sync rejected: drop to asynchronous transfers. */
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
	esp_schedule_reset(esp);
}
1491
/* Handle the target's SDTR (synchronous data transfer request)
 * response: validate period/offset, convert the period to chip clock
 * ticks, and commit via esp_setsync().  Invalid values are rejected;
 * an acceptable-but-too-slow period is renegotiated as async.
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	/* Only valid as a response to our own SDTR. */
	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	/* The chip supports at most a 15-deep sync offset. */
	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		/* Too slow for us: renegotiate for asynchronous. */
		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		/* Convert the SDTR period (4ns units, hence << 2) into
		 * chip clock ticks for the STP register.
		 */
		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1542
/* Handle the target's WDTR (wide data transfer request) response.
 * Only FASHME supports wide transfers; commit the agreed width, then
 * continue with sync negotiation if that is still outstanding.
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	/* Only valid as a response to our own WDTR. */
	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	/* Width settled; move on to sync negotiation if required. */
	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1591
1592static void esp_msgin_extended(struct esp *esp)
1593{
1594	struct esp_cmd_entry *ent = esp->active_cmd;
1595	struct scsi_cmnd *cmd = ent->cmd;
1596	struct esp_target_data *tp;
1597	int tgt = cmd->device->id;
1598
1599	tp = &esp->target[tgt];
1600	if (esp->msg_in[2] == EXTENDED_SDTR) {
1601		esp_msgin_sdtr(esp, tp);
1602		return;
1603	}
1604	if (esp->msg_in[2] == EXTENDED_WDTR) {
1605		esp_msgin_wdtr(esp, tp);
1606		return;
1607	}
1608
1609	shost_printk(KERN_INFO, esp->host,
1610		     "Unexpected extended msg type %x\n", esp->msg_in[2]);
1611
1612	esp->msg_out[0] = MESSAGE_REJECT;
1613	esp->msg_out_len = 1;
1614	scsi_esp_cmd(esp, ESP_CMD_SATN);
1615}
1616
1617/* Analyze msgin bytes received from target so far.  Return non-zero
1618 * if there are more bytes needed to complete the message.
1619 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		/* Wait for the length byte, then for the full payload. */
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		/* Only a one-byte residue is valid. */
		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		/* Back the data pointer up one byte, crossing back into
		 * the previous scatterlist entry if necessary.
		 */
		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		/* Record the message and free the bus once it is ACKed. */
		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
1691
1692static int esp_process_event(struct esp *esp)
1693{
1694	int write, i;
1695
1696again:
1697	write = 0;
1698	esp_log_event("process event %d phase %x\n",
1699		      esp->event, esp->sreg & ESP_STAT_PMASK);
1700	switch (esp->event) {
1701	case ESP_EVENT_CHECK_PHASE:
1702		switch (esp->sreg & ESP_STAT_PMASK) {
1703		case ESP_DOP:
1704			esp_event(esp, ESP_EVENT_DATA_OUT);
1705			break;
1706		case ESP_DIP:
1707			esp_event(esp, ESP_EVENT_DATA_IN);
1708			break;
1709		case ESP_STATP:
1710			esp_flush_fifo(esp);
1711			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1712			esp_event(esp, ESP_EVENT_STATUS);
1713			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1714			return 1;
1715
1716		case ESP_MOP:
1717			esp_event(esp, ESP_EVENT_MSGOUT);
1718			break;
1719
1720		case ESP_MIP:
1721			esp_event(esp, ESP_EVENT_MSGIN);
1722			break;
1723
1724		case ESP_CMDP:
1725			esp_event(esp, ESP_EVENT_CMD_START);
1726			break;
1727
1728		default:
1729			shost_printk(KERN_INFO, esp->host,
1730				     "Unexpected phase, sreg=%02x\n",
1731				     esp->sreg);
1732			esp_schedule_reset(esp);
1733			return 0;
1734		}
1735		goto again;
 
1736
1737	case ESP_EVENT_DATA_IN:
1738		write = 1;
1739		/* fallthru */
1740
1741	case ESP_EVENT_DATA_OUT: {
1742		struct esp_cmd_entry *ent = esp->active_cmd;
1743		struct scsi_cmnd *cmd = ent->cmd;
1744		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1745		unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1746
1747		if (esp->rev == ESP100)
1748			scsi_esp_cmd(esp, ESP_CMD_NULL);
1749
1750		if (write)
1751			ent->flags |= ESP_CMD_FLAG_WRITE;
1752		else
1753			ent->flags &= ~ESP_CMD_FLAG_WRITE;
1754
1755		if (esp->ops->dma_length_limit)
1756			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1757							     dma_len);
1758		else
1759			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1760
1761		esp->data_dma_len = dma_len;
1762
1763		if (!dma_len) {
1764			shost_printk(KERN_ERR, esp->host,
1765				     "DMA length is zero!\n");
1766			shost_printk(KERN_ERR, esp->host,
1767				     "cur adr[%08llx] len[%08x]\n",
1768				     (unsigned long long)esp_cur_dma_addr(ent, cmd),
1769				     esp_cur_dma_len(ent, cmd));
1770			esp_schedule_reset(esp);
1771			return 0;
1772		}
1773
1774		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
 
1775				  (unsigned long long)dma_addr, dma_len, write);
1776
1777		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1778				       write, ESP_CMD_DMA | ESP_CMD_TI);
1779		esp_event(esp, ESP_EVENT_DATA_DONE);
1780		break;
1781	}
1782	case ESP_EVENT_DATA_DONE: {
1783		struct esp_cmd_entry *ent = esp->active_cmd;
1784		struct scsi_cmnd *cmd = ent->cmd;
1785		int bytes_sent;
1786
1787		if (esp->ops->dma_error(esp)) {
1788			shost_printk(KERN_INFO, esp->host,
1789				     "data done, DMA error, resetting\n");
1790			esp_schedule_reset(esp);
1791			return 0;
1792		}
1793
1794		if (ent->flags & ESP_CMD_FLAG_WRITE) {
1795			/* XXX parity errors, etc. XXX */
1796
1797			esp->ops->dma_drain(esp);
1798		}
1799		esp->ops->dma_invalidate(esp);
1800
1801		if (esp->ireg != ESP_INTR_BSERV) {
1802			/* We should always see exactly a bus-service
1803			 * interrupt at the end of a successful transfer.
1804			 */
1805			shost_printk(KERN_INFO, esp->host,
1806				     "data done, not BSERV, resetting\n");
1807			esp_schedule_reset(esp);
1808			return 0;
1809		}
1810
1811		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1812
1813		esp_log_datadone("data done flgs[%x] sent[%d]\n",
1814				 ent->flags, bytes_sent);
1815
1816		if (bytes_sent < 0) {
1817			/* XXX force sync mode for this target XXX */
1818			esp_schedule_reset(esp);
1819			return 0;
1820		}
1821
1822		esp_advance_dma(esp, ent, cmd, bytes_sent);
1823		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1824		goto again;
1825	}
1826
1827	case ESP_EVENT_STATUS: {
1828		struct esp_cmd_entry *ent = esp->active_cmd;
1829
1830		if (esp->ireg & ESP_INTR_FDONE) {
1831			ent->status = esp_read8(ESP_FDATA);
1832			ent->message = esp_read8(ESP_FDATA);
1833			scsi_esp_cmd(esp, ESP_CMD_MOK);
1834		} else if (esp->ireg == ESP_INTR_BSERV) {
1835			ent->status = esp_read8(ESP_FDATA);
1836			ent->message = 0xff;
1837			esp_event(esp, ESP_EVENT_MSGIN);
1838			return 0;
1839		}
1840
1841		if (ent->message != COMMAND_COMPLETE) {
1842			shost_printk(KERN_INFO, esp->host,
1843				     "Unexpected message %x in status\n",
1844				     ent->message);
1845			esp_schedule_reset(esp);
1846			return 0;
1847		}
1848
1849		esp_event(esp, ESP_EVENT_FREE_BUS);
1850		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1851		break;
1852	}
1853	case ESP_EVENT_FREE_BUS: {
1854		struct esp_cmd_entry *ent = esp->active_cmd;
1855		struct scsi_cmnd *cmd = ent->cmd;
1856
1857		if (ent->message == COMMAND_COMPLETE ||
1858		    ent->message == DISCONNECT)
1859			scsi_esp_cmd(esp, ESP_CMD_ESEL);
1860
1861		if (ent->message == COMMAND_COMPLETE) {
1862			esp_log_cmddone("Command done status[%x] message[%x]\n",
 
1863					ent->status, ent->message);
1864			if (ent->status == SAM_STAT_TASK_SET_FULL)
1865				esp_event_queue_full(esp, ent);
1866
1867			if (ent->status == SAM_STAT_CHECK_CONDITION &&
1868			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1869				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1870				esp_autosense(esp, ent);
1871			} else {
1872				esp_cmd_is_done(esp, ent, cmd,
1873						compose_result(ent->status,
1874							       ent->message,
1875							       DID_OK));
1876			}
1877		} else if (ent->message == DISCONNECT) {
1878			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
 
1879					   cmd->device->id,
1880					   ent->tag[0], ent->tag[1]);
1881
1882			esp->active_cmd = NULL;
1883			esp_maybe_execute_command(esp);
1884		} else {
1885			shost_printk(KERN_INFO, esp->host,
1886				     "Unexpected message %x in freebus\n",
1887				     ent->message);
1888			esp_schedule_reset(esp);
1889			return 0;
1890		}
1891		if (esp->active_cmd)
1892			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1893		break;
1894	}
1895	case ESP_EVENT_MSGOUT: {
1896		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1897
1898		if (esp_debug & ESP_DEBUG_MSGOUT) {
1899			int i;
1900			printk("ESP: Sending message [ ");
1901			for (i = 0; i < esp->msg_out_len; i++)
1902				printk("%02x ", esp->msg_out[i]);
1903			printk("]\n");
1904		}
1905
1906		if (esp->rev == FASHME) {
1907			int i;
1908
1909			/* Always use the fifo.  */
1910			for (i = 0; i < esp->msg_out_len; i++) {
1911				esp_write8(esp->msg_out[i], ESP_FDATA);
1912				esp_write8(0, ESP_FDATA);
1913			}
1914			scsi_esp_cmd(esp, ESP_CMD_TI);
1915		} else {
1916			if (esp->msg_out_len == 1) {
1917				esp_write8(esp->msg_out[0], ESP_FDATA);
1918				scsi_esp_cmd(esp, ESP_CMD_TI);
1919			} else if (esp->flags & ESP_FLAG_USE_FIFO) {
1920				for (i = 0; i < esp->msg_out_len; i++)
1921					esp_write8(esp->msg_out[i], ESP_FDATA);
1922				scsi_esp_cmd(esp, ESP_CMD_TI);
1923			} else {
1924				/* Use DMA. */
1925				memcpy(esp->command_block,
1926				       esp->msg_out,
1927				       esp->msg_out_len);
1928
1929				esp->ops->send_dma_cmd(esp,
1930						       esp->command_block_dma,
1931						       esp->msg_out_len,
1932						       esp->msg_out_len,
1933						       0,
1934						       ESP_CMD_DMA|ESP_CMD_TI);
1935			}
1936		}
1937		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1938		break;
1939	}
1940	case ESP_EVENT_MSGOUT_DONE:
1941		if (esp->rev == FASHME) {
1942			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1943		} else {
1944			if (esp->msg_out_len > 1)
1945				esp->ops->dma_invalidate(esp);
 
1946
1947			/* XXX if the chip went into disconnected mode,
1948			 * we can't run the phase state machine anyway.
1949			 */
1950			if (!(esp->ireg & ESP_INTR_DC))
1951				scsi_esp_cmd(esp, ESP_CMD_NULL);
1952		}
1953
1954		esp->msg_out_len = 0;
1955
1956		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1957		goto again;
1958	case ESP_EVENT_MSGIN:
1959		if (esp->ireg & ESP_INTR_BSERV) {
1960			if (esp->rev == FASHME) {
1961				if (!(esp_read8(ESP_STATUS2) &
1962				      ESP_STAT2_FEMPTY))
1963					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1964			} else {
1965				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1966				if (esp->rev == ESP100)
1967					scsi_esp_cmd(esp, ESP_CMD_NULL);
1968			}
1969			scsi_esp_cmd(esp, ESP_CMD_TI);
1970			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1971			return 1;
1972		}
1973		if (esp->ireg & ESP_INTR_FDONE) {
1974			u8 val;
1975
1976			if (esp->rev == FASHME)
1977				val = esp->fifo[0];
1978			else
1979				val = esp_read8(ESP_FDATA);
1980			esp->msg_in[esp->msg_in_len++] = val;
1981
1982			esp_log_msgin("Got msgin byte %x\n", val);
1983
1984			if (!esp_msgin_process(esp))
1985				esp->msg_in_len = 0;
1986
1987			if (esp->rev == FASHME)
1988				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1989
1990			scsi_esp_cmd(esp, ESP_CMD_MOK);
1991
1992			/* Check whether a bus reset is to be done next */
1993			if (esp->event == ESP_EVENT_RESET)
1994				return 0;
1995
1996			if (esp->event != ESP_EVENT_FREE_BUS)
1997				esp_event(esp, ESP_EVENT_CHECK_PHASE);
1998		} else {
1999			shost_printk(KERN_INFO, esp->host,
2000				     "MSGIN neither BSERV not FDON, resetting");
2001			esp_schedule_reset(esp);
2002			return 0;
2003		}
2004		break;
2005	case ESP_EVENT_CMD_START:
2006		memcpy(esp->command_block, esp->cmd_bytes_ptr,
2007		       esp->cmd_bytes_left);
2008		esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
 
 
 
 
2009		esp_event(esp, ESP_EVENT_CMD_DONE);
2010		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
2011		break;
2012	case ESP_EVENT_CMD_DONE:
2013		esp->ops->dma_invalidate(esp);
2014		if (esp->ireg & ESP_INTR_BSERV) {
2015			esp_event(esp, ESP_EVENT_CHECK_PHASE);
2016			goto again;
2017		}
2018		esp_schedule_reset(esp);
2019		return 0;
 
2020
2021	case ESP_EVENT_RESET:
2022		scsi_esp_cmd(esp, ESP_CMD_RS);
2023		break;
2024
2025	default:
2026		shost_printk(KERN_INFO, esp->host,
2027			     "Unexpected event %x, resetting\n", esp->event);
2028		esp_schedule_reset(esp);
2029		return 0;
 
2030	}
2031	return 1;
2032}
2033
/* Fail one outstanding command with DID_RESET and release everything
 * attached to its command entry (DMA mapping, LUN tag, the entry
 * itself).  Used by esp_reset_cleanup() while tearing down state after
 * a SCSI bus reset.
 */
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	/* An in-flight autosense has its own single-buffer DMA mapping
	 * for the sense data that must be torn down separately.
	 */
	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;
	}

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}
2052
2053static void esp_clear_hold(struct scsi_device *dev, void *data)
2054{
2055	struct esp_lun_data *lp = dev->hostdata;
2056
2057	BUG_ON(lp->num_tagged);
2058	lp->hold = 0;
2059}
2060
/* Complete every queued and active command with DID_RESET and return
 * per-target transfer settings to power-on defaults so that sync/wide
 * parameters get renegotiated from scratch.  Called with host->lock
 * held (from __esp_interrupt() and the host reset handler).
 */
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	/* Commands never issued to the bus: just fail and recycle them. */
	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	/* Commands already on the bus need full DMA/tag teardown. */
	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers.  */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		/* The reset killed all tagged commands, so each LUN's
		 * "hold" state can be cleared as well.
		 */
		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
2101
/* Core interrupt service routine.  Runs under host->lock. */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	/*
	 * Latch chip state first.  Once INTRPT is read, STATUS and
	 * SSTEP are cleared, so the read order here matters.
	 */
	esp->sreg = esp_read8(ESP_STATUS);
	esp->seqreg = esp_read8(ESP_SSTEP);
	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		/* Negative return means a spurious interrupt was eaten;
		 * nothing more to do.
		 */
		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	/* A SCSI bus reset interrupt always finishes reset processing. */
	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		if (esp->eh_reset) {
			/* Wake a waiting esp_eh_bus_reset_handler(). */
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		/* On FASHME, capture residual FIFO bytes now via
		 * hme_read_fifo(), except while in a data phase or in
		 * the middle of status/data-done handling.
		 */
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (esp->ireg & ESP_INTR_RSEL) {
			/* Reselected by a target: finish any in-progress
			 * selection first, then switch to the
			 * reconnecting command.
			 */
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		} else {
			/* Some combination of FDONE, BSERV, DC. */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		}
	}
	/* Crank the event state machine until it reports completion. */
	while (!intr_done)
		intr_done = esp_process_event(esp);
}
2179
/* Shared IRQ handler entry point for all ESP variants.  After each
 * pass through __esp_interrupt(), if the state machine set
 * ESP_FLAG_QUICKIRQ_CHECK, briefly poll for a follow-on interrupt to
 * avoid the latency of a full interrupt round-trip.
 */
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			/* Short bounded poll for the next interrupt. */
			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			/* Limit reached without a new interrupt: stop. */
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
2211
/* Probe which ESP chip variant is present by testing whether the
 * CFG2 and CFG3 registers are implemented (writes that read back).
 * Sets esp->rev and, for fast chips, esp->sync_defp.
 */
static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	if (esp->config2 == 0) {
		esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
		esp_write8(esp->config2, ESP_CFG2);

		val = esp_read8(ESP_CFG2);
		val &= ~ESP_CONFIG2_MAGIC;

		esp->config2 = 0;
		if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
			/*
			 * If what we write to cfg2 does not come back,
			 * cfg2 is not implemented.
			 * Therefore this must be a plain esp100.
			 */
			esp->rev = ESP100;
			return;
		}
	}

	/* cfg2 exists; now probe cfg3 the same way with the value 5. */
	esp_set_all_config3(esp, 5);
	esp->prev_cfg3 = 5;
	esp_write8(esp->config2, ESP_CFG2);
	esp_write8(0, ESP_CFG3);
	esp_write8(esp->prev_cfg3, ESP_CFG3);

	val = esp_read8(ESP_CFG3);
	if (val != 5) {
		/* The cfg2 register is implemented, however
		 * cfg3 is not, must be esp100a.
		 */
		esp->rev = ESP100A;
	} else {
		esp_set_all_config3(esp, 0);
		esp->prev_cfg3 = 0;
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		/* All of cfg{1,2,3} implemented, must be one of
		 * the fas variants, figure out which one.
		 */
		if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
			esp->rev = FAST;
			esp->sync_defp = SYNC_DEFP_FAST;
		} else {
			esp->rev = ESP236;
		}
	}
}
2264
2265static void esp_init_swstate(struct esp *esp)
2266{
2267	int i;
2268
2269	INIT_LIST_HEAD(&esp->queued_cmds);
2270	INIT_LIST_HEAD(&esp->active_cmds);
2271	INIT_LIST_HEAD(&esp->esp_cmd_pool);
2272
2273	/* Start with a clear state, domain validation (via ->slave_configure,
2274	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2275	 * commands.
2276	 */
2277	for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2278		esp->target[i].flags = 0;
2279		esp->target[i].nego_goal_period = 0;
2280		esp->target[i].nego_goal_offset = 0;
2281		esp->target[i].nego_goal_width = 0;
2282		esp->target[i].nego_goal_tags = 0;
2283	}
2284}
2285
/* This places the ESP into a known state at boot time: reset the DMA
 * engine, reset the chip, then pulse a SCSI bus reset with the reset
 * interrupt temporarily disabled so it does not fire during bring-up.
 */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	/* Restore the normal cfg1 value (re-enables the reset irq). */
	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}
2310
/* Derive all clock-dependent chip parameters from the input crystal
 * frequency (esp->cfreq).  See the long comment below for the details.
 */
static void esp_set_clock_params(struct esp *esp)
{
	int fhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place.  We are trying to
	 * basically figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 *    This is a representation of the input crystal clock frequency
	 *    going into the ESP on this machine.  Any operation whose timing
	 *    is longer than 400ns depends on this value being correct.  For
	 *    example, you'll get blips for arbitration/selection during high
	 *    load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 *    The ESP isn't very bright and will arbitrate for the bus and try
	 *    to select a target forever if you let it.  This value tells the
	 *    ESP when it has taken too long to negotiate and that it should
	 *    interrupt the CPU so we can see what happened.  The value is
	 *    computed as follows (from NCR/Symbios chip docs).
	 *
	 *          (Time Out Period) *  (Input Clock)
	 *    STO = ----------------------------------
	 *          (8192) * (Clock Conversion Factor)
	 *
	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
	 * c) Empirical constants for synchronous offset and transfer period
	 *    register values
	 *
	 *    This entails the smallest and largest sync period we could ever
	 *    handle on this ESP.
	 */
	fhz = esp->cfreq;

	/* Round MHz up to the nearest multiple of 5 to get the CCF. */
	ccf = ((fhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHZ.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property.  All
	 * other machines provide useful values it seems.
	 */
	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
		fhz = 20000000;
		ccf = 4;
	}

	/* The CCF register field encodes 8 as 0. */
	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fhz;
	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}
2371
/* Human-readable chip names, indexed by esp->rev.  The order must
 * match the revision enumeration used to set esp->rev.
 */
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
	"AM53C974",
};
2382
/* SPI transport template shared by all ESP host instances;
 * allocated in esp_init(), released in esp_exit().
 */
static struct scsi_transport_template *esp_transport_template;
2384
/* Register a fully set-up ESP instance with the SCSI midlayer: probe
 * the chip revision, reset it to a known state, then add and scan the
 * host.  Returns 0 on success or the error from scsi_add_host().
 */
int scsi_esp_register(struct esp *esp, struct device *dev)
{
	static int instance;
	int err;

	if (!esp->num_tags)
		esp->num_tags = ESP_DEFAULT_TAGS;
	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	/* NOTE(review): the "%1p" field width is a no-op for pointers;
	 * presumably "%p" was intended — confirm before changing.
	 */
	dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
		   esp->host->unique_id, esp->regs, esp->dma_regs,
		   esp->host->irq);
	dev_printk(KERN_INFO, dev,
		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
		   esp->host->unique_id, esp_chip_names[esp->rev],
		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, dev);
	if (err)
		return err;

	/* Only consume an instance number once registration succeeded. */
	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);
2427
/* Detach a previously registered ESP host from the SCSI midlayer. */
void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);
2433
2434static int esp_target_alloc(struct scsi_target *starget)
2435{
2436	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2437	struct esp_target_data *tp = &esp->target[starget->id];
2438
2439	tp->starget = starget;
2440
2441	return 0;
2442}
2443
2444static void esp_target_destroy(struct scsi_target *starget)
2445{
2446	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2447	struct esp_target_data *tp = &esp->target[starget->id];
2448
2449	tp->starget = NULL;
2450}
2451
2452static int esp_slave_alloc(struct scsi_device *dev)
2453{
2454	struct esp *esp = shost_priv(dev->host);
2455	struct esp_target_data *tp = &esp->target[dev->id];
2456	struct esp_lun_data *lp;
2457
2458	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2459	if (!lp)
2460		return -ENOMEM;
2461	dev->hostdata = lp;
2462
2463	spi_min_period(tp->starget) = esp->min_period;
2464	spi_max_offset(tp->starget) = 15;
2465
2466	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2467		spi_max_width(tp->starget) = 1;
2468	else
2469		spi_max_width(tp->starget) = 0;
2470
2471	return 0;
2472}
2473
/* Midlayer ->slave_configure hook: set the queue depth for tagged
 * devices, permit disconnects, and kick off SPI domain validation
 * (which negotiates sync/wide/tagged settings) once per device.
 */
static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];

	if (dev->tagged_supported)
		scsi_change_queue_depth(dev, esp->num_tags);

	tp->flags |= ESP_TGT_DISCONNECT;

	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}
2489
2490static void esp_slave_destroy(struct scsi_device *dev)
2491{
2492	struct esp_lun_data *lp = dev->hostdata;
2493
2494	kfree(lp);
2495	dev->hostdata = NULL;
2496}
2497
/* Error-handler abort.  Three cases:
 *  - command still queued: remove it and complete with DID_ABORT;
 *  - command is the active one on the bus: send ABORT TASK SET via
 *    message-out and wait for the interrupt path to complete it;
 *  - command is disconnected: give up and let the midlayer escalate
 *    to a bus or host reset.
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
		     cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		shost_printk(KERN_ERR, esp->host,
			     "Current command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* End of the debug dump; now take the lock for the real work. */
	spin_lock_irqsave(esp->host->host_lock, flags);

	/* Look for the command on the not-yet-issued queue first. */
	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected.  This is not easy to
		 * abort.  For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target.  Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyways this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Wait for the interrupt path to complete the abort;
	 * on timeout, detach eh_done under the lock and fail.
	 */
	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}
2614
/* Error-handler bus reset: issue a SCSI bus reset and wait for the
 * interrupt path (__esp_interrupt) to signal completion through
 * esp->eh_reset.
 */
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Give devices time to settle after the bus reset. */
	ssleep(esp_bus_reset_settle);

	/* On timeout, detach the completion under the lock and fail. */
	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}
2649
/* All bets are off, reset the entire device.  Performs the full
 * bring-up sequence and fails every outstanding command, then waits
 * for the bus to settle.
 */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Give devices time to settle after the bus reset. */
	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}
2665
/* scsi_host_template ->info hook: short constant driver description. */
static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}
2670
/* Host template shared by all bus-front-end ESP drivers; exported so
 * each front end can pass it to scsi_host_alloc().
 */
struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	/* The driver sleeps esp_bus_reset_settle itself at register time. */
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);
2692
2693static void esp_get_signalling(struct Scsi_Host *host)
2694{
2695	struct esp *esp = shost_priv(host);
2696	enum spi_signal_type type;
2697
2698	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2699		type = SPI_SIGNAL_HVD;
2700	else
2701		type = SPI_SIGNAL_SE;
2702
2703	spi_signalling(host) = type;
2704}
2705
2706static void esp_set_offset(struct scsi_target *target, int offset)
2707{
2708	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2709	struct esp *esp = shost_priv(host);
2710	struct esp_target_data *tp = &esp->target[target->id];
2711
2712	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2713		tp->nego_goal_offset = 0;
2714	else
2715		tp->nego_goal_offset = offset;
2716	tp->flags |= ESP_TGT_CHECK_NEGO;
2717}
2718
2719static void esp_set_period(struct scsi_target *target, int period)
2720{
2721	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2722	struct esp *esp = shost_priv(host);
2723	struct esp_target_data *tp = &esp->target[target->id];
2724
2725	tp->nego_goal_period = period;
2726	tp->flags |= ESP_TGT_CHECK_NEGO;
2727}
2728
2729static void esp_set_width(struct scsi_target *target, int width)
2730{
2731	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2732	struct esp *esp = shost_priv(host);
2733	struct esp_target_data *tp = &esp->target[target->id];
2734
2735	tp->nego_goal_width = (width ? 1 : 0);
2736	tp->flags |= ESP_TGT_CHECK_NEGO;
2737}
2738
/* SPI transport-class operations: expose sync offset/period and wide
 * controls through sysfs and report the bus signalling type.
 */
static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};
2748
/* Module init: compile-time check that the per-command private data
 * fits inside the midlayer's scsi_pointer area, then attach the SPI
 * transport template shared by all ESP instances.
 */
static int __init esp_init(void)
{
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}
2760
/* Module exit: release the SPI transport template from esp_init(). */
static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}
2765
/* Module metadata plus tunable parameters (reset settle time and the
 * debug bitmask whose bits are documented in the description below).
 */
MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
"	0x00000001	Log interrupt events\n"
"	0x00000002	Log scsi commands\n"
"	0x00000004	Log resets\n"
"	0x00000008	Log message in events\n"
"	0x00000010	Log message out events\n"
"	0x00000020	Log command completion\n"
"	0x00000040	Log disconnects\n"
"	0x00000080	Log data start\n"
"	0x00000100	Log data done\n"
"	0x00000200	Log reconnects\n"
"	0x00000400	Log auto-sense data\n"
);

module_init(esp_init);
module_exit(esp_exit);