   1/* -*- mode: c; c-basic-offset: 8 -*- */
   2
   3/* NCR (or Symbios) 53c700 and 53c700-66 Driver
   4 *
   5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
   6**-----------------------------------------------------------------------------
   7**  
   8**  This program is free software; you can redistribute it and/or modify
   9**  it under the terms of the GNU General Public License as published by
  10**  the Free Software Foundation; either version 2 of the License, or
  11**  (at your option) any later version.
  12**
  13**  This program is distributed in the hope that it will be useful,
  14**  but WITHOUT ANY WARRANTY; without even the implied warranty of
  15**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16**  GNU General Public License for more details.
  17**
  18**  You should have received a copy of the GNU General Public License
  19**  along with this program; if not, write to the Free Software
  20**  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  21**
  22**-----------------------------------------------------------------------------
  23 */
  24
  25/* Notes:
  26 *
  27 * This driver is designed exclusively for these chips (virtually the
  28 * earliest of the scripts engine chips).  They need their own drivers
  29 * because they are missing so many of the scripts and snazzy register
  30 * features of their elder brothers (the 710, 720 and 770).
  31 *
  32 * The 700 is the lowliest of the line, it can only do async SCSI.
  33 * The 700-66 can at least do synchronous SCSI up to 10MHz.
  34 * 
  35 * The 700 chip has no host bus interface logic of its own.  However,
  36 * it is usually mapped to a location with well defined register
  37 * offsets.  Therefore, if you can determine the base address and the
  38 * irq your board incorporating this chip uses, you can probably use
  39 * this driver to run it (although you'll probably have to write a
  40 * minimal wrapper for the purpose---see the NCR_D700 driver for
  41 * details about how to do this).
  42 *
  43 *
  44 * TODO List:
  45 *
  46 * 1. Better statistics in the proc fs
  47 *
  48 * 2. Implement message queue (queues SCSI messages like commands) and make
  49 *    the abort and device reset functions use them.
  50 * */
  51
  52/* CHANGELOG
  53 *
  54 * Version 2.8
  55 *
  56 * Fixed a bad bug affecting tag starvation processing (previously the
  57 * driver would hang the system if too many tags starved).  Also fixed
  58 * a bad bug having to do with 10 byte command processing and REQUEST
  59 * SENSE (the command would loop forever getting a transfer length
  60 * mismatch in the CMD phase).
  61 *
  62 * Version 2.7
  63 *
  64 * Fixed scripts problem which caused certain devices (notably CDRWs)
  65 * to hang on initial INQUIRY.  Updated NCR_700_readl/writel to use
  66 * __raw_readl/writel for parisc compatibility (Thomas
  67 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
  68 * for sense requests (Ryan Bradetich).
  69 *
  70 * Version 2.6
  71 *
  72 * Following test of the 64 bit parisc kernel by Richard Hirst,
  73 * several problems have now been corrected.  Also adds support for
  74 * consistent memory allocation.
  75 *
  76 * Version 2.5
  77 * 
  78 * More compatibility changes for the 710 (now actually works).  Enhanced
  79 * support for odd clock speeds which constrain SDTR negotiations.
  80 * Corrected cacheline separation for SCSI messages and status on
  81 * incoherent architectures.  Use of the PCI mapping functions on
  82 * buffers to begin support for 64 bit drivers.
  83 *
  84 * Version 2.4
  85 *
  86 * Added support for the 53c710 chip (in 53c700 emulation mode only---no 
  87 * special 53c710 instructions or registers are used).
  88 *
  89 * Version 2.3
  90 *
  91 * More endianness/cache coherency changes.
  92 *
  93 * Better bad device handling (handles devices lying about tag
  94 * queueing support and devices which fail to provide sense data on
  95 * contingent allegiance conditions)
  96 *
  97 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
  98 * debugging this driver on the parisc architecture and suggesting
  99 * many improvements and bug fixes.
 100 *
 101 * Thanks also go to Linuxcare Inc. for providing several PARISC
 102 * machines for me to debug the driver on.
 103 *
 104 * Version 2.2
 105 *
 106 * Made the driver mem or io mapped; added endian invariance; added
 107 * dma cache flushing operations for architectures which need it;
 108 * added support for more varied clocking speeds.
 109 *
 110 * Version 2.1
 111 *
 112 * Initial modularisation from the D700.  See NCR_D700.c for the rest of
 113 * the changelog.
 114 * */
 115#define NCR_700_VERSION "2.8"
 116
 117#include <linux/kernel.h>
 118#include <linux/types.h>
 119#include <linux/string.h>
 120#include <linux/slab.h>
 121#include <linux/ioport.h>
 122#include <linux/delay.h>
 123#include <linux/spinlock.h>
 124#include <linux/completion.h>
 125#include <linux/init.h>
 126#include <linux/proc_fs.h>
 127#include <linux/blkdev.h>
 128#include <linux/module.h>
 129#include <linux/interrupt.h>
 130#include <linux/device.h>
 131#include <asm/dma.h>
 132#include <asm/io.h>
 133#include <asm/pgtable.h>
 134#include <asm/byteorder.h>
 135
 136#include <scsi/scsi.h>
 137#include <scsi/scsi_cmnd.h>
 138#include <scsi/scsi_dbg.h>
 139#include <scsi/scsi_eh.h>
 140#include <scsi/scsi_host.h>
 141#include <scsi/scsi_tcq.h>
 142#include <scsi/scsi_transport.h>
 143#include <scsi/scsi_transport_spi.h>
 144
 145#include "53c700.h"
 146
 147/* NOTE: For 64 bit drivers there are points in the code where we use
 148 * a non dereferenceable pointer to point to a structure in dma-able
 149 * memory (which is 32 bits) so that we can use all of the structure
 150 * operations but take the address at the end.  This macro allows us
 151 * to truncate the 64 bit pointer down to 32 bits without the compiler
 152 * complaining */
 153#define to32bit(x)	((__u32)((unsigned long)(x)))
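/* Illustrative usage note (not part of the original driver text): the script
 * patching later in this file does things like
 *	script_patch_32_abs(hostdata->dev, hostdata->script,
 *	                    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
 * i.e. all arithmetic is done on the full (non-dereferenceable) pointer and
 * only the final value handed to the script is truncated to 32 bits. */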
 154
 155#ifdef NCR_700_DEBUG
 156#define STATIC
 157#else
 158#define STATIC static
 159#endif
 160
 161MODULE_AUTHOR("James Bottomley");
 162MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
 163MODULE_LICENSE("GPL");
 164
 165/* This is the script */
 166#include "53c700_d.h"
 167
 168
 169STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
 170STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
 171STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
 172STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
 173STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
 174STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
 175STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
 176STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
 177static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
 178
 179STATIC struct device_attribute *NCR_700_dev_attrs[];
 180
 181STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
 182
 183static char *NCR_700_phase[] = {
 184	"",
 185	"after selection",
 186	"before command phase",
 187	"after command phase",
 188	"after status phase",
 189	"after data in phase",
 190	"after data out phase",
 191	"during data phase",
 192};
 193
 194static char *NCR_700_condition[] = {
 195	"",
 196	"NOT MSG_OUT",
 197	"UNEXPECTED PHASE",
 198	"NOT MSG_IN",
 199	"UNEXPECTED MSG",
 200	"MSG_IN",
 201	"SDTR_MSG RECEIVED",
 202	"REJECT_MSG RECEIVED",
 203	"DISCONNECT_MSG RECEIVED",
 204	"MSG_OUT",
 205	"DATA_IN",
 206	
 207};
 208
 209static char *NCR_700_fatal_messages[] = {
 210	"unexpected message after reselection",
 211	"still MSG_OUT after message injection",
 212	"not MSG_IN after selection",
 213	"Illegal message length received",
 214};
 215
 216static char *NCR_700_SBCL_bits[] = {
 217	"IO ",
 218	"CD ",
 219	"MSG ",
 220	"ATN ",
 221	"SEL ",
 222	"BSY ",
 223	"ACK ",
 224	"REQ ",
 225};
 226
 227static char *NCR_700_SBCL_to_phase[] = {
 228	"DATA_OUT",
 229	"DATA_IN",
 230	"CMD_OUT",
 231	"STATUS",
 232	"ILLEGAL PHASE",
 233	"ILLEGAL PHASE",
 234	"MSG OUT",
 235	"MSG IN",
 236};
 237
 238/* This translates the SDTR message offset and period to a value
 239 * which can be loaded into the SXFER_REG.
 240 *
 241 * NOTE: According to SCSI-2, the true transfer period (in ns) is
 242 *       actually four times this period value */
 243static inline __u8
 244NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
 245			       __u8 offset, __u8 period)
 246{
 247	int XFERP;
 248
 249	__u8 min_xferp = (hostdata->chip710
 250			  ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
 251	__u8 max_offset = (hostdata->chip710
 252			   ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
 253
 254	if(offset == 0)
 255		return 0;
 256
 257	if(period < hostdata->min_period) {
  258		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %dns\n", period*4, hostdata->min_period*4);
 259		period = hostdata->min_period;
 260	}
 261	XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
 262	if(offset > max_offset) {
 263		printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
 264		       offset, max_offset);
 265		offset = max_offset;
 266	}
 267	if(XFERP < min_xferp) {
 268		XFERP =  min_xferp;
 269	}
 270	return (offset & 0x0f) | (XFERP & 0x07)<<4;
 271}
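
/* Worked example (illustrative figures only, not taken from real hardware):
 * with a hypothetical 40MHz synchronous clock and an SDTR period value of 50
 * (a true period of 200ns, i.e. 5MT/s), XFERP = (200 * 40)/1000 - 4 = 4, so
 * an offset of 8 would give an SXFER value of (8 & 0x0f) | (4 << 4) = 0x48. */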
 272
 273static inline __u8
 274NCR_700_get_SXFER(struct scsi_device *SDp)
 275{
 276	struct NCR_700_Host_Parameters *hostdata = 
 277		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
 278
 279	return NCR_700_offset_period_to_sxfer(hostdata,
 280					      spi_offset(SDp->sdev_target),
 281					      spi_period(SDp->sdev_target));
 282}
 283
 284struct Scsi_Host *
 285NCR_700_detect(struct scsi_host_template *tpnt,
 286	       struct NCR_700_Host_Parameters *hostdata, struct device *dev)
 287{
 288	dma_addr_t pScript, pSlots;
 289	__u8 *memory;
 290	__u32 *script;
 291	struct Scsi_Host *host;
 292	static int banner = 0;
 293	int j;
 294
 295	if(tpnt->sdev_attrs == NULL)
 296		tpnt->sdev_attrs = NCR_700_dev_attrs;
 297
 298	memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
 299				 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
 300	if(memory == NULL) {
 301		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
 302		return NULL;
 303	}
 304
 305	script = (__u32 *)memory;
 306	hostdata->msgin = memory + MSGIN_OFFSET;
 307	hostdata->msgout = memory + MSGOUT_OFFSET;
 308	hostdata->status = memory + STATUS_OFFSET;
 309	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
 310	hostdata->dev = dev;
 311
 312	pSlots = pScript + SLOTS_OFFSET;
 313
 314	/* Fill in the missing routines from the host template */
 315	tpnt->queuecommand = NCR_700_queuecommand;
 316	tpnt->eh_abort_handler = NCR_700_abort;
 317	tpnt->eh_host_reset_handler = NCR_700_host_reset;
 318	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
 319	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
 320	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
 321	tpnt->use_clustering = ENABLE_CLUSTERING;
 322	tpnt->slave_configure = NCR_700_slave_configure;
 323	tpnt->slave_destroy = NCR_700_slave_destroy;
 324	tpnt->slave_alloc = NCR_700_slave_alloc;
 325	tpnt->change_queue_depth = NCR_700_change_queue_depth;
 326
 327	if(tpnt->name == NULL)
 328		tpnt->name = "53c700";
 329	if(tpnt->proc_name == NULL)
 330		tpnt->proc_name = "53c700";
 331
 332	host = scsi_host_alloc(tpnt, 4);
 333	if (!host)
 334		return NULL;
 335	memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
 336	       * NCR_700_COMMAND_SLOTS_PER_HOST);
 337	for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
 338		dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
 339					  - (unsigned long)&hostdata->slots[0].SG[0]);
 340		hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
 341		if(j == 0)
 342			hostdata->free_list = &hostdata->slots[j];
 343		else
 344			hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
 345		hostdata->slots[j].state = NCR_700_SLOT_FREE;
 346	}
 347
 348	for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
 349		script[j] = bS_to_host(SCRIPT[j]);
 350
 351	/* adjust all labels to be bus physical */
 352	for (j = 0; j < PATCHES; j++)
 353		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
 354	/* now patch up fixed addresses. */
 355	script_patch_32(hostdata->dev, script, MessageLocation,
 356			pScript + MSGOUT_OFFSET);
 357	script_patch_32(hostdata->dev, script, StatusAddress,
 358			pScript + STATUS_OFFSET);
 359	script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
 360			pScript + MSGIN_OFFSET);
 361
 362	hostdata->script = script;
 363	hostdata->pScript = pScript;
 364	dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
 365	hostdata->state = NCR_700_HOST_FREE;
 366	hostdata->cmd = NULL;
 367	host->max_id = 8;
 368	host->max_lun = NCR_700_MAX_LUNS;
 369	BUG_ON(NCR_700_transport_template == NULL);
 370	host->transportt = NCR_700_transport_template;
 371	host->unique_id = (unsigned long)hostdata->base;
 372	hostdata->eh_complete = NULL;
 373	host->hostdata[0] = (unsigned long)hostdata;
 374	/* kick the chip */
 375	NCR_700_writeb(0xff, host, CTEST9_REG);
 376	if (hostdata->chip710)
 377		hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
 378	else
 379		hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
 380	hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
 381	if (banner == 0) {
 382		printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
 383		banner = 1;
 384	}
 385	printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
 386	       hostdata->chip710 ? "53c710" :
 387	       (hostdata->fast ? "53c700-66" : "53c700"),
 388	       hostdata->rev, hostdata->differential ?
 389	       "(Differential)" : "");
 390	/* reset the chip */
 391	NCR_700_chip_reset(host);
 392
 393	if (scsi_add_host(host, dev)) {
 394		dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
 395		scsi_host_put(host);
 396		return NULL;
 397	}
 398
 399	spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
 400		SPI_SIGNAL_SE;
 401
 402	return host;
 403}
 404
 405int
 406NCR_700_release(struct Scsi_Host *host)
 407{
 408	struct NCR_700_Host_Parameters *hostdata = 
 409		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 410
 411	dma_free_attrs(hostdata->dev, TOTAL_MEM_SIZE, hostdata->script,
 412		       hostdata->pScript, DMA_ATTR_NON_CONSISTENT);
 413	return 1;
 414}
 415
 416static inline __u8
 417NCR_700_identify(int can_disconnect, __u8 lun)
 418{
 419	return IDENTIFY_BASE |
 420		((can_disconnect) ? 0x40 : 0) |
 421		(lun & NCR_700_LUN_MASK);
 422}
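
/* For example (assuming the standard SCSI IDENTIFY_BASE of 0x80): a
 * disconnect-capable command to LUN 2 gives
 *	NCR_700_identify(1, 2) == 0x80 | 0x40 | 0x02 == 0xc2
 * which is the IDENTIFY message byte placed in msgout[0] by
 * NCR_700_start_command() below. */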
 423
 424/*
 425 * Function : static int NCR_700_data_residual (struct Scsi_Host *host)
 426 *
 427 * Purpose : return residual data count of what's in the chip.  If you
 428 * really want to know what this function is doing, it's almost a
 429 * direct transcription of the algorithm described in the 53c710
 430 * guide, except that the DBC and DFIFO registers are only 6 bits
 431 * wide on a 53c700.
 432 *
 433 * Inputs : host - SCSI host */
 434static inline int
 435NCR_700_data_residual (struct Scsi_Host *host) {
 436	struct NCR_700_Host_Parameters *hostdata = 
 437		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 438	int count, synchronous = 0;
 439	unsigned int ddir;
 440
 441	if(hostdata->chip710) {
 442		count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
 443			 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
 444	} else {
 445		count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
 446			 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
 447	}
 448	
 449	if(hostdata->fast)
 450		synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
 451	
 452	/* get the data direction */
 453	ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
 454
 455	if (ddir) {
 456		/* Receive */
 457		if (synchronous) 
 458			count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
 459		else
 460			if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
 461				++count;
 462	} else {
 463		/* Send */
 464		__u8 sstat = NCR_700_readb(host, SSTAT1_REG);
 465		if (sstat & SODL_REG_FULL)
 466			++count;
 467		if (synchronous && (sstat & SODR_REG_FULL))
 468			++count;
 469	}
 470#ifdef NCR_700_DEBUG
 471	if(count)
 472		printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
 473#endif
 474	return count;
 475}
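
/* Worked example (illustrative register values): on a 53c700, if DFIFO reads
 * 0x23 and the low bits of DBC read 0x1f, then count = (0x23 - 0x1f) & 0x3f
 * = 4 bytes still sitting in the DMA FIFO, before the SIDL/SODL/SODR
 * corrections above are applied. */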
 476
 477/* print out the SCSI wires and corresponding phase from the SBCL register
 478 * in the chip */
 479static inline char *
 480sbcl_to_string(__u8 sbcl)
 481{
 482	int i;
 483	static char ret[256];
 484
 485	ret[0]='\0';
 486	for(i=0; i<8; i++) {
 487		if((1<<i) & sbcl) 
 488			strcat(ret, NCR_700_SBCL_bits[i]);
 489	}
 490	strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
 491	return ret;
 492}
 493
 494static inline __u8
 495bitmap_to_number(__u8 bitmap)
 496{
 497	__u8 i;
 498
 499	for(i=0; i<8 && !(bitmap &(1<<i)); i++)
 500		;
 501	return i;
 502}
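
/* e.g. a reselection ID bitmap of 0x20 (only bit 5 set) converts to SCSI ID 5;
 * a zero bitmap falls all the way through the loop and returns 8. */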
 503
 504/* Pull a slot off the free list */
 505STATIC struct NCR_700_command_slot *
 506find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
 507{
 508	struct NCR_700_command_slot *slot = hostdata->free_list;
 509
 510	if(slot == NULL) {
 511		/* sanity check */
 512		if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
 513			printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
 514		return NULL;
 515	}
 516
 517	if(slot->state != NCR_700_SLOT_FREE)
 518		/* should panic! */
 519		printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
 520		
 521
 522	hostdata->free_list = slot->ITL_forw;
 523	slot->ITL_forw = NULL;
 524
 525
 526	/* NOTE: set the state to busy here, not queued, since this
 527	 * indicates the slot is in use and cannot be run by the IRQ
 528	 * finish routine.  If we cannot queue the command when it
 529	 * is properly built, we then change to NCR_700_SLOT_QUEUED */
 530	slot->state = NCR_700_SLOT_BUSY;
 531	slot->flags = 0;
 532	hostdata->command_slot_count++;
 533	
 534	return slot;
 535}
 536
 537STATIC void 
 538free_slot(struct NCR_700_command_slot *slot,
 539	  struct NCR_700_Host_Parameters *hostdata)
 540{
 541	if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
 542		printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
 543	}
 544	if(slot->state == NCR_700_SLOT_FREE) {
 545		printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
 546	}
 547	
 548	slot->resume_offset = 0;
 549	slot->cmnd = NULL;
 550	slot->state = NCR_700_SLOT_FREE;
 551	slot->ITL_forw = hostdata->free_list;
 552	hostdata->free_list = slot;
 553	hostdata->command_slot_count--;
 554}
 555
 556
 557/* This routine really does very little.  The command is indexed on
 558   the ITL and (if tagged) the ITLQ lists in _queuecommand */
 559STATIC void
 560save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
 561		     struct scsi_cmnd *SCp, __u32 dsp)
 562{
 563	/* It's just possible that this gets executed twice */
 564	if(SCp != NULL) {
 565		struct NCR_700_command_slot *slot =
 566			(struct NCR_700_command_slot *)SCp->host_scribble;
 567
 568		slot->resume_offset = dsp;
 569	}
 570	hostdata->state = NCR_700_HOST_FREE;
 571	hostdata->cmd = NULL;
 572}
 573
 574STATIC inline void
 575NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
 576	      struct NCR_700_command_slot *slot)
 577{
 578	if(SCp->sc_data_direction != DMA_NONE &&
 579	   SCp->sc_data_direction != DMA_BIDIRECTIONAL)
 580		scsi_dma_unmap(SCp);
 581}
 582
 583STATIC inline void
 584NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
 585	       struct scsi_cmnd *SCp, int result)
 586{
 587	hostdata->state = NCR_700_HOST_FREE;
 588	hostdata->cmd = NULL;
 589
 590	if(SCp != NULL) {
 591		struct NCR_700_command_slot *slot =
 592			(struct NCR_700_command_slot *)SCp->host_scribble;
 593
 594		dma_unmap_single(hostdata->dev, slot->pCmd,
 595				 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
 596		if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
 597			char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
 598
 599			dma_unmap_single(hostdata->dev, slot->dma_handle,
 600					 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 601			/* restore the old result if the request sense was
 602			 * successful */
 603			if (result == 0)
 604				result = cmnd[7];
 605			/* restore the original length */
 606			SCp->cmd_len = cmnd[8];
 607		} else
 608			NCR_700_unmap(hostdata, SCp, slot);
 609
 610		free_slot(slot, hostdata);
 611#ifdef NCR_700_DEBUG
 612		if(NCR_700_get_depth(SCp->device) == 0 ||
 613		   NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
 614			printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
 615			       NCR_700_get_depth(SCp->device));
 616#endif /* NCR_700_DEBUG */
 617		NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
 618
 619		SCp->host_scribble = NULL;
 620		SCp->result = result;
 621		SCp->scsi_done(SCp);
 622	} else {
 623		printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
 624	}
 625}
 626
 627
 628STATIC void
 629NCR_700_internal_bus_reset(struct Scsi_Host *host)
 630{
 631	/* Bus reset */
 632	NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
 633	udelay(50);
 634	NCR_700_writeb(0, host, SCNTL1_REG);
 635
 636}
 637
 638STATIC void
 639NCR_700_chip_setup(struct Scsi_Host *host)
 640{
 641	struct NCR_700_Host_Parameters *hostdata = 
 642		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 643	__u8 min_period;
 644	__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
 645
 646	if(hostdata->chip710) {
 647		__u8 burst_disable = 0;
 648		__u8 burst_length = 0;
 649
 650		switch (hostdata->burst_length) {
 651			case 1:
 652			        burst_length = BURST_LENGTH_1;
 653			        break;
 654			case 2:
 655			        burst_length = BURST_LENGTH_2;
 656			        break;
 657			case 4:
 658			        burst_length = BURST_LENGTH_4;
 659			        break;
 660			case 8:
 661			        burst_length = BURST_LENGTH_8;
 662			        break;
 663			default:
 664			        burst_disable = BURST_DISABLE;
 665			        break;
 666		}
 667		hostdata->dcntl_extra |= COMPAT_700_MODE;
 668
 669		NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
 670		NCR_700_writeb(burst_length | hostdata->dmode_extra,
 671			       host, DMODE_710_REG);
 672		NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
 673			       (hostdata->differential ? DIFF : 0),
 674			       host, CTEST7_REG);
 675		NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
 676		NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
 677			       | AUTO_ATN, host, SCNTL0_REG);
 678	} else {
 679		NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
 680			       host, DMODE_700_REG);
 681		NCR_700_writeb(hostdata->differential ? 
 682			       DIFF : 0, host, CTEST7_REG);
 683		if(hostdata->fast) {
 684			/* this is for 700-66, does nothing on 700 */
 685			NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION 
 686				       | GENERATE_RECEIVE_PARITY, host,
 687				       CTEST8_REG);
 688		} else {
 689			NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
 690				       | PARITY | AUTO_ATN, host, SCNTL0_REG);
 691		}
 692	}
 693
 694	NCR_700_writeb(1 << host->this_id, host, SCID_REG);
 695	NCR_700_writeb(0, host, SBCL_REG);
 696	NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
 697
 698	NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
 699	     | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
 700
 701	NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
 702	NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
 703	if(hostdata->clock > 75) {
 704		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
 705		/* do the best we can, but the async clock will be out
 706		 * of spec: sync divider 2, async divider 3 */
 707		DEBUG(("53c700: sync 2 async 3\n"));
 708		NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
 709		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 710		hostdata->sync_clock = hostdata->clock/2;
 711	} else	if(hostdata->clock > 50  && hostdata->clock <= 75) {
 712		/* sync divider 1.5, async divider 3 */
 713		DEBUG(("53c700: sync 1.5 async 3\n"));
 714		NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
 715		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 716		hostdata->sync_clock = hostdata->clock*2;
 717		hostdata->sync_clock /= 3;
 718		
 719	} else if(hostdata->clock > 37 && hostdata->clock <= 50) {
 720		/* sync divider 1, async divider 2 */
 721		DEBUG(("53c700: sync 1 async 2\n"));
 722		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
 723		NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 724		hostdata->sync_clock = hostdata->clock;
 725	} else if(hostdata->clock > 25 && hostdata->clock <=37) {
 726		/* sync divider 1, async divider 1.5 */
 727		DEBUG(("53c700: sync 1 async 1.5\n"));
 728		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
 729		NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
 730		hostdata->sync_clock = hostdata->clock;
 731	} else {
 732		DEBUG(("53c700: sync 1 async 1\n"));
 733		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
 734		NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 735		/* sync divider 1, async divider 1 */
 736		hostdata->sync_clock = hostdata->clock;
 737	}
 738	/* Calculate the actual minimum period that can be supported
 739	 * by our synchronous clock speed.  See the 710 manual for
 740	 * exact details of this calculation which is based on a
 741	 * setting of the SXFER register */
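	/* For example (hypothetical figures): with a 40MHz sync clock and a
	 * min_xferp of 1, min_period = 1000*(4+1)/(4*40) = 31, i.e. a true
	 * minimum period of 31*4 = 124ns. */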
 742	min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
 743	hostdata->min_period = NCR_700_MIN_PERIOD;
 744	if(min_period > NCR_700_MIN_PERIOD)
 745		hostdata->min_period = min_period;
 746}
 747
 748STATIC void
 749NCR_700_chip_reset(struct Scsi_Host *host)
 750{
 751	struct NCR_700_Host_Parameters *hostdata = 
 752		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 753	if(hostdata->chip710) {
 754		NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
 755		udelay(100);
 756
 757		NCR_700_writeb(0, host, ISTAT_REG);
 758	} else {
 759		NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
 760		udelay(100);
 761		
 762		NCR_700_writeb(0, host, DCNTL_REG);
 763	}
 764
 765	mdelay(1000);
 766
 767	NCR_700_chip_setup(host);
 768}
 769
 770/* The heart of the message processing engine is that the instruction
 771 * immediately after the INT is the normal case (and so must be CLEAR
 772 * ACK).  If we want to do something else, we call that routine in
 773 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
 774 * ACK) so that the routine returns correctly to resume its activity
 775 * */
 776STATIC __u32
 777process_extended_message(struct Scsi_Host *host, 
 778			 struct NCR_700_Host_Parameters *hostdata,
 779			 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
 780{
 781	__u32 resume_offset = dsp, temp = dsp + 8;
 782	__u8 pun = 0xff, lun = 0xff;
 783
 784	if(SCp != NULL) {
 785		pun = SCp->device->id;
 786		lun = SCp->device->lun;
 787	}
 788
 789	switch(hostdata->msgin[2]) {
 790	case A_SDTR_MSG:
 791		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
 792			struct scsi_target *starget = SCp->device->sdev_target;
 793			__u8 period = hostdata->msgin[3];
 794			__u8 offset = hostdata->msgin[4];
 795
 796			if(offset == 0 || period == 0) {
 797				offset = 0;
 798				period = 0;
 799			}
 800
 801			spi_offset(starget) = offset;
 802			spi_period(starget) = period;
 803			
 804			if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
 805				spi_display_xfer_agreement(starget);
 806				NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
 807			}
 808			
 809			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
 810			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
 811			
 812			NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
 813				       host, SXFER_REG);
 814
 815		} else {
 816			/* SDTR message out of the blue, reject it */
 817			shost_printk(KERN_WARNING, host,
 818				"Unexpected SDTR msg\n");
 819			hostdata->msgout[0] = A_REJECT_MSG;
 820			dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
 821			script_patch_16(hostdata->dev, hostdata->script,
 822			                MessageCount, 1);
 823			/* SendMsgOut returns, so set up the return
 824			 * address */
 825			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 826		}
 827		break;
 828	
 829	case A_WDTR_MSG:
 830		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
 831		       host->host_no, pun, lun);
 832		hostdata->msgout[0] = A_REJECT_MSG;
 833		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
 834		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
 835		                1);
 836		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 837
 838		break;
 839
 840	default:
 841		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
 842		       host->host_no, pun, lun,
 843		       NCR_700_phase[(dsps & 0xf00) >> 8]);
 844		spi_print_msg(hostdata->msgin);
 845		printk("\n");
 846		/* just reject it */
 847		hostdata->msgout[0] = A_REJECT_MSG;
 848		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
 849		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
 850		                1);
 851		/* SendMsgOut returns, so set up the return
 852		 * address */
 853		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 854	}
 855	NCR_700_writel(temp, host, TEMP_REG);
 856	return resume_offset;
 857}
 858
 859STATIC __u32
 860process_message(struct Scsi_Host *host,	struct NCR_700_Host_Parameters *hostdata,
 861		struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
 862{
 863	/* work out where to return to */
 864	__u32 temp = dsp + 8, resume_offset = dsp;
 865	__u8 pun = 0xff, lun = 0xff;
 866
 867	if(SCp != NULL) {
 868		pun = SCp->device->id;
 869		lun = SCp->device->lun;
 870	}
 871
 872#ifdef NCR_700_DEBUG
 873	printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
 874	       NCR_700_phase[(dsps & 0xf00) >> 8]);
 875	spi_print_msg(hostdata->msgin);
 876	printk("\n");
 877#endif
 878
 879	switch(hostdata->msgin[0]) {
 880
 881	case A_EXTENDED_MSG:
 882		resume_offset =  process_extended_message(host, hostdata, SCp,
 883							  dsp, dsps);
 884		break;
 885
 886	case A_REJECT_MSG:
 887		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
 888			/* Rejected our sync negotiation attempt */
 889			spi_period(SCp->device->sdev_target) =
 890				spi_offset(SCp->device->sdev_target) = 0;
 891			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
 892			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
 893		} else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
 894			/* rejected our first simple tag message */
 895			scmd_printk(KERN_WARNING, SCp,
 896				"Rejected first tag queue attempt, turning off tag queueing\n");
 897			/* we're done negotiating */
 898			NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
 899			hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
 900
 901			SCp->device->tagged_supported = 0;
 902			SCp->device->simple_tags = 0;
 903			scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
 904		} else {
 905			shost_printk(KERN_WARNING, host,
 906				"(%d:%d) Unexpected REJECT Message %s\n",
 907			       pun, lun,
 908			       NCR_700_phase[(dsps & 0xf00) >> 8]);
 909			/* however, just ignore it */
 910		}
 911		break;
 912
 913	case A_PARITY_ERROR_MSG:
 914		printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
 915		       pun, lun);
 916		NCR_700_internal_bus_reset(host);
 917		break;
 918	case A_SIMPLE_TAG_MSG:
 919		printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
 920		       pun, lun, hostdata->msgin[1],
 921		       NCR_700_phase[(dsps & 0xf00) >> 8]);
 922		/* just ignore it */
 923		break;
 924	default:
 925		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
 926		       host->host_no, pun, lun,
 927		       NCR_700_phase[(dsps & 0xf00) >> 8]);
 928
 929		spi_print_msg(hostdata->msgin);
 930		printk("\n");
 931		/* just reject it */
 932		hostdata->msgout[0] = A_REJECT_MSG;
 933		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
 934		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
 935		                1);
 936		/* SendMsgOut returns, so set up the return
 937		 * address */
 938		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 939
 940		break;
 941	}
 942	NCR_700_writel(temp, host, TEMP_REG);
 943	/* set us up to receive another message */
 944	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
 945	return resume_offset;
 946}
 947
 948STATIC __u32
 949process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 950			 struct Scsi_Host *host,
 951			 struct NCR_700_Host_Parameters *hostdata)
 952{
 953	__u32 resume_offset = 0;
 954	__u8 pun = 0xff, lun=0xff;
 955
 956	if(SCp != NULL) {
 957		pun = SCp->device->id;
 958		lun = SCp->device->lun;
 959	}
 960
 961	if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
 962		DEBUG(("  COMMAND COMPLETE, status=%02x\n",
 963		       hostdata->status[0]));
 964		/* OK, if TCQ is still under negotiation, we now know it works */
 965		if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
 966			NCR_700_set_tag_neg_state(SCp->device,
 967						  NCR_700_FINISHED_TAG_NEGOTIATION);
 968			
 969		/* check for contingent allegiance conditions */
 970		if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
 971		   status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
 972			struct NCR_700_command_slot *slot =
 973				(struct NCR_700_command_slot *)SCp->host_scribble;
 974			if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
 975				/* OOPS: bad device, returning another
 976				 * contingent allegiance condition */
 977				scmd_printk(KERN_ERR, SCp,
 978					"broken device is looping in contingent allegiance: ignoring\n");
 979				NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
 980			} else {
 981				char *cmnd =
 982					NCR_700_get_sense_cmnd(SCp->device);
 983#ifdef NCR_DEBUG
 984				scsi_print_command(SCp);
 985				printk("  cmd %p has status %d, requesting sense\n",
 986				       SCp, hostdata->status[0]);
 987#endif
 988				/* we can destroy the command here
 989				 * because the contingent allegiance
 990				 * condition will cause a retry which
 991				 * will re-copy the command from the
 992				 * saved data_cmnd.  We also unmap any
 993				 * data associated with the command
 994				 * here */
 995				NCR_700_unmap(hostdata, SCp, slot);
 996				dma_unmap_single(hostdata->dev, slot->pCmd,
 997						 MAX_COMMAND_SIZE,
 998						 DMA_TO_DEVICE);
 999
1000				cmnd[0] = REQUEST_SENSE;
1001				cmnd[1] = (lun & 0x7) << 5;
1002				cmnd[2] = 0;
1003				cmnd[3] = 0;
1004				cmnd[4] = SCSI_SENSE_BUFFERSIZE;
1005				cmnd[5] = 0;
1006				/* Here's a quiet hack: the
1007				 * REQUEST_SENSE command is six bytes,
1008				 * so store a flag indicating that
1009				 * this was an internal sense request
1010				 * and the original status at the end
1011				 * of the command */
1012				cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1013				cmnd[7] = hostdata->status[0];
1014				cmnd[8] = SCp->cmd_len;
1015				SCp->cmd_len = 6; /* command length for
1016						   * REQUEST_SENSE */
1017				slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1018				slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1019				slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
1020				slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1021				slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1022				slot->SG[1].pAddr = 0;
1023				slot->resume_offset = hostdata->pScript;
1024				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1025				dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1026
1027				/* queue the command for reissue */
1028				slot->state = NCR_700_SLOT_QUEUED;
1029				slot->flags = NCR_700_FLAG_AUTOSENSE;
1030				hostdata->state = NCR_700_HOST_FREE;
1031				hostdata->cmd = NULL;
1032			}
1033		} else {
1034			// Currently rely on the mid layer evaluation
1035			// of the tag queuing capability
1036			//
1037			//if(status_byte(hostdata->status[0]) == GOOD &&
1038			//   SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1039			//	/* Piggy back the tag queueing support
1040			//	 * on this command */
1041			//	dma_sync_single_for_cpu(hostdata->dev,
1042			//			    slot->dma_handle,
1043			//			    SCp->request_bufflen,
1044			//			    DMA_FROM_DEVICE);
1045			//	if(((char *)SCp->request_buffer)[7] & 0x02) {
1046			//		scmd_printk(KERN_INFO, SCp,
1047			//		     "Enabling Tag Command Queuing\n");
1048			//		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1049			//		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1050			//	} else {
1051			//		NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1052			//		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1053			//	}
1054			//}
1055			NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1056		}
1057	} else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1058		__u8 i = (dsps & 0xf00) >> 8;
1059
1060		scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1061		       NCR_700_phase[i],
1062		       sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1063		scmd_printk(KERN_ERR, SCp, "         len = %d, cmd =",
1064			SCp->cmd_len);
1065		scsi_print_command(SCp);
1066
1067		NCR_700_internal_bus_reset(host);
1068	} else if((dsps & 0xfffff000) == A_FATAL) {
1069		int i = (dsps & 0xfff);
1070
1071		printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1072		       host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1073		if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1074			printk(KERN_ERR "     msg begins %02x %02x\n",
1075			       hostdata->msgin[0], hostdata->msgin[1]);
1076		}
1077		NCR_700_internal_bus_reset(host);
1078	} else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1079#ifdef NCR_700_DEBUG
1080		__u8 i = (dsps & 0xf00) >> 8;
1081
1082		printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1083		       host->host_no, pun, lun,
1084		       i, NCR_700_phase[i]);
1085#endif
1086		save_for_reselection(hostdata, SCp, dsp);
1087
1088	} else if(dsps == A_RESELECTION_IDENTIFIED) {
1089		__u8 lun;
1090		struct NCR_700_command_slot *slot;
1091		__u8 reselection_id = hostdata->reselection_id;
1092		struct scsi_device *SDp;
1093
1094		lun = hostdata->msgin[0] & 0x1f;
1095
1096		hostdata->reselection_id = 0xff;
1097		DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1098		       host->host_no, reselection_id, lun));
1099		/* clear the reselection indicator */
1100		SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1101		if(unlikely(SDp == NULL)) {
1102			printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1103			       host->host_no, reselection_id, lun);
1104			BUG();
1105		}
1106		if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1107			struct scsi_cmnd *SCp;
1108
1109			SCp = scsi_host_find_tag(SDp->host, hostdata->msgin[2]);
1110			if(unlikely(SCp == NULL)) {
1111				printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n", 
1112				       host->host_no, reselection_id, lun, hostdata->msgin[2]);
1113				BUG();
1114			}
1115
1116			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1117			DDEBUG(KERN_DEBUG, SDp,
1118				"reselection is tag %d, slot %p(%d)\n",
1119				hostdata->msgin[2], slot, slot->tag);
1120		} else {
1121			struct NCR_700_Device_Parameters *p = SDp->hostdata;
1122			struct scsi_cmnd *SCp = p->current_cmnd;
1123
1124			if(unlikely(SCp == NULL)) {
1125				sdev_printk(KERN_ERR, SDp,
1126					"no saved request for untagged cmd\n");
1127				BUG();
1128			}
1129			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1130		}
1131
1132		if(slot == NULL) {
1133			printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1134			       host->host_no, reselection_id, lun,
1135			       hostdata->msgin[0], hostdata->msgin[1],
1136			       hostdata->msgin[2]);
1137		} else {
1138			if(hostdata->state != NCR_700_HOST_BUSY)
1139				printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1140				       host->host_no);
1141			resume_offset = slot->resume_offset;
1142			hostdata->cmd = slot->cmnd;
1143
1144			/* re-patch for this command */
1145			script_patch_32_abs(hostdata->dev, hostdata->script,
1146			                    CommandAddress, slot->pCmd);
1147			script_patch_16(hostdata->dev, hostdata->script,
1148					CommandCount, slot->cmnd->cmd_len);
1149			script_patch_32_abs(hostdata->dev, hostdata->script,
1150			                    SGScriptStartAddress,
1151					    to32bit(&slot->pSG[0].ins));
1152
1153			/* Note: setting SXFER only works if we're
1154			 * still in the MESSAGE phase, so it is vital
1155			 * that ACK is still asserted when we process
1156			 * the reselection message.  The resume offset
1157			 * should therefore always clear ACK */
1158			NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1159				       host, SXFER_REG);
1160			dma_cache_sync(hostdata->dev, hostdata->msgin,
1161				       MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1162			dma_cache_sync(hostdata->dev, hostdata->msgout,
1163				       MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1164			/* I'm just being paranoid here, the command should
1165			 * already have been flushed from the cache */
1166			dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
1167				       slot->cmnd->cmd_len, DMA_TO_DEVICE);
1168
1169
1170			
1171		}
1172	} else if(dsps == A_RESELECTED_DURING_SELECTION) {
1173
1174		/* This section is full of debugging code because I've
1175		 * never managed to reach it.  I think what happens is
1176		 * that, because the 700 runs with selection
1177		 * interrupts enabled the whole time, we take a
1178		 * selection interrupt before we manage to get to the
1179		 * reselected script interrupt */
1180
1181		__u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1182		struct NCR_700_command_slot *slot;
1183		
1184		/* Take out our own ID */
1185		reselection_id &= ~(1<<host->this_id);
1186		
1187		/* I've never seen this happen, so keep this as a printk rather
1188		 * than a debug */
1189		printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1190		       host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1191
1192		{
1193			/* FIXME: DEBUGGING CODE */
1194			__u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1195			int i;
1196
1197			for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1198				if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1199				   && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1200					break;
1201			}
1202			printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1203			SCp =  hostdata->slots[i].cmnd;
1204		}
1205
1206		if(SCp != NULL) {
1207			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1208			/* change slot from busy to queued to redo command */
1209			slot->state = NCR_700_SLOT_QUEUED;
1210		}
1211		hostdata->cmd = NULL;
1212		
1213		if(reselection_id == 0) {
1214			if(hostdata->reselection_id == 0xff) {
1215				printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1216				return 0;
1217			} else {
1218				printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1219				       host->host_no);
1220				reselection_id = hostdata->reselection_id;
1221			}
1222		} else {
1223			
1224			/* convert to real ID */
1225			reselection_id = bitmap_to_number(reselection_id);
1226		}
1227		hostdata->reselection_id = reselection_id;
1228		/* just in case we have a stale simple tag message, clear it */
1229		hostdata->msgin[1] = 0;
1230		dma_cache_sync(hostdata->dev, hostdata->msgin,
1231			       MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1232		if(hostdata->tag_negotiated & (1<<reselection_id)) {
1233			resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1234		} else {
1235			resume_offset = hostdata->pScript + Ent_GetReselectionData;
1236		}
1237	} else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1238		/* we've just disconnected from the bus, do nothing since
1239		 * a return here will re-run the queued command slot
1240		 * that may have been interrupted by the initial selection */
1241		DEBUG((" SELECTION COMPLETED\n"));
1242	} else if((dsps & 0xfffff0f0) == A_MSG_IN) { 
1243		resume_offset = process_message(host, hostdata, SCp,
1244						dsp, dsps);
1245	} else if((dsps &  0xfffff000) == 0) {
1246		__u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1247		printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1248		       host->host_no, pun, lun, NCR_700_condition[i],
1249		       NCR_700_phase[j], dsp - hostdata->pScript);
1250		if(SCp != NULL) {
1251			struct scatterlist *sg;
1252
1253			scsi_print_command(SCp);
1254			scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
1255				printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1256			}
1257		}
1258		NCR_700_internal_bus_reset(host);
1259	} else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1260		printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1261		       host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1262		resume_offset = dsp;
1263	} else {
1264		printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1265		       host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1266		NCR_700_internal_bus_reset(host);
1267	}
1268	return resume_offset;
1269}
1270
1271/* We run the 53c700 with selection interrupts always enabled.  This
1272 * means that the chip may be selected as soon as the bus frees.  On a
1273 * busy bus, this can be before the scripts engine finishes its
1274 * processing.  Therefore, part of the selection processing has to be
1275 * to find out what the scripts engine is doing and complete the
1276 * function if necessary (i.e. process the pending disconnect or save
1277 * the interrupted initial selection) */
1278STATIC inline __u32
1279process_selection(struct Scsi_Host *host, __u32 dsp)
1280{
1281	__u8 id = 0;	/* Squash compiler warning */
1282	int count = 0;
1283	__u32 resume_offset = 0;
1284	struct NCR_700_Host_Parameters *hostdata =
1285		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1286	struct scsi_cmnd *SCp = hostdata->cmd;
1287	__u8 sbcl;
1288
1289	for(count = 0; count < 5; count++) {
1290		id = NCR_700_readb(host, hostdata->chip710 ?
1291				   CTEST9_REG : SFBR_REG);
1292
1293		/* Take out our own ID */
1294		id &= ~(1<<host->this_id);
1295		if(id != 0) 
1296			break;
1297		udelay(5);
1298	}
1299	sbcl = NCR_700_readb(host, SBCL_REG);
1300	if((sbcl & SBCL_IO) == 0) {
1301		/* mark as having been selected rather than reselected */
1302		id = 0xff;
1303	} else {
1304		/* convert to real ID */
1305		hostdata->reselection_id = id = bitmap_to_number(id);
1306		DEBUG(("scsi%d:  Reselected by %d\n",
1307		       host->host_no, id));
1308	}
1309	if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1310		struct NCR_700_command_slot *slot =
1311			(struct NCR_700_command_slot *)SCp->host_scribble;
1312		DEBUG(("  ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1313		
1314		switch(dsp - hostdata->pScript) {
1315		case Ent_Disconnect1:
1316		case Ent_Disconnect2:
1317			save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1318			break;
1319		case Ent_Disconnect3:
1320		case Ent_Disconnect4:
1321			save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1322			break;
1323		case Ent_Disconnect5:
1324		case Ent_Disconnect6:
1325			save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1326			break;
1327		case Ent_Disconnect7:
1328		case Ent_Disconnect8:
1329			save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1330			break;
1331		case Ent_Finish1:
1332		case Ent_Finish2:
1333			process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1334			break;
1335			
1336		default:
1337			slot->state = NCR_700_SLOT_QUEUED;
1338			break;
1339			}
1340	}
1341	hostdata->state = NCR_700_HOST_BUSY;
1342	hostdata->cmd = NULL;
1343	/* clear any stale simple tag message */
1344	hostdata->msgin[1] = 0;
1345	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1346		       DMA_BIDIRECTIONAL);
1347
1348	if(id == 0xff) {
1349		/* Selected as target, Ignore */
1350		resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1351	} else if(hostdata->tag_negotiated & (1<<id)) {
1352		resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1353	} else {
1354		resume_offset = hostdata->pScript + Ent_GetReselectionData;
1355	}
1356	return resume_offset;
1357}
1358
1359static inline void
1360NCR_700_clear_fifo(struct Scsi_Host *host) {
1361	const struct NCR_700_Host_Parameters *hostdata
1362		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1363	if(hostdata->chip710) {
1364		NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1365	} else {
1366		NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1367	}
1368}
1369
1370static inline void
1371NCR_700_flush_fifo(struct Scsi_Host *host) {
1372	const struct NCR_700_Host_Parameters *hostdata
1373		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1374	if(hostdata->chip710) {
1375		NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1376		udelay(10);
1377		NCR_700_writeb(0, host, CTEST8_REG);
1378	} else {
1379		NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1380		udelay(10);
1381		NCR_700_writeb(0, host, DFIFO_REG);
1382	}
1383}
1384
1385
1386/* The queue lock with interrupts disabled must be held on entry to
1387 * this function */
1388STATIC int
1389NCR_700_start_command(struct scsi_cmnd *SCp)
1390{
1391	struct NCR_700_command_slot *slot =
1392		(struct NCR_700_command_slot *)SCp->host_scribble;
1393	struct NCR_700_Host_Parameters *hostdata =
1394		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1395	__u16 count = 1;	/* for IDENTIFY message */
1396	u8 lun = SCp->device->lun;
1397
1398	if(hostdata->state != NCR_700_HOST_FREE) {
1399		/* keep this inside the lock to close the race window where
1400		 * the running command finishes on another CPU before we have
1401		 * changed the state to queued on this one */
1402		slot->state = NCR_700_SLOT_QUEUED;
1403
1404		DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1405		       SCp->device->host->host_no, slot->cmnd, slot));
1406		return 0;
1407	}
1408	hostdata->state = NCR_700_HOST_BUSY;
1409	hostdata->cmd = SCp;
1410	slot->state = NCR_700_SLOT_BUSY;
1411	/* keep interrupts disabled until we have the command correctly
1412	 * set up so we cannot take a selection interrupt */
1413
1414	hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1415						slot->flags != NCR_700_FLAG_AUTOSENSE),
1416					       lun);
1417	/* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1418	 * if the negotiated transfer parameters still hold, so
1419	 * always renegotiate them */
1420	if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1421	   slot->flags == NCR_700_FLAG_AUTOSENSE) {
1422		NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1423	}
1424
1425	/* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1426	 * If a contingent allegiance condition exists, the device
1427	 * will refuse all tags, so send the request sense as untagged
1428	 * */
1429	if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1430	   && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1431	       slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1432		count += spi_populate_tag_msg(&hostdata->msgout[count], SCp);
1433	}
1434
1435	if(hostdata->fast &&
1436	   NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1437		count += spi_populate_sync_msg(&hostdata->msgout[count],
1438				spi_period(SCp->device->sdev_target),
1439				spi_offset(SCp->device->sdev_target));
1440		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1441	}
1442
1443	script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
1444
1445
1446	script_patch_ID(hostdata->dev, hostdata->script,
1447			Device_ID, 1<<scmd_id(SCp));
1448
1449	script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
1450			    slot->pCmd);
1451	script_patch_16(hostdata->dev, hostdata->script, CommandCount,
1452	                SCp->cmd_len);
1453	/* finally plumb the beginning of the SG list into the script
1454	 * */
1455	script_patch_32_abs(hostdata->dev, hostdata->script,
1456	                    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1457	NCR_700_clear_fifo(SCp->device->host);
1458
1459	if(slot->resume_offset == 0)
1460		slot->resume_offset = hostdata->pScript;
1461	/* now perform all the writebacks and invalidates */
1462	dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
1463	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1464		       DMA_FROM_DEVICE);
1465	dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1466	dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
1467
1468	/* set the synchronous period/offset */
1469	NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1470		       SCp->device->host, SXFER_REG);
1471	NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1472	NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1473
1474	return 1;
1475}
1476
1477irqreturn_t
1478NCR_700_intr(int irq, void *dev_id)
1479{
1480	struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1481	struct NCR_700_Host_Parameters *hostdata =
1482		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1483	__u8 istat;
1484	__u32 resume_offset = 0;
1485	__u8 pun = 0xff, lun = 0xff;
1486	unsigned long flags;
1487	int handled = 0;
1488
1489	/* Use the host lock to serialise access to the 53c700
1490	 * hardware.  Note: In future, we may need to take the queue
1491	 * lock to enter the done routines.  When that happens, we
1492	 * need to ensure that for this driver, the host lock and the
1493	 * queue lock point to the same thing. */
1494	spin_lock_irqsave(host->host_lock, flags);
1495	if((istat = NCR_700_readb(host, ISTAT_REG))
1496	      & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1497		__u32 dsps;
1498		__u8 sstat0 = 0, dstat = 0;
1499		__u32 dsp;
1500		struct scsi_cmnd *SCp = hostdata->cmd;
1501		enum NCR_700_Host_State state;
1502
1503		handled = 1;
1504		state = hostdata->state;
1505		SCp = hostdata->cmd;
1506
1507		if(istat & SCSI_INT_PENDING) {
1508			udelay(10);
1509
1510			sstat0 = NCR_700_readb(host, SSTAT0_REG);
1511		}
1512
1513		if(istat & DMA_INT_PENDING) {
1514			udelay(10);
1515
1516			dstat = NCR_700_readb(host, DSTAT_REG);
1517		}
1518
1519		dsps = NCR_700_readl(host, DSPS_REG);
1520		dsp = NCR_700_readl(host, DSP_REG);
1521
1522		DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1523		       host->host_no, istat, sstat0, dstat,
1524		       (dsp - (__u32)(hostdata->pScript))/4,
1525		       dsp, dsps));
1526
1527		if(SCp != NULL) {
1528			pun = SCp->device->id;
1529			lun = SCp->device->lun;
1530		}
1531
1532		if(sstat0 & SCSI_RESET_DETECTED) {
1533			struct scsi_device *SDp;
1534			int i;
1535
1536			hostdata->state = NCR_700_HOST_BUSY;
1537
1538			printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1539			       host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1540
1541			scsi_report_bus_reset(host, 0);
1542
1543			/* clear all the negotiated parameters */
1544			__shost_for_each_device(SDp, host)
1545				NCR_700_clear_flag(SDp, ~0);
1546			
1547			/* clear all the slots and their pending commands */
1548			for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1549				struct scsi_cmnd *SCp;
1550				struct NCR_700_command_slot *slot =
1551					&hostdata->slots[i];
1552
1553				if(slot->state == NCR_700_SLOT_FREE)
1554					continue;
1555				
1556				SCp = slot->cmnd;
1557				printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1558				       slot, SCp);
1559				free_slot(slot, hostdata);
1560				SCp->host_scribble = NULL;
1561				NCR_700_set_depth(SCp->device, 0);
1562				/* NOTE: deadlock potential here: we
1563				 * rely on mid-layer guarantees that
1564				 * scsi_done won't try to issue the
1565				 * command again otherwise we'll
1566				 * deadlock on the
1567				 * hostdata->state_lock */
1568				SCp->result = DID_RESET << 16;
1569				SCp->scsi_done(SCp);
1570			}
1571			mdelay(25);
1572			NCR_700_chip_setup(host);
1573
1574			hostdata->state = NCR_700_HOST_FREE;
1575			hostdata->cmd = NULL;
1576			/* signal back if this was an eh induced reset */
1577			if(hostdata->eh_complete != NULL)
1578				complete(hostdata->eh_complete);
1579			goto out_unlock;
1580		} else if(sstat0 & SELECTION_TIMEOUT) {
1581			DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1582			       host->host_no, pun, lun));
1583			NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1584		} else if(sstat0 & PHASE_MISMATCH) {
1585			struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1586				(struct NCR_700_command_slot *)SCp->host_scribble;
1587
1588			if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1589				/* It wants to reply to some part of
1590				 * our message */
1591#ifdef NCR_700_DEBUG
1592				__u32 temp = NCR_700_readl(host, TEMP_REG);
1593				int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1594				printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1595#endif
1596				resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1597			} else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1598				  dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
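				/* phase mismatch inside the data phase: work out how
				 * much of the current scatter element remains
				 * untransferred, shrink that element to the remainder
				 * and advance its address past the bytes already
				 * moved, then nop out the elements that completed */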
1599				int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1600				int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1601				int residual = NCR_700_data_residual(host);
1602				int i;
1603#ifdef NCR_700_DEBUG
1604				__u32 naddr = NCR_700_readl(host, DNAD_REG);
1605
1606				printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1607				       host->host_no, pun, lun,
1608				       SGcount, data_transfer);
1609				scsi_print_command(SCp);
1610				if(residual) {
1611					printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1612				       host->host_no, pun, lun,
1613				       SGcount, data_transfer, residual);
1614				}
1615#endif
1616				data_transfer += residual;
1617
1618				if(data_transfer != 0) {
1619					int count; 
1620					__u32 pAddr;
1621
1622					SGcount--;
1623
1624					count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1625					DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1626					slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1627					slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1628					pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1629					pAddr += (count - data_transfer);
1630#ifdef NCR_700_DEBUG
1631					if(pAddr != naddr) {
1632						printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1633					}
1634#endif
1635					slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1636				}
1637				/* set the executed moves to nops */
1638				for(i=0; i<SGcount; i++) {
1639					slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1640					slot->SG[i].pAddr = 0;
1641				}
1642				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1643				/* and pretend we disconnected after
1644				 * the command phase */
1645				resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1646				/* make sure all the data is flushed */
1647				NCR_700_flush_fifo(host);
1648			} else {
1649				__u8 sbcl = NCR_700_readb(host, SBCL_REG);
1650				printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1651				       host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1652				NCR_700_internal_bus_reset(host);
1653			}
1654
1655		} else if(sstat0 & SCSI_GROSS_ERROR) {
1656			printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1657			       host->host_no, pun, lun);
1658			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1659		} else if(sstat0 & PARITY_ERROR) {
1660			printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1661			       host->host_no, pun, lun);
1662			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1663		} else if(dstat & SCRIPT_INT_RECEIVED) {
1664			DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1665			       host->host_no, pun, lun));
1666			resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1667		} else if(dstat & (ILGL_INST_DETECTED)) {
1668			printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1669			       "         Please email James.Bottomley@HansenPartnership.com with the details\n",
1670			       host->host_no, pun, lun,
1671			       dsp, dsp - hostdata->pScript);
1672			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1673		} else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1674			printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1675			       host->host_no, pun, lun, dstat);
1676			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1677		}
1678
1679		
1680		/* NOTE: selection interrupt processing MUST occur
1681		 * after script interrupt processing to correctly cope
1682		 * with the case where we process a disconnect and
1683		 * then get reselected before we process the
1684		 * disconnection */
1685		if(sstat0 & SELECTED) {
1686			/* FIXME: It currently takes at least FOUR
1687			 * interrupts to complete a command that
1688			 * disconnects: one for the disconnect, one
1689			 * for the reselection, one to get the
1690			 * reselection data and one to complete the
1691			 * command.  If we guess the reselected
1692			 * command here and prepare it, we only need
1693			 * to get a reselection data interrupt if we
1694			 * guessed wrongly.  Since the interrupt
1695			 * overhead is much greater than the command
1696			 * setup, this would be an efficient
1697			 * optimisation particularly as we probably
1698			 * only have one outstanding command on a
1699			 * target most of the time */
1700
1701			resume_offset = process_selection(host, dsp);
1702
1703		}
1704
1705	}
1706
1707	if(resume_offset) {
1708		if(hostdata->state != NCR_700_HOST_BUSY) {
1709			printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1710			       host->host_no, resume_offset, resume_offset - hostdata->pScript);
1711			hostdata->state = NCR_700_HOST_BUSY;
1712		}
1713
1714		DEBUG(("Attempting to resume at %x\n", resume_offset));
1715		NCR_700_clear_fifo(host);
1716		NCR_700_writel(resume_offset, host, DSP_REG);
1717	} 
1718	/* There is probably a technical no-no about this: If we're a
1719	 * shared interrupt and we got this interrupt because the
1720	 * other device needs servicing not us, we're still going to
1721	 * check our queued commands here---of course, there shouldn't
1722	 * be any outstanding.... */
1723	if(hostdata->state == NCR_700_HOST_FREE) {
1724		int i;
1725
1726		for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1727			/* fairness: always run the queue from the last
1728			 * position we left off */
1729			int j = (i + hostdata->saved_slot_position)
1730				% NCR_700_COMMAND_SLOTS_PER_HOST;
1731			
1732			if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1733				continue;
1734			if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1735				DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1736				       host->host_no, &hostdata->slots[j],
1737				       hostdata->slots[j].cmnd));
1738				hostdata->saved_slot_position = j + 1;
1739			}
1740
1741			break;
1742		}
1743	}
1744 out_unlock:
1745	spin_unlock_irqrestore(host->host_lock, flags);
1746	return IRQ_RETVAL(handled);
1747}
1748
1749static int
1750NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1751{
1752	struct NCR_700_Host_Parameters *hostdata = 
1753		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1754	__u32 move_ins;
1755	enum dma_data_direction direction;
1756	struct NCR_700_command_slot *slot;
1757
1758	if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1759		/* We're over our allocation, this should never happen
1760		 * since we report the max allocation to the mid layer */
1761		printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1762		return 1;
1763	}
1764	/* check for untagged commands.  We cannot have any outstanding
1765	 * commands if we accept them.  Commands could be untagged because:
1766	 *
1767	 * - The tag negotiated bitmap is clear
1768	 * - The blk layer sent an untagged command
1769	 */
1770	if(NCR_700_get_depth(SCp->device) != 0
1771	   && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1772	       || !(SCp->flags & SCMD_TAGGED))) {
1773		CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1774		       NCR_700_get_depth(SCp->device));
1775		return SCSI_MLQUEUE_DEVICE_BUSY;
1776	}
1777	if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1778		CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1779		       NCR_700_get_depth(SCp->device));
1780		return SCSI_MLQUEUE_DEVICE_BUSY;
1781	}
1782	NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1783
1784	/* begin the command here */
1785	/* no need to check for NULL, test for command_slot_count above
1786	 * ensures a slot is free */
1787	slot = find_empty_slot(hostdata);
1788
1789	slot->cmnd = SCp;
1790
1791	SCp->scsi_done = done;
1792	SCp->host_scribble = (unsigned char *)slot;
1793	SCp->SCp.ptr = NULL;
1794	SCp->SCp.buffer = NULL;
1795
1796#ifdef NCR_700_DEBUG
1797	printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1798	scsi_print_command(SCp);
1799#endif
1800	if ((SCp->flags & SCMD_TAGGED)
1801	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1802	   && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1803		scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1804		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1805		NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1806	}
1807
1808	/* here we may have to process an untagged command.  The gate
1809	 * above ensures that this will be the only one outstanding,
1810	 * so clear the tag negotiated bit.
1811	 *
1812	 * FIXME: This will royally screw up on multiple LUN devices
1813	 * */
1814	if (!(SCp->flags & SCMD_TAGGED)
1815	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1816		scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1817		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1818	}
1819
1820	if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
1821	    SCp->device->simple_tags) {
1822		slot->tag = SCp->request->tag;
1823		CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1824		       slot->tag, slot);
1825	} else {
1826		struct NCR_700_Device_Parameters *p = SCp->device->hostdata;
1827
1828		slot->tag = SCSI_NO_TAG;
1829		/* save current command for reselection */
1830		p->current_cmnd = SCp;
1831	}
1832	/* sanity check: some of the commands generated by the mid-layer
1833	 * have an eccentric idea of their sc_data_direction */
1834	if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1835	   SCp->sc_data_direction != DMA_NONE) {
1836#ifdef NCR_700_DEBUG
1837		printk("53c700: Command");
1838		scsi_print_command(SCp);
1839		printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1840#endif
1841		SCp->sc_data_direction = DMA_NONE;
1842	}
1843
1844	switch (SCp->cmnd[0]) {
1845	case REQUEST_SENSE:
1846		/* clear the internal sense magic */
1847		SCp->cmnd[6] = 0;
1848		/* fall through */
1849	default:
1850		/* OK, get it from the command */
1851		switch(SCp->sc_data_direction) {
1852		case DMA_BIDIRECTIONAL:
1853		default:
1854			printk(KERN_ERR "53c700: Unknown command for data direction ");
1855			scsi_print_command(SCp);
1856			
1857			move_ins = 0;
1858			break;
1859		case DMA_NONE:
1860			move_ins = 0;
1861			break;
1862		case DMA_FROM_DEVICE:
1863			move_ins = SCRIPT_MOVE_DATA_IN;
1864			break;
1865		case DMA_TO_DEVICE:
1866			move_ins = SCRIPT_MOVE_DATA_OUT;
1867			break;
1868		}
1869	}
1870
1871	/* now build the scatter gather list */
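	/* (each mapped segment below becomes one script MOVE instruction,
	 *  the direction opcode or'd with the byte count, plus its bus
	 *  address; the list is terminated with a SCRIPT_RETURN so the
	 *  script drops back to its caller when the data is exhausted) */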
1872	direction = SCp->sc_data_direction;
1873	if(move_ins != 0) {
1874		int i;
1875		int sg_count;
1876		dma_addr_t vPtr = 0;
1877		struct scatterlist *sg;
1878		__u32 count = 0;
1879
1880		sg_count = scsi_dma_map(SCp);
1881		BUG_ON(sg_count < 0);
1882
1883		scsi_for_each_sg(SCp, sg, sg_count, i) {
1884			vPtr = sg_dma_address(sg);
1885			count = sg_dma_len(sg);
1886
1887			slot->SG[i].ins = bS_to_host(move_ins | count);
1888			DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1889			       i, count, slot->SG[i].ins, (unsigned long)vPtr));
1890			slot->SG[i].pAddr = bS_to_host(vPtr);
1891		}
1892		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1893		slot->SG[i].pAddr = 0;
1894		dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1895		DEBUG((" SETTING %p to %x\n",
1896		       (&slot->pSG[i].ins),
1897		       slot->SG[i].ins));
1898	}
1899	slot->resume_offset = 0;
1900	slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1901				    MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1902	NCR_700_start_command(SCp);
1903	return 0;
1904}
1905
1906STATIC DEF_SCSI_QCMD(NCR_700_queuecommand)
1907
1908STATIC int
1909NCR_700_abort(struct scsi_cmnd * SCp)
1910{
1911	struct NCR_700_command_slot *slot;
1912
1913	scmd_printk(KERN_INFO, SCp, "abort command\n");
1914
1915	slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1916
1917	if(slot == NULL)
1918		/* no outstanding command to abort */
1919		return SUCCESS;
1920	if(SCp->cmnd[0] == TEST_UNIT_READY) {
1921		/* FIXME: This is because of a problem in the new
1922		 * error handler.  When it is in error recovery, it
1923		 * will send a TUR to a device it thinks may still be
1924		 * showing a problem.  If the TUR isn't responded to,
1925		 * it will abort it and mark the device off line.
1926		 * Unfortunately, it does no other error recovery, so
1927		 * this would leave us with an outstanding command
1928		 * occupying a slot.  Rather than allow this to
1929		 * happen, we issue a bus reset to force all
1930		 * outstanding commands to terminate here. */
1931		NCR_700_internal_bus_reset(SCp->device->host);
1932		/* still drop through and return failed */
1933	}
1934	return FAILED;
1935
1936}
1937
1938STATIC int
1939NCR_700_host_reset(struct scsi_cmnd * SCp)
1940{
1941	DECLARE_COMPLETION_ONSTACK(complete);
1942	struct NCR_700_Host_Parameters *hostdata = 
1943		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1944
1945	scmd_printk(KERN_INFO, SCp,
1946		"New error handler wants HOST reset, cmd %p\n\t", SCp);
1947	scsi_print_command(SCp);
1948
1949	/* In theory, eh_complete should always be null because the
1950	 * eh is single threaded, but just in case we're handling a
1951	 * reset via sg or something */
1952	spin_lock_irq(SCp->device->host->host_lock);
1953	while (hostdata->eh_complete != NULL) {
1954		spin_unlock_irq(SCp->device->host->host_lock);
1955		msleep_interruptible(100);
1956		spin_lock_irq(SCp->device->host->host_lock);
1957	}
1958
1959	hostdata->eh_complete = &complete;
1960	NCR_700_internal_bus_reset(SCp->device->host);
1961	NCR_700_chip_reset(SCp->device->host);
1962
1963	spin_unlock_irq(SCp->device->host->host_lock);
1964	wait_for_completion(&complete);
1965	spin_lock_irq(SCp->device->host->host_lock);
1966
1967	hostdata->eh_complete = NULL;
1968	/* Revalidate the transport parameters of the failing device */
1969	if(hostdata->fast)
1970		spi_schedule_dv_device(SCp->device);
1971
1972	spin_unlock_irq(SCp->device->host->host_lock);
1973	return SUCCESS;
1974}
1975
1976STATIC void
1977NCR_700_set_period(struct scsi_target *STp, int period)
1978{
1979	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1980	struct NCR_700_Host_Parameters *hostdata = 
1981		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
1982	
1983	if(!hostdata->fast)
1984		return;
1985
1986	if(period < hostdata->min_period)
1987		period = hostdata->min_period;
1988
1989	spi_period(STp) = period;
1990	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
1991			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1992	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
1993}
1994
1995STATIC void
1996NCR_700_set_offset(struct scsi_target *STp, int offset)
1997{
1998	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1999	struct NCR_700_Host_Parameters *hostdata = 
2000		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2001	int max_offset = hostdata->chip710
2002		? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2003	
2004	if(!hostdata->fast)
2005		return;
2006
2007	if(offset > max_offset)
2008		offset = max_offset;
2009
2010	/* if we're currently async, make sure the period is reasonable */
2011	if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2012				    spi_period(STp) > 0xff))
2013		spi_period(STp) = hostdata->min_period;
2014
2015	spi_offset(STp) = offset;
2016	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2017			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2018	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2019}
2020
2021STATIC int
2022NCR_700_slave_alloc(struct scsi_device *SDp)
2023{
2024	SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2025				GFP_KERNEL);
2026
2027	if (!SDp->hostdata)
2028		return -ENOMEM;
2029
2030	return 0;
2031}
2032
2033STATIC int
2034NCR_700_slave_configure(struct scsi_device *SDp)
2035{
2036	struct NCR_700_Host_Parameters *hostdata = 
2037		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2038
2039	/* to do here: allocate memory; build a queue_full list */
2040	if(SDp->tagged_supported) {
2041		scsi_change_queue_depth(SDp, NCR_700_DEFAULT_TAGS);
2042		NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2043	}
2044
2045	if(hostdata->fast) {
2046		/* Find the correct offset and period via domain validation */
2047		if (!spi_initial_dv(SDp->sdev_target))
2048			spi_dv_device(SDp);
2049	} else {
2050		spi_offset(SDp->sdev_target) = 0;
2051		spi_period(SDp->sdev_target) = 0;
2052	}
2053	return 0;
2054}
2055
2056STATIC void
2057NCR_700_slave_destroy(struct scsi_device *SDp)
2058{
2059	kfree(SDp->hostdata);
2060	SDp->hostdata = NULL;
2061}
2062
2063static int
2064NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2065{
2066	if (depth > NCR_700_MAX_TAGS)
2067		depth = NCR_700_MAX_TAGS;
2068	return scsi_change_queue_depth(SDp, depth);
2069}
2070
2071static ssize_t
2072NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2073{
2074	struct scsi_device *SDp = to_scsi_device(dev);
2075
2076	return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2077}
2078
2079static struct device_attribute NCR_700_active_tags_attr = {
2080	.attr = {
2081		.name =		"active_tags",
2082		.mode =		S_IRUGO,
2083	},
2084	.show = NCR_700_show_active_tags,
2085};
2086
2087STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2088	&NCR_700_active_tags_attr,
2089	NULL,
2090};
2091
2092EXPORT_SYMBOL(NCR_700_detect);
2093EXPORT_SYMBOL(NCR_700_release);
2094EXPORT_SYMBOL(NCR_700_intr);
2095
2096static struct spi_function_template NCR_700_transport_functions =  {
2097	.set_period	= NCR_700_set_period,
2098	.show_period	= 1,
2099	.set_offset	= NCR_700_set_offset,
2100	.show_offset	= 1,
2101};
2102
2103static int __init NCR_700_init(void)
2104{
2105	NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2106	if(!NCR_700_transport_template)
2107		return -ENODEV;
2108	return 0;
2109}
2110
2111static void __exit NCR_700_exit(void)
2112{
2113	spi_release_transport(NCR_700_transport_template);
2114}
2115
2116module_init(NCR_700_init);
2117module_exit(NCR_700_exit);
2118
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2
   3/* NCR (or Symbios) 53c700 and 53c700-66 Driver
   4 *
   5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
   6**-----------------------------------------------------------------------------
   7**  
   8**
   9**-----------------------------------------------------------------------------
  10 */
  11
 102#define NCR_700_VERSION "2.8"
 103
 104#include <linux/kernel.h>
 105#include <linux/types.h>
 106#include <linux/string.h>
 107#include <linux/slab.h>
 108#include <linux/ioport.h>
 109#include <linux/delay.h>
 110#include <linux/spinlock.h>
 111#include <linux/completion.h>
 112#include <linux/init.h>
 113#include <linux/proc_fs.h>
 114#include <linux/blkdev.h>
 115#include <linux/module.h>
 116#include <linux/interrupt.h>
 117#include <linux/device.h>
 118#include <linux/pgtable.h>
 119#include <asm/dma.h>
 120#include <asm/io.h>
 121#include <asm/byteorder.h>
 122
 123#include <scsi/scsi.h>
 124#include <scsi/scsi_cmnd.h>
 125#include <scsi/scsi_dbg.h>
 126#include <scsi/scsi_eh.h>
 127#include <scsi/scsi_host.h>
 128#include <scsi/scsi_tcq.h>
 129#include <scsi/scsi_transport.h>
 130#include <scsi/scsi_transport_spi.h>
 131
 132#include "53c700.h"
 133
 134/* NOTE: For 64 bit drivers there are points in the code where we use
 135 * a non dereferenceable pointer to point to a structure in dma-able
 136 * memory (which is 32 bits) so that we can use all of the structure
 137 * operations but take the address at the end.  This macro allows us
 138 * to truncate the 64 bit pointer down to 32 bits without the compiler
 139 * complaining */
 140#define to32bit(x)	((__u32)((unsigned long)(x)))
 141
 142#ifdef NCR_700_DEBUG
 143#define STATIC
 144#else
 145#define STATIC static
 146#endif
 147
 148MODULE_AUTHOR("James Bottomley");
 149MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
 150MODULE_LICENSE("GPL");
 151
 152/* This is the script */
 153#include "53c700_d.h"
 154
 155
 156STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
 157STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
 158STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
 159STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
 160STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
 161STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
 162STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
 163STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
 164static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
 165
 166STATIC const struct attribute_group *NCR_700_dev_groups[];
 167
 168STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
 169
 170static char *NCR_700_phase[] = {
 171	"",
 172	"after selection",
 173	"before command phase",
 174	"after command phase",
 175	"after status phase",
 176	"after data in phase",
 177	"after data out phase",
 178	"during data phase",
 179};
 180
 181static char *NCR_700_condition[] = {
 182	"",
 183	"NOT MSG_OUT",
 184	"UNEXPECTED PHASE",
 185	"NOT MSG_IN",
 186	"UNEXPECTED MSG",
 187	"MSG_IN",
 188	"SDTR_MSG RECEIVED",
 189	"REJECT_MSG RECEIVED",
 190	"DISCONNECT_MSG RECEIVED",
 191	"MSG_OUT",
 192	"DATA_IN",
 193	
 194};
 195
 196static char *NCR_700_fatal_messages[] = {
 197	"unexpected message after reselection",
 198	"still MSG_OUT after message injection",
 199	"not MSG_IN after selection",
 200	"Illegal message length received",
 201};
 202
 203static char *NCR_700_SBCL_bits[] = {
 204	"IO ",
 205	"CD ",
 206	"MSG ",
 207	"ATN ",
 208	"SEL ",
 209	"BSY ",
 210	"ACK ",
 211	"REQ ",
 212};
 213
 214static char *NCR_700_SBCL_to_phase[] = {
 215	"DATA_OUT",
 216	"DATA_IN",
 217	"CMD_OUT",
 218	"STATE",
 219	"ILLEGAL PHASE",
 220	"ILLEGAL PHASE",
 221	"MSG OUT",
 222	"MSG IN",
 223};
 224
 225/* This translates the SDTR message offset and period to a value
 226 * which can be loaded into the SXFER_REG.
 227 *
 228 * NOTE: According to SCSI-2, the true transfer period (in ns) is
 229 *       actually four times this period value */
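/* Worked example (illustrative figures, not taken from the header): with a
 * 50MHz sync clock and an SDTR period factor of 50 (i.e. 200ns), XFERP below
 * evaluates to (200*50)/1000 - 4 = 6, which is packed into bits 4-6 of SXFER
 * with the offset in the low nibble. */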
 230static inline __u8
 231NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
 232			       __u8 offset, __u8 period)
 233{
 234	int XFERP;
 235
 236	__u8 min_xferp = (hostdata->chip710
 237			  ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
 238	__u8 max_offset = (hostdata->chip710
 239			   ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
 240
 241	if(offset == 0)
 242		return 0;
 243
 244	if(period < hostdata->min_period) {
 245		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, hostdata->min_period*4);
 246		period = hostdata->min_period;
 247	}
 248	XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
 249	if(offset > max_offset) {
 250		printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
 251		       offset, max_offset);
 252		offset = max_offset;
 253	}
 254	if(XFERP < min_xferp) {
 255		XFERP =  min_xferp;
 256	}
 257	return (offset & 0x0f) | (XFERP & 0x07)<<4;
 258}
 259
 260static inline __u8
 261NCR_700_get_SXFER(struct scsi_device *SDp)
 262{
 263	struct NCR_700_Host_Parameters *hostdata = 
 264		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
 265
 266	return NCR_700_offset_period_to_sxfer(hostdata,
 267					      spi_offset(SDp->sdev_target),
 268					      spi_period(SDp->sdev_target));
 269}
 270
 271static inline dma_addr_t virt_to_dma(struct NCR_700_Host_Parameters *h, void *p)
 272{
 273	return h->pScript + ((uintptr_t)p - (uintptr_t)h->script);
 274}
 275
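/* The two sync helpers below are deliberately no-ops when the script/slot
 * memory came from dma_alloc_coherent(); they only do work for the
 * noncoherent fallback allocation made in NCR_700_detect(). */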
 276static inline void dma_sync_to_dev(struct NCR_700_Host_Parameters *h,
 277		void *addr, size_t size)
 278{
 279	if (h->noncoherent)
 280		dma_sync_single_for_device(h->dev, virt_to_dma(h, addr),
 281					   size, DMA_BIDIRECTIONAL);
 282}
 283
 284static inline void dma_sync_from_dev(struct NCR_700_Host_Parameters *h,
 285		void *addr, size_t size)
 286{
 287	if (h->noncoherent)
 288		dma_sync_single_for_device(h->dev, virt_to_dma(h, addr), size,
 289					   DMA_BIDIRECTIONAL);
 290}
 291
 292struct Scsi_Host *
 293NCR_700_detect(struct scsi_host_template *tpnt,
 294	       struct NCR_700_Host_Parameters *hostdata, struct device *dev)
 295{
 296	dma_addr_t pScript, pSlots;
 297	__u8 *memory;
 298	__u32 *script;
 299	struct Scsi_Host *host;
 300	static int banner = 0;
 301	int j;
 302
 303	if (tpnt->sdev_groups == NULL)
 304		tpnt->sdev_groups = NCR_700_dev_groups;
 305
 306	memory = dma_alloc_coherent(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL);
 307	if (!memory) {
 308		hostdata->noncoherent = 1;
 309		memory = dma_alloc_noncoherent(dev, TOTAL_MEM_SIZE, &pScript,
 310					 DMA_BIDIRECTIONAL, GFP_KERNEL);
 311	}
 312	if (!memory) {
 313		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
 314		return NULL;
 315	}
 316
 317	script = (__u32 *)memory;
 318	hostdata->msgin = memory + MSGIN_OFFSET;
 319	hostdata->msgout = memory + MSGOUT_OFFSET;
 320	hostdata->status = memory + STATUS_OFFSET;
 321	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
 322	hostdata->dev = dev;
 323
 324	pSlots = pScript + SLOTS_OFFSET;
 325
 326	/* Fill in the missing routines from the host template */
 327	tpnt->queuecommand = NCR_700_queuecommand;
 328	tpnt->eh_abort_handler = NCR_700_abort;
 329	tpnt->eh_host_reset_handler = NCR_700_host_reset;
 330	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
 331	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
 332	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
 333	tpnt->slave_configure = NCR_700_slave_configure;
 334	tpnt->slave_destroy = NCR_700_slave_destroy;
 335	tpnt->slave_alloc = NCR_700_slave_alloc;
 336	tpnt->change_queue_depth = NCR_700_change_queue_depth;
 337
 338	if(tpnt->name == NULL)
 339		tpnt->name = "53c700";
 340	if(tpnt->proc_name == NULL)
 341		tpnt->proc_name = "53c700";
 342
 343	host = scsi_host_alloc(tpnt, 4);
 344	if (!host)
 345		return NULL;
 346	memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
 347	       * NCR_700_COMMAND_SLOTS_PER_HOST);
 348	for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
 349		dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
 350					  - (unsigned long)&hostdata->slots[0].SG[0]);
 351		hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
 352		if(j == 0)
 353			hostdata->free_list = &hostdata->slots[j];
 354		else
 355			hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
 356		hostdata->slots[j].state = NCR_700_SLOT_FREE;
 357	}
 358
 359	for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
 360		script[j] = bS_to_host(SCRIPT[j]);
 361
 362	/* adjust all labels to be bus physical */
 363	for (j = 0; j < PATCHES; j++)
 364		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
 365	/* now patch up fixed addresses. */
 366	script_patch_32(hostdata, script, MessageLocation,
 367			pScript + MSGOUT_OFFSET);
 368	script_patch_32(hostdata, script, StatusAddress,
 369			pScript + STATUS_OFFSET);
 370	script_patch_32(hostdata, script, ReceiveMsgAddress,
 371			pScript + MSGIN_OFFSET);
 372
 373	hostdata->script = script;
 374	hostdata->pScript = pScript;
 375	dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
 376	hostdata->state = NCR_700_HOST_FREE;
 377	hostdata->cmd = NULL;
 378	host->max_id = 8;
 379	host->max_lun = NCR_700_MAX_LUNS;
 380	BUG_ON(NCR_700_transport_template == NULL);
 381	host->transportt = NCR_700_transport_template;
 382	host->unique_id = (unsigned long)hostdata->base;
 383	hostdata->eh_complete = NULL;
 384	host->hostdata[0] = (unsigned long)hostdata;
 385	/* kick the chip */
 386	NCR_700_writeb(0xff, host, CTEST9_REG);
 387	if (hostdata->chip710)
 388		hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
 389	else
 390		hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
 391	hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
 392	if (banner == 0) {
 393		printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
 394		banner = 1;
 395	}
 396	printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
 397	       hostdata->chip710 ? "53c710" :
 398	       (hostdata->fast ? "53c700-66" : "53c700"),
 399	       hostdata->rev, hostdata->differential ?
 400	       "(Differential)" : "");
 401	/* reset the chip */
 402	NCR_700_chip_reset(host);
 403
 404	if (scsi_add_host(host, dev)) {
 405		dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
 406		scsi_host_put(host);
 407		return NULL;
 408	}
 409
 410	spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
 411		SPI_SIGNAL_SE;
 412
 413	return host;
 414}
 415
 416int
 417NCR_700_release(struct Scsi_Host *host)
 418{
 419	struct NCR_700_Host_Parameters *hostdata = 
 420		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 421
 422	if (hostdata->noncoherent)
 423		dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
 424				hostdata->script, hostdata->pScript,
 425				DMA_BIDIRECTIONAL);
 426	else
 427		dma_free_coherent(hostdata->dev, TOTAL_MEM_SIZE,
 428				  hostdata->script, hostdata->pScript);
 429	return 1;
 430}
 431
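/* Build an IDENTIFY message: bit 7 (IDENTIFY_BASE) marks it as IDENTIFY,
 * bit 6 grants disconnect privilege, and the low bits carry the LUN. */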
 432static inline __u8
 433NCR_700_identify(int can_disconnect, __u8 lun)
 434{
 435	return IDENTIFY_BASE |
 436		((can_disconnect) ? 0x40 : 0) |
 437		(lun & NCR_700_LUN_MASK);
 438}
 439
 440/*
 441 * Function : static int NCR_700_data_residual (struct Scsi_Host *host)
 442 *
 443 * Purpose : return residual data count of what's in the chip.  If you
 444 * really want to know what this function is doing, it's almost a
 445 * direct transcription of the algorithm described in the 53c710
 446 * guide, except that the DBC and DFIFO registers are only 6 bits
 447 * wide on a 53c700.
 448 *
 449 * Inputs : host - SCSI host */
 450static inline int
 451NCR_700_data_residual (struct Scsi_Host *host) {
 452	struct NCR_700_Host_Parameters *hostdata = 
 453		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 454	int count, synchronous = 0;
 455	unsigned int ddir;
 456
 457	if(hostdata->chip710) {
 458		count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
 459			 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
 460	} else {
 461		count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
 462			 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
 463	}
 464	
 465	if(hostdata->fast)
 466		synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
 467	
 468	/* get the data direction */
 469	ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
 470
 471	if (ddir) {
 472		/* Receive */
 473		if (synchronous) 
 474			count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
 475		else
 476			if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
 477				++count;
 478	} else {
 479		/* Send */
 480		__u8 sstat = NCR_700_readb(host, SSTAT1_REG);
 481		if (sstat & SODL_REG_FULL)
 482			++count;
 483		if (synchronous && (sstat & SODR_REG_FULL))
 484			++count;
 485	}
 486#ifdef NCR_700_DEBUG
 487	if(count)
 488		printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
 489#endif
 490	return count;
 491}
 492
 493/* print out the SCSI wires and corresponding phase from the SBCL register
 494 * in the chip */
 495static inline char *
 496sbcl_to_string(__u8 sbcl)
 497{
 498	int i;
 499	static char ret[256];
 500
 501	ret[0]='\0';
 502	for(i=0; i<8; i++) {
 503		if((1<<i) & sbcl) 
 504			strcat(ret, NCR_700_SBCL_bits[i]);
 505	}
 506	strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
 507	return ret;
 508}
 509
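/* Return the index of the lowest set bit in the bitmap (8 if none is set) */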
 510static inline __u8
 511bitmap_to_number(__u8 bitmap)
 512{
 513	__u8 i;
 514
 515	for(i=0; i<8 && !(bitmap &(1<<i)); i++)
 516		;
 517	return i;
 518}
 519
 520/* Pull a slot off the free list */
 521STATIC struct NCR_700_command_slot *
 522find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
 523{
 524	struct NCR_700_command_slot *slot = hostdata->free_list;
 525
 526	if(slot == NULL) {
 527		/* sanity check */
 528		if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
 529			printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
 530		return NULL;
 531	}
 532
 533	if(slot->state != NCR_700_SLOT_FREE)
 534		/* should panic! */
 535		printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
 536		
 537
 538	hostdata->free_list = slot->ITL_forw;
 539	slot->ITL_forw = NULL;
 540
 541
 542	/* NOTE: set the state to busy here, not queued, since this
 543	 * indicates the slot is in use and cannot be run by the IRQ
 544	 * finish routine.  If we cannot start the command when it
 545	 * is properly built, we then change the state to NCR_700_SLOT_QUEUED */
 546	slot->state = NCR_700_SLOT_BUSY;
 547	slot->flags = 0;
 548	hostdata->command_slot_count++;
 549	
 550	return slot;
 551}
 552
 553STATIC void 
 554free_slot(struct NCR_700_command_slot *slot,
 555	  struct NCR_700_Host_Parameters *hostdata)
 556{
 557	if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
 558		printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
 559	}
 560	if(slot->state == NCR_700_SLOT_FREE) {
 561		printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
 562	}
 563	
 564	slot->resume_offset = 0;
 565	slot->cmnd = NULL;
 566	slot->state = NCR_700_SLOT_FREE;
 567	slot->ITL_forw = hostdata->free_list;
 568	hostdata->free_list = slot;
 569	hostdata->command_slot_count--;
 570}
 571
 572
 573/* This routine really does very little.  The command is indexed on
 574   the ITL and (if tagged) the ITLQ lists in _queuecommand */
 575STATIC void
 576save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
 577		     struct scsi_cmnd *SCp, __u32 dsp)
 578{
 579	/* It's just possible that this gets executed twice */
 580	if(SCp != NULL) {
 581		struct NCR_700_command_slot *slot =
 582			(struct NCR_700_command_slot *)SCp->host_scribble;
 583
 584		slot->resume_offset = dsp;
 585	}
 586	hostdata->state = NCR_700_HOST_FREE;
 587	hostdata->cmd = NULL;
 588}
 589
 590STATIC inline void
 591NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
 592	      struct NCR_700_command_slot *slot)
 593{
 594	if(SCp->sc_data_direction != DMA_NONE &&
 595	   SCp->sc_data_direction != DMA_BIDIRECTIONAL)
 596		scsi_dma_unmap(SCp);
 597}
 598
 599STATIC inline void
 600NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
 601	       struct scsi_cmnd *SCp, int result)
 602{
 603	hostdata->state = NCR_700_HOST_FREE;
 604	hostdata->cmd = NULL;
 605
 606	if(SCp != NULL) {
 607		struct NCR_700_command_slot *slot =
 608			(struct NCR_700_command_slot *)SCp->host_scribble;
 609
 610		dma_unmap_single(hostdata->dev, slot->pCmd,
 611				 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
 612		if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
 613			char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
 614
 615			dma_unmap_single(hostdata->dev, slot->dma_handle,
 616					 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 617			/* restore the old result if the request sense was
 618			 * successful */
 619			if (result == 0)
 620				result = cmnd[7];
 621			/* restore the original length */
 622			SCp->cmd_len = cmnd[8];
 623		} else
 624			NCR_700_unmap(hostdata, SCp, slot);
 625
 626		free_slot(slot, hostdata);
 627#ifdef NCR_700_DEBUG
 628		if(NCR_700_get_depth(SCp->device) == 0 ||
 629		   NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
 630			printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
 631			       NCR_700_get_depth(SCp->device));
 632#endif /* NCR_700_DEBUG */
 633		NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
 634
 635		SCp->host_scribble = NULL;
 636		SCp->result = result;
 637		scsi_done(SCp);
 638	} else {
 639		printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
 640	}
 641}
 642
 643
 644STATIC void
 645NCR_700_internal_bus_reset(struct Scsi_Host *host)
 646{
 647	/* Bus reset */
 648	NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
 649	udelay(50);
 650	NCR_700_writeb(0, host, SCNTL1_REG);
 651
 652}
 653
 654STATIC void
 655NCR_700_chip_setup(struct Scsi_Host *host)
 656{
 657	struct NCR_700_Host_Parameters *hostdata = 
 658		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 659	__u8 min_period;
 660	__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
 661
 662	if(hostdata->chip710) {
 663		__u8 burst_disable = 0;
 664		__u8 burst_length = 0;
 665
 666		switch (hostdata->burst_length) {
 667			case 1:
 668			        burst_length = BURST_LENGTH_1;
 669			        break;
 670			case 2:
 671			        burst_length = BURST_LENGTH_2;
 672			        break;
 673			case 4:
 674			        burst_length = BURST_LENGTH_4;
 675			        break;
 676			case 8:
 677			        burst_length = BURST_LENGTH_8;
 678			        break;
 679			default:
 680			        burst_disable = BURST_DISABLE;
 681			        break;
 682		}
 683		hostdata->dcntl_extra |= COMPAT_700_MODE;
 684
 685		NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
 686		NCR_700_writeb(burst_length | hostdata->dmode_extra,
 687			       host, DMODE_710_REG);
 688		NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
 689			       (hostdata->differential ? DIFF : 0),
 690			       host, CTEST7_REG);
 691		NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
 692		NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
 693			       | AUTO_ATN, host, SCNTL0_REG);
 694	} else {
 695		NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
 696			       host, DMODE_700_REG);
 697		NCR_700_writeb(hostdata->differential ? 
 698			       DIFF : 0, host, CTEST7_REG);
 699		if(hostdata->fast) {
 700			/* this is for 700-66, does nothing on 700 */
 701			NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION 
 702				       | GENERATE_RECEIVE_PARITY, host,
 703				       CTEST8_REG);
 704		} else {
 705			NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
 706				       | PARITY | AUTO_ATN, host, SCNTL0_REG);
 707		}
 708	}
 709
 710	NCR_700_writeb(1 << host->this_id, host, SCID_REG);
 711	NCR_700_writeb(0, host, SBCL_REG);
 712	NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
 713
 714	NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
 715	     | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
 716
 717	NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
 718	NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
 719	if(hostdata->clock > 75) {
 720		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
 721		/* do the best we can, but the async clock will be out
 722		 * of spec: sync divider 2, async divider 3 */
 723		DEBUG(("53c700: sync 2 async 3\n"));
 724		NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
 725		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 726		hostdata->sync_clock = hostdata->clock/2;
 727	} else	if(hostdata->clock > 50  && hostdata->clock <= 75) {
 728		/* sync divider 1.5, async divider 3 */
 729		DEBUG(("53c700: sync 1.5 async 3\n"));
 730		NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
 731		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 732		hostdata->sync_clock = hostdata->clock*2;
 733		hostdata->sync_clock /= 3;
 734		
 735	} else if(hostdata->clock > 37 && hostdata->clock <= 50) {
 736		/* sync divider 1, async divider 2 */
 737		DEBUG(("53c700: sync 1 async 2\n"));
 738		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
 739		NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 740		hostdata->sync_clock = hostdata->clock;
 741	} else if(hostdata->clock > 25 && hostdata->clock <=37) {
 742		/* sync divider 1, async divider 1.5 */
 743		DEBUG(("53c700: sync 1 async 1.5\n"));
 744		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
 745		NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
 746		hostdata->sync_clock = hostdata->clock;
 747	} else {
 748		DEBUG(("53c700: sync 1 async 1\n"));
 749		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
 750		NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 751		/* sync divider 1, async divider 1 */
 752		hostdata->sync_clock = hostdata->clock;
 753	}
 754	/* Calculate the actual minimum period that can be supported
 755	 * by our synchronous clock speed.  See the 710 manual for
 756	 * exact details of this calculation which is based on a
 757	 * setting of the SXFER register */
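	/* (for illustration, with assumed rather than header values:
	 *  min_xferp == 1 and a 50MHz sync_clock give
	 *  1000*(4+1)/(4*50) = 25, i.e. a 100ns/10MT/s floor) */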
 758	min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
 759	hostdata->min_period = NCR_700_MIN_PERIOD;
 760	if(min_period > NCR_700_MIN_PERIOD)
 761		hostdata->min_period = min_period;
 762}
 763
 764STATIC void
 765NCR_700_chip_reset(struct Scsi_Host *host)
 766{
 767	struct NCR_700_Host_Parameters *hostdata = 
 768		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 769	if(hostdata->chip710) {
 770		NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
 771		udelay(100);
 772
 773		NCR_700_writeb(0, host, ISTAT_REG);
 774	} else {
 775		NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
 776		udelay(100);
 777		
 778		NCR_700_writeb(0, host, DCNTL_REG);
 779	}
 780
 781	mdelay(1000);
 782
 783	NCR_700_chip_setup(host);
 784}
 785
 786/* The heart of the message processing engine is that the instruction
 787 * immediately after the INT is the normal case (and so must be CLEAR
 788 * ACK).  If we want to do something else, we call that routine in
 789 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
 790 * ACK) so that the routine returns correctly to resume its activity
 791 * */
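/* (concretely: resume_offset normally stays at dsp, the CLEAR ACK, while
 * TEMP is loaded with dsp + 8 so a reply routine such as SendMessageWithATN
 * returns to the instruction just past the CLEAR ACK) */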
 792STATIC __u32
 793process_extended_message(struct Scsi_Host *host, 
 794			 struct NCR_700_Host_Parameters *hostdata,
 795			 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
 796{
 797	__u32 resume_offset = dsp, temp = dsp + 8;
 798	__u8 pun = 0xff, lun = 0xff;
 799
 800	if(SCp != NULL) {
 801		pun = SCp->device->id;
 802		lun = SCp->device->lun;
 803	}
 804
 805	switch(hostdata->msgin[2]) {
 806	case A_SDTR_MSG:
 807		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
 808			struct scsi_target *starget = SCp->device->sdev_target;
 809			__u8 period = hostdata->msgin[3];
 810			__u8 offset = hostdata->msgin[4];
 811
 812			if(offset == 0 || period == 0) {
 813				offset = 0;
 814				period = 0;
 815			}
 816
 817			spi_offset(starget) = offset;
 818			spi_period(starget) = period;
 819			
 820			if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
 821				spi_display_xfer_agreement(starget);
 822				NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
 823			}
 824			
 825			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
 826			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
 827			
 828			NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
 829				       host, SXFER_REG);
 830
 831		} else {
 832			/* SDTR message out of the blue, reject it */
 833			shost_printk(KERN_WARNING, host,
 834				"Unexpected SDTR msg\n");
 835			hostdata->msgout[0] = A_REJECT_MSG;
 836			dma_sync_to_dev(hostdata, hostdata->msgout, 1);
 837			script_patch_16(hostdata, hostdata->script,
 838			                MessageCount, 1);
 839			/* SendMsgOut returns, so set up the return
 840			 * address */
 841			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 842		}
 843		break;
 844	
 845	case A_WDTR_MSG:
 846		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
 847		       host->host_no, pun, lun);
 848		hostdata->msgout[0] = A_REJECT_MSG;
 849		dma_sync_to_dev(hostdata, hostdata->msgout, 1);
 850		script_patch_16(hostdata, hostdata->script, MessageCount, 1);
 851		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 852
 853		break;
 854
 855	default:
 856		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
 857		       host->host_no, pun, lun,
 858		       NCR_700_phase[(dsps & 0xf00) >> 8]);
 859		spi_print_msg(hostdata->msgin);
 860		printk("\n");
 861		/* just reject it */
 862		hostdata->msgout[0] = A_REJECT_MSG;
 863		dma_sync_to_dev(hostdata, hostdata->msgout, 1);
 864		script_patch_16(hostdata, hostdata->script, MessageCount, 1);
 865		/* SendMsgOut returns, so set up the return
 866		 * address */
 867		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 868	}
 869	NCR_700_writel(temp, host, TEMP_REG);
 870	return resume_offset;
 871}
 872
 873STATIC __u32
 874process_message(struct Scsi_Host *host,	struct NCR_700_Host_Parameters *hostdata,
 875		struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
 876{
 877	/* work out where to return to */
 878	__u32 temp = dsp + 8, resume_offset = dsp;
 879	__u8 pun = 0xff, lun = 0xff;
 880
 881	if(SCp != NULL) {
 882		pun = SCp->device->id;
 883		lun = SCp->device->lun;
 884	}
 885
 886#ifdef NCR_700_DEBUG
 887	printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
 888	       NCR_700_phase[(dsps & 0xf00) >> 8]);
 889	spi_print_msg(hostdata->msgin);
 890	printk("\n");
 891#endif
 892
 893	switch(hostdata->msgin[0]) {
 894
 895	case A_EXTENDED_MSG:
 896		resume_offset =  process_extended_message(host, hostdata, SCp,
 897							  dsp, dsps);
 898		break;
 899
 900	case A_REJECT_MSG:
 901		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
 902			/* Rejected our sync negotiation attempt */
 903			spi_period(SCp->device->sdev_target) =
 904				spi_offset(SCp->device->sdev_target) = 0;
 905			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
 906			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
 907		} else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
 908			/* rejected our first simple tag message */
 909			scmd_printk(KERN_WARNING, SCp,
 910				"Rejected first tag queue attempt, turning off tag queueing\n");
 911			/* we're done negotiating */
 912			NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
 913			hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
 914
 915			SCp->device->tagged_supported = 0;
 916			SCp->device->simple_tags = 0;
 917			scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
 918		} else {
 919			shost_printk(KERN_WARNING, host,
 920				"(%d:%d) Unexpected REJECT Message %s\n",
 921			       pun, lun,
 922			       NCR_700_phase[(dsps & 0xf00) >> 8]);
 923			/* however, just ignore it */
 924		}
 925		break;
 926
 927	case A_PARITY_ERROR_MSG:
 928		printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
 929		       pun, lun);
 930		NCR_700_internal_bus_reset(host);
 931		break;
 932	case A_SIMPLE_TAG_MSG:
 933		printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
 934		       pun, lun, hostdata->msgin[1],
 935		       NCR_700_phase[(dsps & 0xf00) >> 8]);
 936		/* just ignore it */
 937		break;
 938	default:
 939		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
 940		       host->host_no, pun, lun,
 941		       NCR_700_phase[(dsps & 0xf00) >> 8]);
 942
 943		spi_print_msg(hostdata->msgin);
 944		printk("\n");
 945		/* just reject it */
 946		hostdata->msgout[0] = A_REJECT_MSG;
 947		dma_sync_to_dev(hostdata, hostdata->msgout, 1);
 948		script_patch_16(hostdata, hostdata->script, MessageCount, 1);
 949		/* SendMsgOut returns, so set up the return
 950		 * address */
 951		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 952
 953		break;
 954	}
 955	NCR_700_writel(temp, host, TEMP_REG);
 956	/* set us up to receive another message */
 957	dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
 958	return resume_offset;
 959}
 960
 961STATIC __u32
 962process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 963			 struct Scsi_Host *host,
 964			 struct NCR_700_Host_Parameters *hostdata)
 965{
 966	__u32 resume_offset = 0;
 967	__u8 pun = 0xff, lun=0xff;
 968
 969	if(SCp != NULL) {
 970		pun = SCp->device->id;
 971		lun = SCp->device->lun;
 972	}
 973
 974	if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
 975		DEBUG(("  COMMAND COMPLETE, status=%02x\n",
 976		       hostdata->status[0]));
 977		/* OK, if TCQ still under negotiation, we now know it works */
 978		if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
 979			NCR_700_set_tag_neg_state(SCp->device,
 980						  NCR_700_FINISHED_TAG_NEGOTIATION);
 981
 982		/* check for contingent allegiance conditions */
 983		if (hostdata->status[0] == SAM_STAT_CHECK_CONDITION ||
 984		    hostdata->status[0] == SAM_STAT_COMMAND_TERMINATED) {
 985			struct NCR_700_command_slot *slot =
 986				(struct NCR_700_command_slot *)SCp->host_scribble;
 987			if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
 988				/* OOPS: bad device, returning another
 989				 * contingent allegiance condition */
 990				scmd_printk(KERN_ERR, SCp,
 991					"broken device is looping in contingent allegiance: ignoring\n");
 992				NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
 993			} else {
 994				char *cmnd =
 995					NCR_700_get_sense_cmnd(SCp->device);
 996#ifdef NCR_DEBUG
 997				scsi_print_command(SCp);
 998				printk("  cmd %p has status %d, requesting sense\n",
 999				       SCp, hostdata->status[0]);
1000#endif
1001				/* we can destroy the command here
1002				 * because the contingent allegiance
1003				 * condition will cause a retry which
1004				 * will re-copy the command from the
1005				 * saved data_cmnd.  We also unmap any
1006				 * data associated with the command
1007				 * here */
1008				NCR_700_unmap(hostdata, SCp, slot);
1009				dma_unmap_single(hostdata->dev, slot->pCmd,
1010						 MAX_COMMAND_SIZE,
1011						 DMA_TO_DEVICE);
1012
1013				cmnd[0] = REQUEST_SENSE;
1014				cmnd[1] = (lun & 0x7) << 5;
1015				cmnd[2] = 0;
1016				cmnd[3] = 0;
1017				cmnd[4] = SCSI_SENSE_BUFFERSIZE;
1018				cmnd[5] = 0;
1019				/* Here's a quiet hack: the
1020				 * REQUEST_SENSE command is six bytes,
1021				 * so store a flag indicating that
1022				 * this was an internal sense request
1023				 * and the original status at the end
1024				 * of the command */
1025				cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1026				cmnd[7] = hostdata->status[0];
1027				cmnd[8] = SCp->cmd_len;
1028				SCp->cmd_len = 6; /* command length for
1029						   * REQUEST_SENSE */
1030				slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1031				slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1032				slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
1033				slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1034				slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1035				slot->SG[1].pAddr = 0;
1036				slot->resume_offset = hostdata->pScript;
1037				dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG[0])*2);
1038				dma_sync_from_dev(hostdata, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE);
1039
1040				/* queue the command for reissue */
1041				slot->state = NCR_700_SLOT_QUEUED;
1042				slot->flags = NCR_700_FLAG_AUTOSENSE;
1043				hostdata->state = NCR_700_HOST_FREE;
1044				hostdata->cmd = NULL;
1045			}
1046		} else {
1047			// Currently rely on the mid layer evaluation
1048			// of the tag queuing capability
1049			//
1050			//if(status_byte(hostdata->status[0]) == GOOD &&
1051			//   SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1052			//	/* Piggy back the tag queueing support
1053			//	 * on this command */
1054			//	dma_sync_single_for_cpu(hostdata->dev,
1055			//			    slot->dma_handle,
1056			//			    SCp->request_bufflen,
1057			//			    DMA_FROM_DEVICE);
1058			//	if(((char *)SCp->request_buffer)[7] & 0x02) {
1059			//		scmd_printk(KERN_INFO, SCp,
1060			//		     "Enabling Tag Command Queuing\n");
1061			//		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1062			//		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1063			//	} else {
1064			//		NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1065			//		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1066			//	}
1067			//}
1068			NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1069		}
1070	} else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1071		__u8 i = (dsps & 0xf00) >> 8;
1072
1073		scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1074		       NCR_700_phase[i],
1075		       sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1076		scmd_printk(KERN_ERR, SCp, "         len = %d, cmd =",
1077			SCp->cmd_len);
1078		scsi_print_command(SCp);
1079
1080		NCR_700_internal_bus_reset(host);
1081	} else if((dsps & 0xfffff000) == A_FATAL) {
1082		int i = (dsps & 0xfff);
1083
1084		printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1085		       host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1086		if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1087			printk(KERN_ERR "     msg begins %02x %02x\n",
1088			       hostdata->msgin[0], hostdata->msgin[1]);
1089		}
1090		NCR_700_internal_bus_reset(host);
1091	} else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1092#ifdef NCR_700_DEBUG
1093		__u8 i = (dsps & 0xf00) >> 8;
1094
1095		printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1096		       host->host_no, pun, lun,
1097		       i, NCR_700_phase[i]);
1098#endif
1099		save_for_reselection(hostdata, SCp, dsp);
1100
1101	} else if(dsps == A_RESELECTION_IDENTIFIED) {
1102		__u8 lun;
1103		struct NCR_700_command_slot *slot;
1104		__u8 reselection_id = hostdata->reselection_id;
1105		struct scsi_device *SDp;
1106
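		/* msgin[0] is the IDENTIFY message sent by the
		 * reselecting target; its low bits carry the LUN */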
1107		lun = hostdata->msgin[0] & 0x1f;
1108
1109		hostdata->reselection_id = 0xff;
1110		DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1111		       host->host_no, reselection_id, lun));
1112		/* clear the reselection indicator */
1113		SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1114		if(unlikely(SDp == NULL)) {
1115			printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1116			       host->host_no, reselection_id, lun);
1117			BUG();
1118		}
1119		if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1120			struct scsi_cmnd *SCp;
1121
1122			SCp = scsi_host_find_tag(SDp->host, hostdata->msgin[2]);
1123			if(unlikely(SCp == NULL)) {
1124				printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n", 
1125				       host->host_no, reselection_id, lun, hostdata->msgin[2]);
1126				BUG();
1127			}
1128
1129			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1130			DDEBUG(KERN_DEBUG, SDp,
1131				"reselection is tag %d, slot %p(%d)\n",
1132				hostdata->msgin[2], slot, slot->tag);
1133		} else {
1134			struct NCR_700_Device_Parameters *p = SDp->hostdata;
1135			struct scsi_cmnd *SCp = p->current_cmnd;
1136
1137			if(unlikely(SCp == NULL)) {
1138				sdev_printk(KERN_ERR, SDp,
1139					"no saved request for untagged cmd\n");
1140				BUG();
1141			}
1142			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1143		}
1144
1145		if(slot == NULL) {
1146			printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1147			       host->host_no, reselection_id, lun,
1148			       hostdata->msgin[0], hostdata->msgin[1],
1149			       hostdata->msgin[2]);
1150		} else {
1151			if(hostdata->state != NCR_700_HOST_BUSY)
1152				printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1153				       host->host_no);
1154			resume_offset = slot->resume_offset;
1155			hostdata->cmd = slot->cmnd;
1156
1157			/* re-patch for this command */
1158			script_patch_32_abs(hostdata, hostdata->script,
1159			                    CommandAddress, slot->pCmd);
1160			script_patch_16(hostdata, hostdata->script,
1161					CommandCount, slot->cmnd->cmd_len);
1162			script_patch_32_abs(hostdata, hostdata->script,
1163			                    SGScriptStartAddress,
1164					    to32bit(&slot->pSG[0].ins));
1165
1166			/* Note: setting SXFER only works if we're
1167			 * still in the MESSAGE phase, so it is vital
1168			 * that ACK is still asserted when we process
1169			 * the reselection message.  The resume offset
1170			 * should therefore always clear ACK */
1171			NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1172				       host, SXFER_REG);
1173			dma_sync_from_dev(hostdata, hostdata->msgin,
1174				       MSG_ARRAY_SIZE);
1175			dma_sync_to_dev(hostdata, hostdata->msgout,
1176				       MSG_ARRAY_SIZE);
1177			/* I'm just being paranoid here, the command should
1178			 * already have been flushed from the cache */
1179			dma_sync_to_dev(hostdata, slot->cmnd->cmnd,
1180				       slot->cmnd->cmd_len);
1181
1182
1183			
1184		}
1185	} else if(dsps == A_RESELECTED_DURING_SELECTION) {
1186
1187		/* This section is full of debugging code because I've
1188		 * never managed to reach it.  I think what happens is
1189		 * that, because the 700 runs with selection
1190		 * interrupts enabled the whole time, we take a
1191		 * selection interrupt before we manage to get to the
1192		 * reselected script interrupt */
1193
1194		__u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1195		struct NCR_700_command_slot *slot;
1196		
1197		/* Take out our own ID */
1198		reselection_id &= ~(1<<host->this_id);
1199		
1200		/* I've never seen this happen, so keep this as a printk rather
1201		 * than a debug */
1202		printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1203		       host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1204
1205		{
1206			/* FIXME: DEBUGGING CODE */
1207			__u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1208			int i;
1209
1210			for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1211				if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1212				   && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1213					break;
1214			}
1215			printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1216			SCp =  hostdata->slots[i].cmnd;
1217		}
1218
1219		if(SCp != NULL) {
1220			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1221			/* change slot from busy to queued to redo command */
1222			slot->state = NCR_700_SLOT_QUEUED;
1223		}
1224		hostdata->cmd = NULL;
1225		
1226		if(reselection_id == 0) {
1227			if(hostdata->reselection_id == 0xff) {
1228				printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1229				return 0;
1230			} else {
1231				printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1232				       host->host_no);
1233				reselection_id = hostdata->reselection_id;
1234			}
1235		} else {
1236			
1237			/* convert to real ID */
1238			reselection_id = bitmap_to_number(reselection_id);
1239		}
1240		hostdata->reselection_id = reselection_id;
1241		/* just in case we have a stale simple tag message, clear it */
1242		hostdata->msgin[1] = 0;
1243		dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
1244		if(hostdata->tag_negotiated & (1<<reselection_id)) {
1245			resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1246		} else {
1247			resume_offset = hostdata->pScript + Ent_GetReselectionData;
1248		}
1249	} else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1250		/* we've just disconnected from the bus, do nothing since
1251		 * a return here will re-run the queued command slot
1252		 * that may have been interrupted by the initial selection */
1253		DEBUG((" SELECTION COMPLETED\n"));
1254	} else if((dsps & 0xfffff0f0) == A_MSG_IN) { 
1255		resume_offset = process_message(host, hostdata, SCp,
1256						dsp, dsps);
1257	} else if((dsps &  0xfffff000) == 0) {
1258		__u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1259		printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1260		       host->host_no, pun, lun, NCR_700_condition[i],
1261		       NCR_700_phase[j], dsp - hostdata->pScript);
1262		if(SCp != NULL) {
1263			struct scatterlist *sg;
1264
1265			scsi_print_command(SCp);
1266			scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
1267				printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1268			}
1269		}
1270		NCR_700_internal_bus_reset(host);
1271	} else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1272		printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1273		       host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1274		resume_offset = dsp;
1275	} else {
1276		printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1277		       host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1278		NCR_700_internal_bus_reset(host);
1279	}
1280	return resume_offset;
1281}
1282
1283/* We run the 53c700 with selection interrupts always enabled.  This
1284 * means that the chip may be selected as soon as the bus frees.  On a
1285 * busy bus, this can be before the scripts engine finishes its
1286 * processing.  Therefore, part of the selection processing has to be
1287 * to find out what the scripts engine is doing and complete the
1288 * function if necessary (i.e. process the pending disconnect or save
1289 * the interrupted initial selection) */
1290STATIC inline __u32
1291process_selection(struct Scsi_Host *host, __u32 dsp)
1292{
1293	__u8 id = 0;	/* Squash compiler warning */
1294	int count = 0;
1295	__u32 resume_offset = 0;
1296	struct NCR_700_Host_Parameters *hostdata =
1297		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1298	struct scsi_cmnd *SCp = hostdata->cmd;
1299	__u8 sbcl;
1300
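	/* poll for the (re)selecting ID bit mask (CTEST9 on the 710,
	 * SFBR on the 700); retry a few times since it may not be
	 * valid immediately */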
1301	for(count = 0; count < 5; count++) {
1302		id = NCR_700_readb(host, hostdata->chip710 ?
1303				   CTEST9_REG : SFBR_REG);
1304
1305		/* Take out our own ID */
1306		id &= ~(1<<host->this_id);
1307		if(id != 0) 
1308			break;
1309		udelay(5);
1310	}
1311	sbcl = NCR_700_readb(host, SBCL_REG);
1312	if((sbcl & SBCL_IO) == 0) {
1313		/* mark as having been selected rather than reselected */
1314		id = 0xff;
1315	} else {
1316		/* convert to real ID */
1317		hostdata->reselection_id = id = bitmap_to_number(id);
1318		DEBUG(("scsi%d:  Reselected by %d\n",
1319		       host->host_no, id));
1320	}
1321	if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1322		struct NCR_700_command_slot *slot =
1323			(struct NCR_700_command_slot *)SCp->host_scribble;
1324		DEBUG(("  ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1325		
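		/* the scripts engine was interrupted mid-stream: use the
		 * script offset to work out whether it was about to
		 * disconnect or finish, and complete that work for it */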
1326		switch(dsp - hostdata->pScript) {
1327		case Ent_Disconnect1:
1328		case Ent_Disconnect2:
1329			save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1330			break;
1331		case Ent_Disconnect3:
1332		case Ent_Disconnect4:
1333			save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1334			break;
1335		case Ent_Disconnect5:
1336		case Ent_Disconnect6:
1337			save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1338			break;
1339		case Ent_Disconnect7:
1340		case Ent_Disconnect8:
1341			save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1342			break;
1343		case Ent_Finish1:
1344		case Ent_Finish2:
1345			process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1346			break;
1347			
1348		default:
1349			slot->state = NCR_700_SLOT_QUEUED;
1350			break;
1351		}
1352	}
1353	hostdata->state = NCR_700_HOST_BUSY;
1354	hostdata->cmd = NULL;
1355	/* clear any stale simple tag message */
1356	hostdata->msgin[1] = 0;
1357	dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
1358
1359	if(id == 0xff) {
1360		/* Selected as target, Ignore */
1361		resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1362	} else if(hostdata->tag_negotiated & (1<<id)) {
1363		resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1364	} else {
1365		resume_offset = hostdata->pScript + Ent_GetReselectionData;
1366	}
1367	return resume_offset;
1368}
1369
1370static inline void
1371NCR_700_clear_fifo(struct Scsi_Host *host) {
1372	const struct NCR_700_Host_Parameters *hostdata
1373		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1374	if(hostdata->chip710) {
1375		NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1376	} else {
1377		NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1378	}
1379}
1380
1381static inline void
1382NCR_700_flush_fifo(struct Scsi_Host *host) {
1383	const struct NCR_700_Host_Parameters *hostdata
1384		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1385	if(hostdata->chip710) {
1386		NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1387		udelay(10);
1388		NCR_700_writeb(0, host, CTEST8_REG);
1389	} else {
1390		NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1391		udelay(10);
1392		NCR_700_writeb(0, host, DFIFO_REG);
1393	}
1394}
1395
1396
1397/* The queue lock with interrupts disabled must be held on entry to
1398 * this function */
1399STATIC int
1400NCR_700_start_command(struct scsi_cmnd *SCp)
1401{
1402	struct NCR_700_command_slot *slot =
1403		(struct NCR_700_command_slot *)SCp->host_scribble;
1404	struct NCR_700_Host_Parameters *hostdata =
1405		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1406	__u16 count = 1;	/* for IDENTIFY message */
1407	u8 lun = SCp->device->lun;
1408
1409	if(hostdata->state != NCR_700_HOST_FREE) {
1410		/* keep this inside the lock to close the race window where
1411		 * the running command finishes on another CPU while we don't
1412		 * change the state to queued on this one */
1413		slot->state = NCR_700_SLOT_QUEUED;
1414
1415		DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1416		       SCp->device->host->host_no, slot->cmnd, slot));
1417		return 0;
1418	}
1419	hostdata->state = NCR_700_HOST_BUSY;
1420	hostdata->cmd = SCp;
1421	slot->state = NCR_700_SLOT_BUSY;
1422	/* keep interrupts disabled until we have the command correctly
1423	 * set up so we cannot take a selection interrupt */
1424
1425	hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1426						slot->flags != NCR_700_FLAG_AUTOSENSE),
1427					       lun);
1428	/* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1429	 * if the negotiated transfer parameters still hold, so
1430	 * always renegotiate them */
1431	if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1432	   slot->flags == NCR_700_FLAG_AUTOSENSE) {
1433		NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1434	}
1435
1436	/* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1437	 * If a contingent allegiance condition exists, the device
1438	 * will refuse all tags, so send the request sense as untagged
1439	 * */
1440	if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1441	   && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1442	       slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1443		count += spi_populate_tag_msg(&hostdata->msgout[count], SCp);
1444	}
1445
1446	if(hostdata->fast &&
1447	   NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1448		count += spi_populate_sync_msg(&hostdata->msgout[count],
1449				spi_period(SCp->device->sdev_target),
1450				spi_offset(SCp->device->sdev_target));
1451		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1452	}
1453
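	/* patch this command's parameters (message byte count, target
	 * ID, CDB address and length, SG script start) into the static
	 * script before kicking it off */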
1454	script_patch_16(hostdata, hostdata->script, MessageCount, count);
1455
1456	script_patch_ID(hostdata, hostdata->script, Device_ID, 1<<scmd_id(SCp));
1457
1458	script_patch_32_abs(hostdata, hostdata->script, CommandAddress,
1459			    slot->pCmd);
1460	script_patch_16(hostdata, hostdata->script, CommandCount, SCp->cmd_len);
1461	/* finally plumb the beginning of the SG list into the script
1462	 * */
1463	script_patch_32_abs(hostdata, hostdata->script,
1464	                    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1465	NCR_700_clear_fifo(SCp->device->host);
1466
1467	if(slot->resume_offset == 0)
1468		slot->resume_offset = hostdata->pScript;
1469	/* now perform all the writebacks and invalidates */
1470	dma_sync_to_dev(hostdata, hostdata->msgout, count);
1471	dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
1472	dma_sync_to_dev(hostdata, SCp->cmnd, SCp->cmd_len);
1473	dma_sync_from_dev(hostdata, hostdata->status, 1);
1474
1475	/* set the synchronous period/offset */
1476	NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1477		       SCp->device->host, SXFER_REG);
1478	NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1479	NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1480
1481	return 1;
1482}
1483
1484irqreturn_t
1485NCR_700_intr(int irq, void *dev_id)
1486{
1487	struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1488	struct NCR_700_Host_Parameters *hostdata =
1489		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1490	__u8 istat;
1491	__u32 resume_offset = 0;
1492	__u8 pun = 0xff, lun = 0xff;
1493	unsigned long flags;
1494	int handled = 0;
1495
1496	/* Use the host lock to serialise access to the 53c700
1497	 * hardware.  Note: In future, we may need to take the queue
1498	 * lock to enter the done routines.  When that happens, we
1499	 * need to ensure that for this driver, the host lock and the
1500	 * queue lock point to the same thing. */
1501	spin_lock_irqsave(host->host_lock, flags);
1502	if((istat = NCR_700_readb(host, ISTAT_REG))
1503	      & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1504		__u32 dsps;
1505		__u8 sstat0 = 0, dstat = 0;
1506		__u32 dsp;
1507		struct scsi_cmnd *SCp = hostdata->cmd;
1508
1509		handled = 1;
1510
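		/* reading SSTAT0 and DSTAT picks up (and acknowledges)
		 * the pending SCSI and DMA interrupt conditions */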
1511		if(istat & SCSI_INT_PENDING) {
1512			udelay(10);
1513
1514			sstat0 = NCR_700_readb(host, SSTAT0_REG);
1515		}
1516
1517		if(istat & DMA_INT_PENDING) {
1518			udelay(10);
1519
1520			dstat = NCR_700_readb(host, DSTAT_REG);
1521		}
1522
1523		dsps = NCR_700_readl(host, DSPS_REG);
1524		dsp = NCR_700_readl(host, DSP_REG);
1525
1526		DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1527		       host->host_no, istat, sstat0, dstat,
1528		       (dsp - (__u32)(hostdata->pScript))/4,
1529		       dsp, dsps));
1530
1531		if(SCp != NULL) {
1532			pun = SCp->device->id;
1533			lun = SCp->device->lun;
1534		}
1535
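		/* a bus reset invalidates every outstanding command:
		 * fail each busy slot back to the midlayer, clear the
		 * negotiated parameters and re-initialise the chip */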
1536		if(sstat0 & SCSI_RESET_DETECTED) {
1537			struct scsi_device *SDp;
1538			int i;
1539
1540			hostdata->state = NCR_700_HOST_BUSY;
1541
1542			printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1543			       host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1544
1545			scsi_report_bus_reset(host, 0);
1546
1547			/* clear all the negotiated parameters */
1548			__shost_for_each_device(SDp, host)
1549				NCR_700_clear_flag(SDp, ~0);
1550			
1551			/* clear all the slots and their pending commands */
1552			for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1553				struct scsi_cmnd *SCp;
1554				struct NCR_700_command_slot *slot =
1555					&hostdata->slots[i];
1556
1557				if(slot->state == NCR_700_SLOT_FREE)
1558					continue;
1559				
1560				SCp = slot->cmnd;
1561				printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1562				       slot, SCp);
1563				free_slot(slot, hostdata);
1564				SCp->host_scribble = NULL;
1565				NCR_700_set_depth(SCp->device, 0);
1566				/* NOTE: deadlock potential here: we
1567				 * rely on mid-layer guarantees that
1568				 * scsi_done won't try to issue the
1569				 * command again otherwise we'll
1570				 * deadlock on the
1571				 * hostdata->state_lock */
1572				SCp->result = DID_RESET << 16;
1573				scsi_done(SCp);
1574			}
1575			mdelay(25);
1576			NCR_700_chip_setup(host);
1577
1578			hostdata->state = NCR_700_HOST_FREE;
1579			hostdata->cmd = NULL;
1580			/* signal back if this was an eh induced reset */
1581			if(hostdata->eh_complete != NULL)
1582				complete(hostdata->eh_complete);
1583			goto out_unlock;
1584		} else if(sstat0 & SELECTION_TIMEOUT) {
1585			DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1586			       host->host_no, pun, lun));
1587			NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1588		} else if(sstat0 & PHASE_MISMATCH) {
1589			struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1590				(struct NCR_700_command_slot *)SCp->host_scribble;
1591
1592			if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1593				/* It wants to reply to some part of
1594				 * our message */
1595#ifdef NCR_700_DEBUG
1596				__u32 temp = NCR_700_readl(host, TEMP_REG);
1597				int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1598				printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1599#endif
1600				resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1601			} else if (slot && dsp >= to32bit(&slot->pSG[0].ins) &&
1602				  dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1603				int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1604				int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1605				int residual = NCR_700_data_residual(host);
1606				int i;
1607#ifdef NCR_700_DEBUG
1608				__u32 naddr = NCR_700_readl(host, DNAD_REG);
1609
1610				printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1611				       host->host_no, pun, lun,
1612				       SGcount, data_transfer);
1613				scsi_print_command(SCp);
1614				if(residual) {
1615					printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1616				       host->host_no, pun, lun,
1617				       SGcount, data_transfer, residual);
1618				}
1619#endif
1620				data_transfer += residual;
1621
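				/* data_transfer is now the number of
				 * bytes the interrupted move did not
				 * transfer; rewrite that SG element so
				 * its count and address resume where
				 * the data actually stopped */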
1622				if(data_transfer != 0) {
1623					int count; 
1624					__u32 pAddr;
1625
1626					SGcount--;
1627
1628					count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1629					DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1630					slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1631					slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1632					pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1633					pAddr += (count - data_transfer);
1634#ifdef NCR_700_DEBUG
1635					if(pAddr != naddr) {
1636						printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1637					}
1638#endif
1639					slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1640				}
1641				/* set the executed moves to nops */
1642				for(i=0; i<SGcount; i++) {
1643					slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1644					slot->SG[i].pAddr = 0;
1645				}
1646				dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG));
1647				/* and pretend we disconnected after
1648				 * the command phase */
1649				resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1650				/* make sure all the data is flushed */
1651				NCR_700_flush_fifo(host);
1652			} else {
1653				__u8 sbcl = NCR_700_readb(host, SBCL_REG);
1654				printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1655				       host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1656				NCR_700_internal_bus_reset(host);
1657			}
1658
1659		} else if(sstat0 & SCSI_GROSS_ERROR) {
1660			printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1661			       host->host_no, pun, lun);
1662			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1663		} else if(sstat0 & PARITY_ERROR) {
1664			printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1665			       host->host_no, pun, lun);
1666			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1667		} else if(dstat & SCRIPT_INT_RECEIVED) {
1668			DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1669			       host->host_no, pun, lun));
1670			resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1671		} else if(dstat & (ILGL_INST_DETECTED)) {
1672			printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1673			       "         Please email James.Bottomley@HansenPartnership.com with the details\n",
1674			       host->host_no, pun, lun,
1675			       dsp, dsp - hostdata->pScript);
1676			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1677		} else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1678			printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1679			       host->host_no, pun, lun, dstat);
1680			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1681		}
1682
1683		
1684		/* NOTE: selection interrupt processing MUST occur
1685		 * after script interrupt processing to correctly cope
1686		 * with the case where we process a disconnect and
1687		 * then get reselected before we process the
1688		 * disconnection */
1689		if(sstat0 & SELECTED) {
1690			/* FIXME: It currently takes at least FOUR
1691			 * interrupts to complete a command that
1692			 * disconnects: one for the disconnect, one
1693			 * for the reselection, one to get the
1694			 * reselection data and one to complete the
1695			 * command.  If we guess the reselected
1696			 * command here and prepare it, we only need
1697			 * to get a reselection data interrupt if we
1698			 * guessed wrongly.  Since the interrupt
1699			 * overhead is much greater than the command
1700			 * setup, this would be an efficient
1701			 * optimisation particularly as we probably
1702			 * only have one outstanding command on a
1703			 * target most of the time */
1704
1705			resume_offset = process_selection(host, dsp);
1706
1707		}
1708
1709	}
1710
1711	if(resume_offset) {
1712		if(hostdata->state != NCR_700_HOST_BUSY) {
1713			printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1714			       host->host_no, resume_offset, resume_offset - hostdata->pScript);
1715			hostdata->state = NCR_700_HOST_BUSY;
1716		}
1717
1718		DEBUG(("Attempting to resume at %x\n", resume_offset));
1719		NCR_700_clear_fifo(host);
1720		NCR_700_writel(resume_offset, host, DSP_REG);
1721	} 
1722	/* There is probably a technical no-no about this: If we're a
1723	 * shared interrupt and we got this interrupt because the
1724	 * other device needs servicing not us, we're still going to
1725	 * check our queued commands here---of course, there shouldn't
1726	 * be any outstanding.... */
1727	if(hostdata->state == NCR_700_HOST_FREE) {
1728		int i;
1729
1730		for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1731			/* fairness: always run the queue from the last
1732			 * position we left off */
1733			int j = (i + hostdata->saved_slot_position)
1734				% NCR_700_COMMAND_SLOTS_PER_HOST;
1735			
1736			if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1737				continue;
1738			if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1739				DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1740				       host->host_no, &hostdata->slots[j],
1741				       hostdata->slots[j].cmnd));
1742				hostdata->saved_slot_position = j + 1;
1743			}
1744
1745			break;
1746		}
1747	}
1748 out_unlock:
1749	spin_unlock_irqrestore(host->host_lock, flags);
1750	return IRQ_RETVAL(handled);
1751}
1752
1753static int NCR_700_queuecommand_lck(struct scsi_cmnd *SCp)
1754{
1755	struct NCR_700_Host_Parameters *hostdata = 
1756		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1757	__u32 move_ins;
1758	struct NCR_700_command_slot *slot;
1759
1760	if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1761		/* We're over our allocation, this should never happen
1762		 * since we report the max allocation to the mid layer */
1763		printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1764		return 1;
1765	}
1766	/* check for untagged commands.  We can only accept an untagged
1767	 * command when nothing else is outstanding.  Commands could be untagged because:
1768	 *
1769	 * - The tag negotiated bitmap is clear
1770	 * - The blk layer sent an untagged command
1771	 */
1772	if(NCR_700_get_depth(SCp->device) != 0
1773	   && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1774	       || !(SCp->flags & SCMD_TAGGED))) {
1775		CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1776		       NCR_700_get_depth(SCp->device));
1777		return SCSI_MLQUEUE_DEVICE_BUSY;
1778	}
1779	if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1780		CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1781		       NCR_700_get_depth(SCp->device));
1782		return SCSI_MLQUEUE_DEVICE_BUSY;
1783	}
1784	NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1785
1786	/* begin the command here */
1787	/* no need to check for NULL, test for command_slot_count above
1788	 * ensures a slot is free */
1789	slot = find_empty_slot(hostdata);
1790
1791	slot->cmnd = SCp;
1792
1793	SCp->host_scribble = (unsigned char *)slot;
1794
1795#ifdef NCR_700_DEBUG
1796	printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1797	scsi_print_command(SCp);
1798#endif
1799	if ((SCp->flags & SCMD_TAGGED)
1800	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1801	   && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1802		scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1803		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1804		NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1805	}
1806
1807	/* here we may have to process an untagged command.  The gate
1808	 * above ensures that this will be the only one outstanding,
1809	 * so clear the tag negotiated bit.
1810	 *
1811	 * FIXME: This will royally screw up on multiple LUN devices
1812	 * */
1813	if (!(SCp->flags & SCMD_TAGGED)
1814	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1815		scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1816		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1817	}
1818
1819	if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
1820	    SCp->device->simple_tags) {
1821		slot->tag = scsi_cmd_to_rq(SCp)->tag;
1822		CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1823		       slot->tag, slot);
1824	} else {
1825		struct NCR_700_Device_Parameters *p = SCp->device->hostdata;
1826
1827		slot->tag = SCSI_NO_TAG;
1828		/* save current command for reselection */
1829		p->current_cmnd = SCp;
1830	}
1831	/* sanity check: some of the commands generated by the mid-layer
1832	 * have an eccentric idea of their sc_data_direction */
1833	if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1834	   SCp->sc_data_direction != DMA_NONE) {
1835#ifdef NCR_700_DEBUG
1836		printk("53c700: Command");
1837		scsi_print_command(SCp);
1838		printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1839#endif
1840		SCp->sc_data_direction = DMA_NONE;
1841	}
1842
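	/* pick the script move instruction (data in, data out or none)
	 * used to build the scatter gather list below */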
1843	switch (SCp->cmnd[0]) {
1844	case REQUEST_SENSE:
1845		/* clear the internal sense magic */
1846		SCp->cmnd[6] = 0;
1847		fallthrough;
1848	default:
1849		/* OK, get it from the command */
1850		switch(SCp->sc_data_direction) {
1851		case DMA_BIDIRECTIONAL:
1852		default:
1853			printk(KERN_ERR "53c700: Unknown command for data direction ");
1854			scsi_print_command(SCp);
1855			
1856			move_ins = 0;
1857			break;
1858		case DMA_NONE:
1859			move_ins = 0;
1860			break;
1861		case DMA_FROM_DEVICE:
1862			move_ins = SCRIPT_MOVE_DATA_IN;
1863			break;
1864		case DMA_TO_DEVICE:
1865			move_ins = SCRIPT_MOVE_DATA_OUT;
1866			break;
1867		}
1868	}
1869
1870	/* now build the scatter gather list */
1871	if(move_ins != 0) {
1872		int i;
1873		int sg_count;
1874		dma_addr_t vPtr = 0;
1875		struct scatterlist *sg;
1876		__u32 count = 0;
1877
1878		sg_count = scsi_dma_map(SCp);
1879		BUG_ON(sg_count < 0);
1880
1881		scsi_for_each_sg(SCp, sg, sg_count, i) {
1882			vPtr = sg_dma_address(sg);
1883			count = sg_dma_len(sg);
1884
1885			slot->SG[i].ins = bS_to_host(move_ins | count);
1886			DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1887			       i, count, slot->SG[i].ins, (unsigned long)vPtr));
1888			slot->SG[i].pAddr = bS_to_host(vPtr);
1889		}
1890		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1891		slot->SG[i].pAddr = 0;
1892		dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG));
1893		DEBUG((" SETTING %p to %x\n",
1894		       (&slot->pSG[i].ins),
1895		       slot->SG[i].ins));
1896	}
1897	slot->resume_offset = 0;
1898	slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1899				    MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1900	NCR_700_start_command(SCp);
1901	return 0;
1902}
1903
1904STATIC DEF_SCSI_QCMD(NCR_700_queuecommand)
1905
1906STATIC int
1907NCR_700_abort(struct scsi_cmnd * SCp)
1908{
1909	struct NCR_700_command_slot *slot;
1910
1911	scmd_printk(KERN_INFO, SCp, "abort command\n");
1912
1913	slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1914
1915	if(slot == NULL)
1916		/* no outstanding command to abort */
1917		return SUCCESS;
1918	if(SCp->cmnd[0] == TEST_UNIT_READY) {
1919		/* FIXME: This is because of a problem in the new
1920		 * error handler.  When it is in error recovery, it
1921		 * will send a TUR to a device it thinks may still be
1922		 * showing a problem.  If the TUR isn't responded to,
1923		 * it will abort it and mark the device off line.
1924		 * Unfortunately, it does no other error recovery, so
1925		 * this would leave us with an outstanding command
1926		 * occupying a slot.  Rather than allow this to
1927		 * happen, we issue a bus reset to force all
1928		 * outstanding commands to terminate here. */
1929		NCR_700_internal_bus_reset(SCp->device->host);
1930		/* still drop through and return failed */
1931	}
1932	return FAILED;
1933
1934}
1935
1936STATIC int
1937NCR_700_host_reset(struct scsi_cmnd * SCp)
1938{
1939	DECLARE_COMPLETION_ONSTACK(complete);
1940	struct NCR_700_Host_Parameters *hostdata = 
1941		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1942
1943	scmd_printk(KERN_INFO, SCp,
1944		"New error handler wants HOST reset, cmd %p\n\t", SCp);
1945	scsi_print_command(SCp);
1946
1947	/* In theory, eh_complete should always be null because the
1948	 * eh is single threaded, but just in case we're handling a
1949	 * reset via sg or something */
1950	spin_lock_irq(SCp->device->host->host_lock);
1951	while (hostdata->eh_complete != NULL) {
1952		spin_unlock_irq(SCp->device->host->host_lock);
1953		msleep_interruptible(100);
1954		spin_lock_irq(SCp->device->host->host_lock);
1955	}
1956
1957	hostdata->eh_complete = &complete;
1958	NCR_700_internal_bus_reset(SCp->device->host);
1959	NCR_700_chip_reset(SCp->device->host);
1960
1961	spin_unlock_irq(SCp->device->host->host_lock);
1962	wait_for_completion(&complete);
1963	spin_lock_irq(SCp->device->host->host_lock);
1964
1965	hostdata->eh_complete = NULL;
1966	/* Revalidate the transport parameters of the failing device */
1967	if(hostdata->fast)
1968		spi_schedule_dv_device(SCp->device);
1969
1970	spin_unlock_irq(SCp->device->host->host_lock);
1971	return SUCCESS;
1972}
1973
1974STATIC void
1975NCR_700_set_period(struct scsi_target *STp, int period)
1976{
1977	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1978	struct NCR_700_Host_Parameters *hostdata = 
1979		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
1980	
1981	if(!hostdata->fast)
1982		return;
1983
1984	if(period < hostdata->min_period)
1985		period = hostdata->min_period;
1986
1987	spi_period(STp) = period;
1988	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
1989			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1990	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
1991}
1992
1993STATIC void
1994NCR_700_set_offset(struct scsi_target *STp, int offset)
1995{
1996	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1997	struct NCR_700_Host_Parameters *hostdata = 
1998		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
1999	int max_offset = hostdata->chip710
2000		? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2001	
2002	if(!hostdata->fast)
2003		return;
2004
2005	if(offset > max_offset)
2006		offset = max_offset;
2007
2008	/* if we're currently async, make sure the period is reasonable */
2009	if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2010				    spi_period(STp) > 0xff))
2011		spi_period(STp) = hostdata->min_period;
2012
2013	spi_offset(STp) = offset;
2014	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2015			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2016	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2017}
2018
2019STATIC int
2020NCR_700_slave_alloc(struct scsi_device *SDp)
2021{
2022	SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2023				GFP_KERNEL);
2024
2025	if (!SDp->hostdata)
2026		return -ENOMEM;
2027
2028	return 0;
2029}
2030
2031STATIC int
2032NCR_700_slave_configure(struct scsi_device *SDp)
2033{
2034	struct NCR_700_Host_Parameters *hostdata = 
2035		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2036
2037	/* to do here: allocate memory; build a queue_full list */
2038	if(SDp->tagged_supported) {
2039		scsi_change_queue_depth(SDp, NCR_700_DEFAULT_TAGS);
2040		NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2041	}
2042
2043	if(hostdata->fast) {
2044		/* Find the correct offset and period via domain validation */
2045		if (!spi_initial_dv(SDp->sdev_target))
2046			spi_dv_device(SDp);
2047	} else {
2048		spi_offset(SDp->sdev_target) = 0;
2049		spi_period(SDp->sdev_target) = 0;
2050	}
2051	return 0;
2052}
2053
2054STATIC void
2055NCR_700_slave_destroy(struct scsi_device *SDp)
2056{
2057	kfree(SDp->hostdata);
2058	SDp->hostdata = NULL;
2059}
2060
2061static int
2062NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2063{
2064	if (depth > NCR_700_MAX_TAGS)
2065		depth = NCR_700_MAX_TAGS;
2066	return scsi_change_queue_depth(SDp, depth);
2067}
2068
2069static ssize_t
2070NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2071{
2072	struct scsi_device *SDp = to_scsi_device(dev);
2073
2074	return sysfs_emit(buf, "%d\n", NCR_700_get_depth(SDp));
2075}
2076
2077static struct device_attribute NCR_700_active_tags_attr = {
2078	.attr = {
2079		.name =		"active_tags",
2080		.mode =		S_IRUGO,
2081	},
2082	.show = NCR_700_show_active_tags,
2083};
2084
2085STATIC struct attribute *NCR_700_dev_attrs[] = {
2086	&NCR_700_active_tags_attr.attr,
2087	NULL,
2088};
2089
2090ATTRIBUTE_GROUPS(NCR_700_dev);
2091
2092EXPORT_SYMBOL(NCR_700_detect);
2093EXPORT_SYMBOL(NCR_700_release);
2094EXPORT_SYMBOL(NCR_700_intr);
2095
2096static struct spi_function_template NCR_700_transport_functions =  {
2097	.set_period	= NCR_700_set_period,
2098	.show_period	= 1,
2099	.set_offset	= NCR_700_set_offset,
2100	.show_offset	= 1,
2101};
2102
2103static int __init NCR_700_init(void)
2104{
2105	NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2106	if(!NCR_700_transport_template)
2107		return -ENODEV;
2108	return 0;
2109}
2110
2111static void __exit NCR_700_exit(void)
2112{
2113	spi_release_transport(NCR_700_transport_template);
2114}
2115
2116module_init(NCR_700_init);
2117module_exit(NCR_700_exit);
2118