   1/* -*- mode: c; c-basic-offset: 8 -*- */
   2
   3/* NCR (or Symbios) 53c700 and 53c700-66 Driver
   4 *
   5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
   6**-----------------------------------------------------------------------------
   7**  
   8**  This program is free software; you can redistribute it and/or modify
   9**  it under the terms of the GNU General Public License as published by
  10**  the Free Software Foundation; either version 2 of the License, or
  11**  (at your option) any later version.
  12**
  13**  This program is distributed in the hope that it will be useful,
  14**  but WITHOUT ANY WARRANTY; without even the implied warranty of
  15**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16**  GNU General Public License for more details.
  17**
  18**  You should have received a copy of the GNU General Public License
  19**  along with this program; if not, write to the Free Software
  20**  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  21**
  22**-----------------------------------------------------------------------------
  23 */
  24
  25/* Notes:
  26 *
  27 * This driver is designed exclusively for these chips (virtually the
  28 * earliest of the scripts engine chips).  They need their own drivers
  29 * because they are missing so many of the scripts and snazzy register
  30 * features of their elder brothers (the 710, 720 and 770).
  31 *
  32 * The 700 is the lowliest of the line; it can only do async SCSI.
  33 * The 700-66 can at least do synchronous SCSI up to 10MHz.
  34 * 
  35 * The 700 chip has no host bus interface logic of its own.  However,
  36 * it is usually mapped to a location with well defined register
  37 * offsets.  Therefore, if you can determine the base address and the
  38 * irq your board incorporating this chip uses, you can probably use
  39 * this driver to run it (although you'll probably have to write a
  40 * minimal wrapper for the purpose---see the NCR_D700 driver for
  41 * details about how to do this).
  42 *
  43 *
  44 * TODO List:
  45 *
  46 * 1. Better statistics in the proc fs
  47 *
  48 * 2. Implement message queue (queues SCSI messages like commands) and make
  49 *    the abort and device reset functions use them.
  50 * */
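    /* For illustration only: a rough sketch (not the NCR_D700 code; the
     * board name, base address, IRQ and template fields below are
     * invented) of what such a minimal wrapper does.  It fills in a
     * scsi_host_template and an NCR_700_Host_Parameters block, hands
     * them to NCR_700_detect() and wires the returned host up to
     * NCR_700_intr():
     *
     *	static struct scsi_host_template board_tmpl = {
     *		.name		= "example 53c700 board",
     *		.proc_name	= "ex53c700",
     *		.this_id	= 7,
     *	};
     *
     *	// in the board's probe routine; dev is the board's struct device
     *	struct NCR_700_Host_Parameters *hostdata;
     *	struct Scsi_Host *host;
     *
     *	hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
     *	hostdata->base = ioremap(board_phys_base, 0x80);  // chip register window
     *	hostdata->clock = 50;                             // SCLK in MHz
     *	host = NCR_700_detect(&board_tmpl, hostdata, dev);
     *	if (host != NULL)
     *		request_irq(board_irq, NCR_700_intr, IRQF_SHARED,
     *			    "ex53c700", host);
     */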
  51
  52/* CHANGELOG
  53 *
  54 * Version 2.8
  55 *
  56 * Fixed a bad bug affecting tag starvation processing (previously the
  57 * driver would hang the system if too many tags starved).  Also fixed
  58 * a bad bug having to do with 10 byte command processing and REQUEST
  59 * SENSE (the command would loop forever getting a transfer length
  60 * mismatch in the CMD phase).
  61 *
  62 * Version 2.7
  63 *
  64 * Fixed scripts problem which caused certain devices (notably CDRWs)
  65 * to hang on initial INQUIRY.  Updated NCR_700_readl/writel to use
  66 * __raw_readl/writel for parisc compatibility (Thomas
  67 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
  68 * for sense requests (Ryan Bradetich).
  69 *
  70 * Version 2.6
  71 *
  72 * Following test of the 64 bit parisc kernel by Richard Hirst,
  73 * several problems have now been corrected.  Also adds support for
  74 * consistent memory allocation.
  75 *
  76 * Version 2.5
  77 * 
  78 * More compatibility changes for 710 (now actually works).  Enhanced
  79 * support for odd clock speeds which constrain SDTR negotiations.
  80 * Correct cacheline separation for scsi messages and status for
  81 * incoherent architectures.  Use of the pci mapping functions on
  82 * buffers to begin support for 64 bit drivers.
  83 *
  84 * Version 2.4
  85 *
  86 * Added support for the 53c710 chip (in 53c700 emulation mode only---no 
  87 * special 53c710 instructions or registers are used).
  88 *
  89 * Version 2.3
  90 *
  91 * More endianness/cache coherency changes.
  92 *
  93 * Better bad device handling (handles devices lying about tag
  94 * queueing support and devices which fail to provide sense data on
  95 * contingent allegiance conditions)
  96 *
  97 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
  98 * debugging this driver on the parisc architecture and suggesting
  99 * many improvements and bug fixes.
 100 *
 101 * Thanks also go to Linuxcare Inc. for providing several PARISC
 102 * machines for me to debug the driver on.
 103 *
 104 * Version 2.2
 105 *
 106 * Made the driver mem or io mapped; added endian invariance; added
 107 * dma cache flushing operations for architectures which need it;
 108 * added support for more varied clocking speeds.
 109 *
 110 * Version 2.1
 111 *
 112 * Initial modularisation from the D700.  See NCR_D700.c for the rest of
 113 * the changelog.
 114 * */
 115#define NCR_700_VERSION "2.8"
 116
 117#include <linux/kernel.h>
 118#include <linux/types.h>
 119#include <linux/string.h>
 120#include <linux/slab.h>
 121#include <linux/ioport.h>
 122#include <linux/delay.h>
 123#include <linux/spinlock.h>
 124#include <linux/completion.h>
 125#include <linux/init.h>
 126#include <linux/proc_fs.h>
 127#include <linux/blkdev.h>
 128#include <linux/module.h>
 129#include <linux/interrupt.h>
 130#include <linux/device.h>
 131#include <asm/dma.h>
 132#include <asm/system.h>
 133#include <asm/io.h>
 134#include <asm/pgtable.h>
 135#include <asm/byteorder.h>
 136
 137#include <scsi/scsi.h>
 138#include <scsi/scsi_cmnd.h>
 139#include <scsi/scsi_dbg.h>
 140#include <scsi/scsi_eh.h>
 141#include <scsi/scsi_host.h>
 142#include <scsi/scsi_tcq.h>
 143#include <scsi/scsi_transport.h>
 144#include <scsi/scsi_transport_spi.h>
 145
 146#include "53c700.h"
 147
 148/* NOTE: For 64 bit drivers there are points in the code where we use
 149 * a non dereferenceable pointer to point to a structure in dma-able
 150 * memory (which is 32 bits) so that we can use all of the structure
 151 * operations but take the address at the end.  This macro allows us
 152 * to truncate the 64 bit pointer down to 32 bits without the compiler
 153 * complaining */
 154#define to32bit(x)	((__u32)((unsigned long)(x)))
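    /* e.g. slot->pSG below holds a DMA address cast to a pointer, so
     * &slot->pSG[0].ins does the offset arithmetic in pointer form and
     * to32bit() truncates the result back to the 32 bit bus address the
     * scripts engine needs */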
 155
 156#ifdef NCR_700_DEBUG
 157#define STATIC
 158#else
 159#define STATIC static
 160#endif
 161
 162MODULE_AUTHOR("James Bottomley");
 163MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
 164MODULE_LICENSE("GPL");
 165
 166/* This is the script */
 167#include "53c700_d.h"
 168
 169
 170STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
 171STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
 172STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
 173STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
 174STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
 175STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
 176STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
 177STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
 178STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
 179static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth, int reason);
 180static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
 181
 182STATIC struct device_attribute *NCR_700_dev_attrs[];
 183
 184STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
 185
 186static char *NCR_700_phase[] = {
 187	"",
 188	"after selection",
 189	"before command phase",
 190	"after command phase",
 191	"after status phase",
 192	"after data in phase",
 193	"after data out phase",
 194	"during data phase",
 195};
 196
 197static char *NCR_700_condition[] = {
 198	"",
 199	"NOT MSG_OUT",
 200	"UNEXPECTED PHASE",
 201	"NOT MSG_IN",
 202	"UNEXPECTED MSG",
 203	"MSG_IN",
 204	"SDTR_MSG RECEIVED",
 205	"REJECT_MSG RECEIVED",
 206	"DISCONNECT_MSG RECEIVED",
 207	"MSG_OUT",
 208	"DATA_IN",
 209	
 210};
 211
 212static char *NCR_700_fatal_messages[] = {
 213	"unexpected message after reselection",
 214	"still MSG_OUT after message injection",
 215	"not MSG_IN after selection",
 216	"Illegal message length received",
 217};
 218
 219static char *NCR_700_SBCL_bits[] = {
 220	"IO ",
 221	"CD ",
 222	"MSG ",
 223	"ATN ",
 224	"SEL ",
 225	"BSY ",
 226	"ACK ",
 227	"REQ ",
 228};
 229
 230static char *NCR_700_SBCL_to_phase[] = {
 231	"DATA_OUT",
 232	"DATA_IN",
 233	"CMD_OUT",
 234	"STATUS",
 235	"ILLEGAL PHASE",
 236	"ILLEGAL PHASE",
 237	"MSG OUT",
 238	"MSG IN",
 239};
 240
 241/* This translates the SDTR message offset and period to a value
 242 * which can be loaded into the SXFER_REG.
 243 *
 244 * NOTE: According to SCSI-2, the true transfer period (in ns) is
 245 *       actually four times this period value */
 246static inline __u8
 247NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
 248			       __u8 offset, __u8 period)
 249{
 250	int XFERP;
 251
 252	__u8 min_xferp = (hostdata->chip710
 253			  ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
 254	__u8 max_offset = (hostdata->chip710
 255			   ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
 256
 257	if(offset == 0)
 258		return 0;
 259
 260	if(period < hostdata->min_period) {
 261		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %dns\n", period*4, hostdata->min_period*4);
 262		period = hostdata->min_period;
 263	}
 264	XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
 265	if(offset > max_offset) {
 266		printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
 267		       offset, max_offset);
 268		offset = max_offset;
 269	}
 270	if(XFERP < min_xferp) {
 271		XFERP =  min_xferp;
 272	}
 273	return (offset & 0x0f) | (XFERP & 0x07)<<4;
 274}
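    /* Worked example for the conversion above (illustration only, with an
     * assumed 50MHz sync_clock and ignoring the min_xferp/max_offset
     * clamps): a requested period of 50 (a 200ns true period, per the
     * note above) and an offset of 8 give
     *
     *	XFERP = (50*4 * 50)/1000 - 4 = 6
     *	SXFER = (8 & 0x0f) | ((6 & 0x07) << 4) = 0x68
     */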
 275
 276static inline __u8
 277NCR_700_get_SXFER(struct scsi_device *SDp)
 278{
 279	struct NCR_700_Host_Parameters *hostdata = 
 280		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
 281
 282	return NCR_700_offset_period_to_sxfer(hostdata,
 283					      spi_offset(SDp->sdev_target),
 284					      spi_period(SDp->sdev_target));
 285}
 286
 287struct Scsi_Host *
 288NCR_700_detect(struct scsi_host_template *tpnt,
 289	       struct NCR_700_Host_Parameters *hostdata, struct device *dev)
 290{
 291	dma_addr_t pScript, pSlots;
 292	__u8 *memory;
 293	__u32 *script;
 294	struct Scsi_Host *host;
 295	static int banner = 0;
 296	int j;
 297
 298	if(tpnt->sdev_attrs == NULL)
 299		tpnt->sdev_attrs = NCR_700_dev_attrs;
 300
 301	memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
 302				       &pScript, GFP_KERNEL);
 303	if(memory == NULL) {
 304		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
 305		return NULL;
 306	}
 307
 308	script = (__u32 *)memory;
 309	hostdata->msgin = memory + MSGIN_OFFSET;
 310	hostdata->msgout = memory + MSGOUT_OFFSET;
 311	hostdata->status = memory + STATUS_OFFSET;
 312	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
 313	hostdata->dev = dev;
 314
 315	pSlots = pScript + SLOTS_OFFSET;
 316
 317	/* Fill in the missing routines from the host template */
 318	tpnt->queuecommand = NCR_700_queuecommand;
 319	tpnt->eh_abort_handler = NCR_700_abort;
 320	tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
 321	tpnt->eh_host_reset_handler = NCR_700_host_reset;
 322	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
 323	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
 324	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
 325	tpnt->use_clustering = ENABLE_CLUSTERING;
 326	tpnt->slave_configure = NCR_700_slave_configure;
 327	tpnt->slave_destroy = NCR_700_slave_destroy;
 328	tpnt->slave_alloc = NCR_700_slave_alloc;
 329	tpnt->change_queue_depth = NCR_700_change_queue_depth;
 330	tpnt->change_queue_type = NCR_700_change_queue_type;
 331
 332	if(tpnt->name == NULL)
 333		tpnt->name = "53c700";
 334	if(tpnt->proc_name == NULL)
 335		tpnt->proc_name = "53c700";
 336
 337	host = scsi_host_alloc(tpnt, 4);
 338	if (!host)
 339		return NULL;
 340	memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
 341	       * NCR_700_COMMAND_SLOTS_PER_HOST);
 342	for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
 343		dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
 344					  - (unsigned long)&hostdata->slots[0].SG[0]);
 345		hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
 346		if(j == 0)
 347			hostdata->free_list = &hostdata->slots[j];
 348		else
 349			hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
 350		hostdata->slots[j].state = NCR_700_SLOT_FREE;
 351	}
 352
 353	for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
 354		script[j] = bS_to_host(SCRIPT[j]);
 355
 356	/* adjust all labels to be bus physical */
 357	for (j = 0; j < PATCHES; j++)
 358		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
 359	/* now patch up fixed addresses. */
 360	script_patch_32(hostdata->dev, script, MessageLocation,
 361			pScript + MSGOUT_OFFSET);
 362	script_patch_32(hostdata->dev, script, StatusAddress,
 363			pScript + STATUS_OFFSET);
 364	script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
 365			pScript + MSGIN_OFFSET);
 366
 367	hostdata->script = script;
 368	hostdata->pScript = pScript;
 369	dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
 370	hostdata->state = NCR_700_HOST_FREE;
 371	hostdata->cmd = NULL;
 372	host->max_id = 8;
 373	host->max_lun = NCR_700_MAX_LUNS;
 374	BUG_ON(NCR_700_transport_template == NULL);
 375	host->transportt = NCR_700_transport_template;
 376	host->unique_id = (unsigned long)hostdata->base;
 377	hostdata->eh_complete = NULL;
 378	host->hostdata[0] = (unsigned long)hostdata;
 379	/* kick the chip */
 380	NCR_700_writeb(0xff, host, CTEST9_REG);
 381	if (hostdata->chip710)
 382		hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
 383	else
 384		hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
 385	hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
 386	if (banner == 0) {
 387		printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
 388		banner = 1;
 389	}
 390	printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
 391	       hostdata->chip710 ? "53c710" :
 392	       (hostdata->fast ? "53c700-66" : "53c700"),
 393	       hostdata->rev, hostdata->differential ?
 394	       "(Differential)" : "");
 395	/* reset the chip */
 396	NCR_700_chip_reset(host);
 397
 398	if (scsi_add_host(host, dev)) {
 399		dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
 400		scsi_host_put(host);
 401		return NULL;
 402	}
 403
 404	spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
 405		SPI_SIGNAL_SE;
 406
 407	return host;
 408}
 409
 410int
 411NCR_700_release(struct Scsi_Host *host)
 412{
 413	struct NCR_700_Host_Parameters *hostdata = 
 414		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 415
 416	dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
 417			       hostdata->script, hostdata->pScript);
 418	return 1;
 419}
 420
 421static inline __u8
 422NCR_700_identify(int can_disconnect, __u8 lun)
 423{
 424	return IDENTIFY_BASE |
 425		((can_disconnect) ? 0x40 : 0) |
 426		(lun & NCR_700_LUN_MASK);
 427}
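    /* e.g. NCR_700_identify(1, 2) builds the IDENTIFY byte 0xc2 (assuming
     * the usual IDENTIFY_BASE of 0x80): disconnect permitted, LUN 2 */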
 428
 429/*
 430 * Function : static int data_residual (Scsi_Host *host)
 431 *
 432 * Purpose : return residual data count of what's in the chip.  If you
 433 * really want to know what this function is doing, it's almost a
 434 * direct transcription of the algorithm described in the 53c710
 435 * guide, except that the DBC and DFIFO registers are only 6 bits
 436 * wide on a 53c700.
 437 *
 438 * Inputs : host - SCSI host */
 439static inline int
 440NCR_700_data_residual (struct Scsi_Host *host) {
 441	struct NCR_700_Host_Parameters *hostdata = 
 442		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 443	int count, synchronous = 0;
 444	unsigned int ddir;
 445
 446	if(hostdata->chip710) {
 447		count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
 448			 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
 449	} else {
 450		count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
 451			 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
 452	}
 453	
 454	if(hostdata->fast)
 455		synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
 456	
 457	/* get the data direction */
 458	ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
 459
 460	if (ddir) {
 461		/* Receive */
 462		if (synchronous) 
 463			count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
 464		else
 465			if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
 466				++count;
 467	} else {
 468		/* Send */
 469		__u8 sstat = NCR_700_readb(host, SSTAT1_REG);
 470		if (sstat & SODL_REG_FULL)
 471			++count;
 472		if (synchronous && (sstat & SODR_REG_FULL))
 473			++count;
 474	}
 475#ifdef NCR_700_DEBUG
 476	if(count)
 477		printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
 478#endif
 479	return count;
 480}
 481
 482/* print out the SCSI wires and corresponding phase from the SBCL register
 483 * in the chip */
 484static inline char *
 485sbcl_to_string(__u8 sbcl)
 486{
 487	int i;
 488	static char ret[256];
 489
 490	ret[0]='\0';
 491	for(i=0; i<8; i++) {
 492		if((1<<i) & sbcl) 
 493			strcat(ret, NCR_700_SBCL_bits[i]);
 494	}
 495	strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
 496	return ret;
 497}
 498
 499static inline __u8
 500bitmap_to_number(__u8 bitmap)
 501{
 502	__u8 i;
 503
 504	for(i=0; i<8 && !(bitmap &(1<<i)); i++)
 505		;
 506	return i;
 507}
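    /* e.g. a reselection bitmap of 0x20 (only bit 5 set) yields SCSI ID 5 */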
 508
 509/* Pull a slot off the free list */
 510STATIC struct NCR_700_command_slot *
 511find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
 512{
 513	struct NCR_700_command_slot *slot = hostdata->free_list;
 514
 515	if(slot == NULL) {
 516		/* sanity check */
 517		if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
 518			printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
 519		return NULL;
 520	}
 521
 522	if(slot->state != NCR_700_SLOT_FREE)
 523		/* should panic! */
 524		printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
 525		
 526
 527	hostdata->free_list = slot->ITL_forw;
 528	slot->ITL_forw = NULL;
 529
 530
 531	/* NOTE: set the state to busy here, not queued, since this
 532	 * indicates the slot is in use and cannot be run by the IRQ
 533	 * finish routine.  If we cannot start the command when it
 534	 * is properly built, we then change to NCR_700_SLOT_QUEUED */
 535	slot->state = NCR_700_SLOT_BUSY;
 536	slot->flags = 0;
 537	hostdata->command_slot_count++;
 538	
 539	return slot;
 540}
 541
 542STATIC void 
 543free_slot(struct NCR_700_command_slot *slot,
 544	  struct NCR_700_Host_Parameters *hostdata)
 545{
 546	if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
 547		printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
 548	}
 549	if(slot->state == NCR_700_SLOT_FREE) {
 550		printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
 551	}
 552	
 553	slot->resume_offset = 0;
 554	slot->cmnd = NULL;
 555	slot->state = NCR_700_SLOT_FREE;
 556	slot->ITL_forw = hostdata->free_list;
 557	hostdata->free_list = slot;
 558	hostdata->command_slot_count--;
 559}
 560
 561
 562/* This routine really does very little.  The command is indexed on
 563   the ITL and (if tagged) the ITLQ lists in _queuecommand */
 564STATIC void
 565save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
 566		     struct scsi_cmnd *SCp, __u32 dsp)
 567{
 568	/* It's just possible that this gets executed twice */
 569	if(SCp != NULL) {
 570		struct NCR_700_command_slot *slot =
 571			(struct NCR_700_command_slot *)SCp->host_scribble;
 572
 573		slot->resume_offset = dsp;
 574	}
 575	hostdata->state = NCR_700_HOST_FREE;
 576	hostdata->cmd = NULL;
 577}
 578
 579STATIC inline void
 580NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
 581	      struct NCR_700_command_slot *slot)
 582{
 583	if(SCp->sc_data_direction != DMA_NONE &&
 584	   SCp->sc_data_direction != DMA_BIDIRECTIONAL)
 585		scsi_dma_unmap(SCp);
 586}
 587
 588STATIC inline void
 589NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
 590	       struct scsi_cmnd *SCp, int result)
 591{
 592	hostdata->state = NCR_700_HOST_FREE;
 593	hostdata->cmd = NULL;
 594
 595	if(SCp != NULL) {
 596		struct NCR_700_command_slot *slot = 
 597			(struct NCR_700_command_slot *)SCp->host_scribble;
 598		
 599		dma_unmap_single(hostdata->dev, slot->pCmd,
 600				 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
 601		if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
 602			char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
 603#ifdef NCR_700_DEBUG
 604			printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
 605			       SCp, SCp->cmnd[7], result);
 606			scsi_print_sense("53c700", SCp);
 607
 608#endif
 609			dma_unmap_single(hostdata->dev, slot->dma_handle,
 610					 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 611			/* restore the old result if the request sense was
 612			 * successful */
 613			if (result == 0)
 614				result = cmnd[7];
 615			/* restore the original length */
 616			SCp->cmd_len = cmnd[8];
 617		} else
 618			NCR_700_unmap(hostdata, SCp, slot);
 619
 620		free_slot(slot, hostdata);
 621#ifdef NCR_700_DEBUG
 622		if(NCR_700_get_depth(SCp->device) == 0 ||
 623		   NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
 624			printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
 625			       NCR_700_get_depth(SCp->device));
 626#endif /* NCR_700_DEBUG */
 627		NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
 628
 629		SCp->host_scribble = NULL;
 630		SCp->result = result;
 631		SCp->scsi_done(SCp);
 632	} else {
 633		printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
 634	}
 635}
 636
 637
 638STATIC void
 639NCR_700_internal_bus_reset(struct Scsi_Host *host)
 640{
 641	/* Bus reset */
 642	NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
 643	udelay(50);
 644	NCR_700_writeb(0, host, SCNTL1_REG);
 645
 646}
 647
 648STATIC void
 649NCR_700_chip_setup(struct Scsi_Host *host)
 650{
 651	struct NCR_700_Host_Parameters *hostdata = 
 652		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 653	__u8 min_period;
 654	__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
 655
 656	if(hostdata->chip710) {
 657		__u8 burst_disable = 0;
 658		__u8 burst_length = 0;
 659
 660		switch (hostdata->burst_length) {
 661			case 1:
 662			        burst_length = BURST_LENGTH_1;
 663			        break;
 664			case 2:
 665			        burst_length = BURST_LENGTH_2;
 666			        break;
 667			case 4:
 668			        burst_length = BURST_LENGTH_4;
 669			        break;
 670			case 8:
 671			        burst_length = BURST_LENGTH_8;
 672			        break;
 673			default:
 674			        burst_disable = BURST_DISABLE;
 675			        break;
 676		}
 677		hostdata->dcntl_extra |= COMPAT_700_MODE;
 678
 679		NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
 680		NCR_700_writeb(burst_length | hostdata->dmode_extra,
 681			       host, DMODE_710_REG);
 682		NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
 683			       (hostdata->differential ? DIFF : 0),
 684			       host, CTEST7_REG);
 685		NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
 686		NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
 687			       | AUTO_ATN, host, SCNTL0_REG);
 688	} else {
 689		NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
 690			       host, DMODE_700_REG);
 691		NCR_700_writeb(hostdata->differential ? 
 692			       DIFF : 0, host, CTEST7_REG);
 693		if(hostdata->fast) {
 694			/* this is for 700-66, does nothing on 700 */
 695			NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION 
 696				       | GENERATE_RECEIVE_PARITY, host,
 697				       CTEST8_REG);
 698		} else {
 699			NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
 700				       | PARITY | AUTO_ATN, host, SCNTL0_REG);
 701		}
 702	}
 703
 704	NCR_700_writeb(1 << host->this_id, host, SCID_REG);
 705	NCR_700_writeb(0, host, SBCL_REG);
 706	NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
 707
 708	NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
 709	     | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
 710
 711	NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
 712	NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
 713	if(hostdata->clock > 75) {
 714		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
 715		/* do the best we can, but the async clock will be out
 716		 * of spec: sync divider 2, async divider 3 */
 717		DEBUG(("53c700: sync 2 async 3\n"));
 718		NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
 719		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 720		hostdata->sync_clock = hostdata->clock/2;
 721	} else	if(hostdata->clock > 50  && hostdata->clock <= 75) {
 722		/* sync divider 1.5, async divider 3 */
 723		DEBUG(("53c700: sync 1.5 async 3\n"));
 724		NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
 725		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 726		hostdata->sync_clock = hostdata->clock*2;
 727		hostdata->sync_clock /= 3;
 728		
 729	} else if(hostdata->clock > 37 && hostdata->clock <= 50) {
 730		/* sync divider 1, async divider 2 */
 731		DEBUG(("53c700: sync 1 async 2\n"));
 732		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
 733		NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 734		hostdata->sync_clock = hostdata->clock;
 735	} else if(hostdata->clock > 25 && hostdata->clock <=37) {
 736		/* sync divider 1, async divider 1.5 */
 737		DEBUG(("53c700: sync 1 async 1.5\n"));
 738		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
 739		NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
 740		hostdata->sync_clock = hostdata->clock;
 741	} else {
 742		DEBUG(("53c700: sync 1 async 1\n"));
 743		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
 744		NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 745		/* sync divider 1, async divider 1 */
 746		hostdata->sync_clock = hostdata->clock;
 747	}
 748	/* Calculate the actual minimum period that can be supported
 749	 * by our synchronous clock speed.  See the 710 manual for
 750	 * exact details of this calculation which is based on a
 751	 * setting of the SXFER register */
 752	min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
 753	hostdata->min_period = NCR_700_MIN_PERIOD;
 754	if(min_period > NCR_700_MIN_PERIOD)
 755		hostdata->min_period = min_period;
 756}
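    /* Illustration of the min_period calculation above (the numbers are
     * assumptions, not chip data): with min_xferp = 4 and a 50MHz
     * sync_clock, min_period = 1000*(4+4)/(4*50) = 40, i.e. a 160ns true
     * transfer period since period values are in units of 4ns */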
 757
 758STATIC void
 759NCR_700_chip_reset(struct Scsi_Host *host)
 760{
 761	struct NCR_700_Host_Parameters *hostdata = 
 762		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 763	if(hostdata->chip710) {
 764		NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
 765		udelay(100);
 766
 767		NCR_700_writeb(0, host, ISTAT_REG);
 768	} else {
 769		NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
 770		udelay(100);
 771		
 772		NCR_700_writeb(0, host, DCNTL_REG);
 773	}
 774
 775	mdelay(1000);
 776
 777	NCR_700_chip_setup(host);
 778}
 779
 780/* The heart of the message processing engine is that the instruction
 781 * immediately after the INT is the normal case (and so must be CLEAR
 782 * ACK).  If we want to do something else, we call that routine in
 783 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
 784 * ACK) so that the routine returns correctly to resume its activity
 785 * */
 786STATIC __u32
 787process_extended_message(struct Scsi_Host *host, 
 788			 struct NCR_700_Host_Parameters *hostdata,
 789			 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
 790{
 791	__u32 resume_offset = dsp, temp = dsp + 8;
 792	__u8 pun = 0xff, lun = 0xff;
 793
 794	if(SCp != NULL) {
 795		pun = SCp->device->id;
 796		lun = SCp->device->lun;
 797	}
 798
 799	switch(hostdata->msgin[2]) {
 800	case A_SDTR_MSG:
 801		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
 802			struct scsi_target *starget = SCp->device->sdev_target;
 803			__u8 period = hostdata->msgin[3];
 804			__u8 offset = hostdata->msgin[4];
 805
 806			if(offset == 0 || period == 0) {
 807				offset = 0;
 808				period = 0;
 809			}
 810
 811			spi_offset(starget) = offset;
 812			spi_period(starget) = period;
 813			
 814			if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
 815				spi_display_xfer_agreement(starget);
 816				NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
 817			}
 818			
 819			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
 820			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
 821			
 822			NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
 823				       host, SXFER_REG);
 824
 825		} else {
 826			/* SDTR message out of the blue, reject it */
 827			shost_printk(KERN_WARNING, host,
 828				"Unexpected SDTR msg\n");
 829			hostdata->msgout[0] = A_REJECT_MSG;
 830			dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
 831			script_patch_16(hostdata->dev, hostdata->script,
 832			                MessageCount, 1);
 833			/* SendMsgOut returns, so set up the return
 834			 * address */
 835			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 836		}
 837		break;
 838	
 839	case A_WDTR_MSG:
 840		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
 841		       host->host_no, pun, lun);
 842		hostdata->msgout[0] = A_REJECT_MSG;
 843		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
 844		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
 845		                1);
 846		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 847
 848		break;
 849
 850	default:
 851		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
 852		       host->host_no, pun, lun,
 853		       NCR_700_phase[(dsps & 0xf00) >> 8]);
 854		spi_print_msg(hostdata->msgin);
 855		printk("\n");
 856		/* just reject it */
 857		hostdata->msgout[0] = A_REJECT_MSG;
 858		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
 859		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
 860		                1);
 861		/* SendMsgOut returns, so set up the return
 862		 * address */
 863		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 864	}
 865	NCR_700_writel(temp, host, TEMP_REG);
 866	return resume_offset;
 867}
 868
 869STATIC __u32
 870process_message(struct Scsi_Host *host,	struct NCR_700_Host_Parameters *hostdata,
 871		struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
 872{
 873	/* work out where to return to */
 874	__u32 temp = dsp + 8, resume_offset = dsp;
 875	__u8 pun = 0xff, lun = 0xff;
 876
 877	if(SCp != NULL) {
 878		pun = SCp->device->id;
 879		lun = SCp->device->lun;
 880	}
 881
 882#ifdef NCR_700_DEBUG
 883	printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
 884	       NCR_700_phase[(dsps & 0xf00) >> 8]);
 885	spi_print_msg(hostdata->msgin);
 886	printk("\n");
 887#endif
 888
 889	switch(hostdata->msgin[0]) {
 890
 891	case A_EXTENDED_MSG:
 892		resume_offset =  process_extended_message(host, hostdata, SCp,
 893							  dsp, dsps);
 894		break;
 895
 896	case A_REJECT_MSG:
 897		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
 898			/* Rejected our sync negotiation attempt */
 899			spi_period(SCp->device->sdev_target) =
 900				spi_offset(SCp->device->sdev_target) = 0;
 901			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
 902			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
 903		} else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
 904			/* rejected our first simple tag message */
 905			scmd_printk(KERN_WARNING, SCp,
 906				"Rejected first tag queue attempt, turning off tag queueing\n");
 907			/* we're done negotiating */
 908			NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
 909			hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
 910			SCp->device->tagged_supported = 0;
 911			scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
 912		} else {
 913			shost_printk(KERN_WARNING, host,
 914				"(%d:%d) Unexpected REJECT Message %s\n",
 915			       pun, lun,
 916			       NCR_700_phase[(dsps & 0xf00) >> 8]);
 917			/* however, just ignore it */
 918		}
 919		break;
 920
 921	case A_PARITY_ERROR_MSG:
 922		printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
 923		       pun, lun);
 924		NCR_700_internal_bus_reset(host);
 925		break;
 926	case A_SIMPLE_TAG_MSG:
 927		printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
 928		       pun, lun, hostdata->msgin[1],
 929		       NCR_700_phase[(dsps & 0xf00) >> 8]);
 930		/* just ignore it */
 931		break;
 932	default:
 933		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
 934		       host->host_no, pun, lun,
 935		       NCR_700_phase[(dsps & 0xf00) >> 8]);
 936
 937		spi_print_msg(hostdata->msgin);
 938		printk("\n");
 939		/* just reject it */
 940		hostdata->msgout[0] = A_REJECT_MSG;
 941		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
 942		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
 943		                1);
 944		/* SendMsgOut returns, so set up the return
 945		 * address */
 946		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 947
 948		break;
 949	}
 950	NCR_700_writel(temp, host, TEMP_REG);
 951	/* set us up to receive another message */
 952	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
 953	return resume_offset;
 954}
 955
 956STATIC __u32
 957process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 958			 struct Scsi_Host *host,
 959			 struct NCR_700_Host_Parameters *hostdata)
 960{
 961	__u32 resume_offset = 0;
 962	__u8 pun = 0xff, lun=0xff;
 963
 964	if(SCp != NULL) {
 965		pun = SCp->device->id;
 966		lun = SCp->device->lun;
 967	}
 968
 969	if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
 970		DEBUG(("  COMMAND COMPLETE, status=%02x\n",
 971		       hostdata->status[0]));
 972		/* OK, if TCQ still under negotiation, we now know it works */
 973		if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
 974			NCR_700_set_tag_neg_state(SCp->device,
 975						  NCR_700_FINISHED_TAG_NEGOTIATION);
 976			
 977		/* check for contingent allegiance conditions */
 978		if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
 979		   status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
 980			struct NCR_700_command_slot *slot =
 981				(struct NCR_700_command_slot *)SCp->host_scribble;
 982			if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
 983				/* OOPS: bad device, returning another
 984				 * contingent allegiance condition */
 985				scmd_printk(KERN_ERR, SCp,
 986					"broken device is looping in contingent allegiance: ignoring\n");
 987				NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
 988			} else {
 989				char *cmnd =
 990					NCR_700_get_sense_cmnd(SCp->device);
 991#ifdef NCR_DEBUG
 992				scsi_print_command(SCp);
 993				printk("  cmd %p has status %d, requesting sense\n",
 994				       SCp, hostdata->status[0]);
 995#endif
 996				/* we can destroy the command here
 997				 * because the contingent allegiance
 998				 * condition will cause a retry which
 999				 * will re-copy the command from the
1000				 * saved data_cmnd.  We also unmap any
1001				 * data associated with the command
1002				 * here */
1003				NCR_700_unmap(hostdata, SCp, slot);
1004				dma_unmap_single(hostdata->dev, slot->pCmd,
1005						 MAX_COMMAND_SIZE,
1006						 DMA_TO_DEVICE);
1007
1008				cmnd[0] = REQUEST_SENSE;
1009				cmnd[1] = (SCp->device->lun & 0x7) << 5;
1010				cmnd[2] = 0;
1011				cmnd[3] = 0;
1012				cmnd[4] = SCSI_SENSE_BUFFERSIZE;
1013				cmnd[5] = 0;
1014				/* Here's a quiet hack: the
1015				 * REQUEST_SENSE command is six bytes,
1016				 * so store a flag indicating that
1017				 * this was an internal sense request
1018				 * and the original status at the end
1019				 * of the command */
1020				cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1021				cmnd[7] = hostdata->status[0];
1022				cmnd[8] = SCp->cmd_len;
1023				SCp->cmd_len = 6; /* command length for
1024						   * REQUEST_SENSE */
1025				slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1026				slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1027				slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
1028				slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1029				slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1030				slot->SG[1].pAddr = 0;
1031				slot->resume_offset = hostdata->pScript;
1032				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1033				dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1034
1035				/* queue the command for reissue */
1036				slot->state = NCR_700_SLOT_QUEUED;
1037				slot->flags = NCR_700_FLAG_AUTOSENSE;
1038				hostdata->state = NCR_700_HOST_FREE;
1039				hostdata->cmd = NULL;
1040			}
1041		} else {
1042			// Currently rely on the mid layer evaluation
1043			// of the tag queuing capability
1044			//
1045			//if(status_byte(hostdata->status[0]) == GOOD &&
1046			//   SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1047			//	/* Piggy back the tag queueing support
1048			//	 * on this command */
1049			//	dma_sync_single_for_cpu(hostdata->dev,
1050			//			    slot->dma_handle,
1051			//			    SCp->request_bufflen,
1052			//			    DMA_FROM_DEVICE);
1053			//	if(((char *)SCp->request_buffer)[7] & 0x02) {
1054			//		scmd_printk(KERN_INFO, SCp,
1055			//		     "Enabling Tag Command Queuing\n");
1056			//		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1057			//		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1058			//	} else {
1059			//		NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1060			//		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1061			//	}
1062			//}
1063			NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1064		}
1065	} else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1066		__u8 i = (dsps & 0xf00) >> 8;
1067
1068		scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1069		       NCR_700_phase[i],
1070		       sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1071		scmd_printk(KERN_ERR, SCp, "         len = %d, cmd =",
1072			SCp->cmd_len);
1073		scsi_print_command(SCp);
1074
1075		NCR_700_internal_bus_reset(host);
1076	} else if((dsps & 0xfffff000) == A_FATAL) {
1077		int i = (dsps & 0xfff);
1078
1079		printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1080		       host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1081		if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1082			printk(KERN_ERR "     msg begins %02x %02x\n",
1083			       hostdata->msgin[0], hostdata->msgin[1]);
1084		}
1085		NCR_700_internal_bus_reset(host);
1086	} else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1087#ifdef NCR_700_DEBUG
1088		__u8 i = (dsps & 0xf00) >> 8;
1089
1090		printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1091		       host->host_no, pun, lun,
1092		       i, NCR_700_phase[i]);
1093#endif
1094		save_for_reselection(hostdata, SCp, dsp);
1095
1096	} else if(dsps == A_RESELECTION_IDENTIFIED) {
1097		__u8 lun;
1098		struct NCR_700_command_slot *slot;
1099		__u8 reselection_id = hostdata->reselection_id;
1100		struct scsi_device *SDp;
1101
1102		lun = hostdata->msgin[0] & 0x1f;
1103
1104		hostdata->reselection_id = 0xff;
1105		DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1106		       host->host_no, reselection_id, lun));
1107		/* clear the reselection indicator */
1108		SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1109		if(unlikely(SDp == NULL)) {
1110			printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1111			       host->host_no, reselection_id, lun);
1112			BUG();
1113		}
1114		if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1115			struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1116			if(unlikely(SCp == NULL)) {
1117				printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n", 
1118				       host->host_no, reselection_id, lun, hostdata->msgin[2]);
1119				BUG();
1120			}
1121
1122			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1123			DDEBUG(KERN_DEBUG, SDp,
1124				"reselection is tag %d, slot %p(%d)\n",
1125				hostdata->msgin[2], slot, slot->tag);
1126		} else {
1127			struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1128			if(unlikely(SCp == NULL)) {
1129				sdev_printk(KERN_ERR, SDp,
1130					"no saved request for untagged cmd\n");
1131				BUG();
1132			}
1133			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1134		}
1135
1136		if(slot == NULL) {
1137			printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1138			       host->host_no, reselection_id, lun,
1139			       hostdata->msgin[0], hostdata->msgin[1],
1140			       hostdata->msgin[2]);
1141		} else {
1142			if(hostdata->state != NCR_700_HOST_BUSY)
1143				printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1144				       host->host_no);
1145			resume_offset = slot->resume_offset;
1146			hostdata->cmd = slot->cmnd;
1147
1148			/* re-patch for this command */
1149			script_patch_32_abs(hostdata->dev, hostdata->script,
1150			                    CommandAddress, slot->pCmd);
1151			script_patch_16(hostdata->dev, hostdata->script,
1152					CommandCount, slot->cmnd->cmd_len);
1153			script_patch_32_abs(hostdata->dev, hostdata->script,
1154			                    SGScriptStartAddress,
1155					    to32bit(&slot->pSG[0].ins));
1156
1157			/* Note: setting SXFER only works if we're
1158			 * still in the MESSAGE phase, so it is vital
1159			 * that ACK is still asserted when we process
1160			 * the reselection message.  The resume offset
1161			 * should therefore always clear ACK */
1162			NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1163				       host, SXFER_REG);
1164			dma_cache_sync(hostdata->dev, hostdata->msgin,
1165				       MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1166			dma_cache_sync(hostdata->dev, hostdata->msgout,
1167				       MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1168			/* I'm just being paranoid here, the command should
1169			 * already have been flushed from the cache */
1170			dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
1171				       slot->cmnd->cmd_len, DMA_TO_DEVICE);
1172
1173
1174			
1175		}
1176	} else if(dsps == A_RESELECTED_DURING_SELECTION) {
1177
1178		/* This section is full of debugging code because I've
1179		 * never managed to reach it.  I think what happens is
1180		 * that, because the 700 runs with selection
1181		 * interrupts enabled the whole time, we take a
1182		 * selection interrupt before we manage to get to the
1183		 * reselected script interrupt */
1184
1185		__u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1186		struct NCR_700_command_slot *slot;
1187		
1188		/* Take out our own ID */
1189		reselection_id &= ~(1<<host->this_id);
1190		
1191		/* I've never seen this happen, so keep this as a printk rather
1192		 * than a debug */
1193		printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1194		       host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1195
1196		{
1197			/* FIXME: DEBUGGING CODE */
1198			__u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1199			int i;
1200
1201			for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1202				if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1203				   && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1204					break;
1205			}
1206			printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1207			SCp =  hostdata->slots[i].cmnd;
1208		}
1209
1210		if(SCp != NULL) {
1211			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1212			/* change slot from busy to queued to redo command */
1213			slot->state = NCR_700_SLOT_QUEUED;
1214		}
1215		hostdata->cmd = NULL;
1216		
1217		if(reselection_id == 0) {
1218			if(hostdata->reselection_id == 0xff) {
1219				printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1220				return 0;
1221			} else {
1222				printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1223				       host->host_no);
1224				reselection_id = hostdata->reselection_id;
1225			}
1226		} else {
1227			
1228			/* convert to real ID */
1229			reselection_id = bitmap_to_number(reselection_id);
1230		}
1231		hostdata->reselection_id = reselection_id;
1232		/* just in case we have a stale simple tag message, clear it */
1233		hostdata->msgin[1] = 0;
1234		dma_cache_sync(hostdata->dev, hostdata->msgin,
1235			       MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1236		if(hostdata->tag_negotiated & (1<<reselection_id)) {
1237			resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1238		} else {
1239			resume_offset = hostdata->pScript + Ent_GetReselectionData;
1240		}
1241	} else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1242		/* we've just disconnected from the bus, do nothing since
1243		 * a return here will re-run the queued command slot
1244		 * that may have been interrupted by the initial selection */
1245		DEBUG((" SELECTION COMPLETED\n"));
1246	} else if((dsps & 0xfffff0f0) == A_MSG_IN) { 
1247		resume_offset = process_message(host, hostdata, SCp,
1248						dsp, dsps);
1249	} else if((dsps &  0xfffff000) == 0) {
1250		__u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1251		printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1252		       host->host_no, pun, lun, NCR_700_condition[i],
1253		       NCR_700_phase[j], dsp - hostdata->pScript);
1254		if(SCp != NULL) {
1255			struct scatterlist *sg;
1256
1257			scsi_print_command(SCp);
1258			scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
1259				printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1260			}
1261		}
1262		NCR_700_internal_bus_reset(host);
1263	} else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1264		printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1265		       host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1266		resume_offset = dsp;
1267	} else {
1268		printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1269		       host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1270		NCR_700_internal_bus_reset(host);
1271	}
1272	return resume_offset;
1273}
1274
1275/* We run the 53c700 with selection interrupts always enabled.  This
1276 * means that the chip may be selected as soon as the bus frees.  On a
1277 * busy bus, this can be before the scripts engine finishes its
1278 * processing.  Therefore, part of the selection processing has to be
1279 * to find out what the scripts engine is doing and complete the
1280 * function if necessary (i.e. process the pending disconnect or save
1281 * the interrupted initial selection) */
1282STATIC inline __u32
1283process_selection(struct Scsi_Host *host, __u32 dsp)
1284{
1285	__u8 id = 0;	/* Squash compiler warning */
1286	int count = 0;
1287	__u32 resume_offset = 0;
1288	struct NCR_700_Host_Parameters *hostdata =
1289		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1290	struct scsi_cmnd *SCp = hostdata->cmd;
1291	__u8 sbcl;
1292
1293	for(count = 0; count < 5; count++) {
1294		id = NCR_700_readb(host, hostdata->chip710 ?
1295				   CTEST9_REG : SFBR_REG);
1296
1297		/* Take out our own ID */
1298		id &= ~(1<<host->this_id);
1299		if(id != 0) 
1300			break;
1301		udelay(5);
1302	}
1303	sbcl = NCR_700_readb(host, SBCL_REG);
1304	if((sbcl & SBCL_IO) == 0) {
1305		/* mark as having been selected rather than reselected */
1306		id = 0xff;
1307	} else {
1308		/* convert to real ID */
1309		hostdata->reselection_id = id = bitmap_to_number(id);
1310		DEBUG(("scsi%d:  Reselected by %d\n",
1311		       host->host_no, id));
1312	}
1313	if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1314		struct NCR_700_command_slot *slot =
1315			(struct NCR_700_command_slot *)SCp->host_scribble;
1316		DEBUG(("  ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1317		
1318		switch(dsp - hostdata->pScript) {
1319		case Ent_Disconnect1:
1320		case Ent_Disconnect2:
1321			save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1322			break;
1323		case Ent_Disconnect3:
1324		case Ent_Disconnect4:
1325			save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1326			break;
1327		case Ent_Disconnect5:
1328		case Ent_Disconnect6:
1329			save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1330			break;
1331		case Ent_Disconnect7:
1332		case Ent_Disconnect8:
1333			save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1334			break;
1335		case Ent_Finish1:
1336		case Ent_Finish2:
1337			process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1338			break;
1339			
1340		default:
1341			slot->state = NCR_700_SLOT_QUEUED;
1342			break;
1343			}
1344	}
1345	hostdata->state = NCR_700_HOST_BUSY;
1346	hostdata->cmd = NULL;
1347	/* clear any stale simple tag message */
1348	hostdata->msgin[1] = 0;
1349	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1350		       DMA_BIDIRECTIONAL);
1351
1352	if(id == 0xff) {
1353		/* Selected as target, Ignore */
1354		resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1355	} else if(hostdata->tag_negotiated & (1<<id)) {
1356		resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1357	} else {
1358		resume_offset = hostdata->pScript + Ent_GetReselectionData;
1359	}
1360	return resume_offset;
1361}
1362
1363static inline void
1364NCR_700_clear_fifo(struct Scsi_Host *host) {
1365	const struct NCR_700_Host_Parameters *hostdata
1366		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1367	if(hostdata->chip710) {
1368		NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1369	} else {
1370		NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1371	}
1372}
1373
1374static inline void
1375NCR_700_flush_fifo(struct Scsi_Host *host) {
1376	const struct NCR_700_Host_Parameters *hostdata
1377		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1378	if(hostdata->chip710) {
1379		NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1380		udelay(10);
1381		NCR_700_writeb(0, host, CTEST8_REG);
1382	} else {
1383		NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1384		udelay(10);
1385		NCR_700_writeb(0, host, DFIFO_REG);
1386	}
1387}
1388
1389
1390/* The queue lock with interrupts disabled must be held on entry to
1391 * this function */
1392STATIC int
1393NCR_700_start_command(struct scsi_cmnd *SCp)
1394{
1395	struct NCR_700_command_slot *slot =
1396		(struct NCR_700_command_slot *)SCp->host_scribble;
1397	struct NCR_700_Host_Parameters *hostdata =
1398		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1399	__u16 count = 1;	/* for IDENTIFY message */
1400	
1401	if(hostdata->state != NCR_700_HOST_FREE) {
1402		/* keep this inside the lock to close the race window where
1403		 * the running command finishes on another CPU while we don't
1404		 * change the state to queued on this one */
1405		slot->state = NCR_700_SLOT_QUEUED;
1406
1407		DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1408		       SCp->device->host->host_no, slot->cmnd, slot));
1409		return 0;
1410	}
1411	hostdata->state = NCR_700_HOST_BUSY;
1412	hostdata->cmd = SCp;
1413	slot->state = NCR_700_SLOT_BUSY;
1414	/* keep interrupts disabled until we have the command correctly
1415	 * set up so we cannot take a selection interrupt */
1416
1417	hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1418						slot->flags != NCR_700_FLAG_AUTOSENSE),
1419					       SCp->device->lun);
1420	/* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1421	 * if the negotiated transfer parameters still hold, so
1422	 * always renegotiate them */
1423	if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1424	   slot->flags == NCR_700_FLAG_AUTOSENSE) {
1425		NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1426	}
1427
1428	/* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1429	 * If a contingent allegiance condition exists, the device
1430	 * will refuse all tags, so send the request sense as untagged
1431	 * */
1432	if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1433	   && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1434	       slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1435		count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
1436	}
1437
1438	if(hostdata->fast &&
1439	   NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1440		count += spi_populate_sync_msg(&hostdata->msgout[count],
1441				spi_period(SCp->device->sdev_target),
1442				spi_offset(SCp->device->sdev_target));
1443		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1444	}
1445
1446	script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
1447
1448
1449	script_patch_ID(hostdata->dev, hostdata->script,
1450			Device_ID, 1<<scmd_id(SCp));
1451
1452	script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
1453			    slot->pCmd);
1454	script_patch_16(hostdata->dev, hostdata->script, CommandCount,
1455	                SCp->cmd_len);
1456	/* finally plumb the beginning of the SG list into the script
1457	 * */
1458	script_patch_32_abs(hostdata->dev, hostdata->script,
1459	                    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1460	NCR_700_clear_fifo(SCp->device->host);
1461
1462	if(slot->resume_offset == 0)
1463		slot->resume_offset = hostdata->pScript;
1464	/* now perform all the writebacks and invalidates */
1465	dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
1466	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1467		       DMA_FROM_DEVICE);
1468	dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1469	dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
1470
1471	/* set the synchronous period/offset */
1472	NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1473		       SCp->device->host, SXFER_REG);
1474	NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1475	NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1476
1477	return 1;
1478}
1479
1480irqreturn_t
1481NCR_700_intr(int irq, void *dev_id)
1482{
1483	struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1484	struct NCR_700_Host_Parameters *hostdata =
1485		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1486	__u8 istat;
1487	__u32 resume_offset = 0;
1488	__u8 pun = 0xff, lun = 0xff;
1489	unsigned long flags;
1490	int handled = 0;
1491
1492	/* Use the host lock to serialise access to the 53c700
1493	 * hardware.  Note: In future, we may need to take the queue
1494	 * lock to enter the done routines.  When that happens, we
1495	 * need to ensure that for this driver, the host lock and the
1496	 * queue lock point to the same thing. */
1497	spin_lock_irqsave(host->host_lock, flags);
1498	if((istat = NCR_700_readb(host, ISTAT_REG))
1499	      & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1500		__u32 dsps;
1501		__u8 sstat0 = 0, dstat = 0;
1502		__u32 dsp;
1503		struct scsi_cmnd *SCp = hostdata->cmd;
1504		enum NCR_700_Host_State state;
1505
1506		handled = 1;
1507		state = hostdata->state;
1508		SCp = hostdata->cmd;
1509
1510		if(istat & SCSI_INT_PENDING) {
1511			udelay(10);
1512
1513			sstat0 = NCR_700_readb(host, SSTAT0_REG);
1514		}
1515
1516		if(istat & DMA_INT_PENDING) {
1517			udelay(10);
1518
1519			dstat = NCR_700_readb(host, DSTAT_REG);
1520		}
1521
1522		dsps = NCR_700_readl(host, DSPS_REG);
1523		dsp = NCR_700_readl(host, DSP_REG);
1524
1525		DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1526		       host->host_no, istat, sstat0, dstat,
1527		       (dsp - (__u32)(hostdata->pScript))/4,
1528		       dsp, dsps));
1529
1530		if(SCp != NULL) {
1531			pun = SCp->device->id;
1532			lun = SCp->device->lun;
1533		}
1534
1535		if(sstat0 & SCSI_RESET_DETECTED) {
1536			struct scsi_device *SDp;
1537			int i;
1538
1539			hostdata->state = NCR_700_HOST_BUSY;
1540
1541			printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1542			       host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1543
1544			scsi_report_bus_reset(host, 0);
1545
1546			/* clear all the negotiated parameters */
1547			__shost_for_each_device(SDp, host)
1548				NCR_700_clear_flag(SDp, ~0);
1549			
1550			/* clear all the slots and their pending commands */
1551			for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1552				struct scsi_cmnd *SCp;
1553				struct NCR_700_command_slot *slot =
1554					&hostdata->slots[i];
1555
1556				if(slot->state == NCR_700_SLOT_FREE)
1557					continue;
1558				
1559				SCp = slot->cmnd;
1560				printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1561				       slot, SCp);
1562				free_slot(slot, hostdata);
1563				SCp->host_scribble = NULL;
1564				NCR_700_set_depth(SCp->device, 0);
1565				/* NOTE: deadlock potential here: we
1566				 * rely on mid-layer guarantees that
1567				 * scsi_done won't try to issue the
1568				 * command again otherwise we'll
1569				 * deadlock on the
1570				 * hostdata->state_lock */
1571				SCp->result = DID_RESET << 16;
1572				SCp->scsi_done(SCp);
1573			}
1574			mdelay(25);
1575			NCR_700_chip_setup(host);
1576
1577			hostdata->state = NCR_700_HOST_FREE;
1578			hostdata->cmd = NULL;
1579			/* signal back if this was an eh induced reset */
1580			if(hostdata->eh_complete != NULL)
1581				complete(hostdata->eh_complete);
1582			goto out_unlock;
1583		} else if(sstat0 & SELECTION_TIMEOUT) {
1584			DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1585			       host->host_no, pun, lun));
1586			NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1587		} else if(sstat0 & PHASE_MISMATCH) {
1588			struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1589				(struct NCR_700_command_slot *)SCp->host_scribble;
1590
1591			if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1592				/* It wants to reply to some part of
1593				 * our message */
1594#ifdef NCR_700_DEBUG
1595				__u32 temp = NCR_700_readl(host, TEMP_REG);
1596				int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1597				printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1598#endif
1599				resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1600			} else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1601				  dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1602				int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1603				int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1604				int residual = NCR_700_data_residual(host);
1605				int i;
1606#ifdef NCR_700_DEBUG
1607				__u32 naddr = NCR_700_readl(host, DNAD_REG);
1608
1609				printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1610				       host->host_no, pun, lun,
1611				       SGcount, data_transfer);
1612				scsi_print_command(SCp);
1613				if(residual) {
1614					printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1615				       host->host_no, pun, lun,
1616				       SGcount, data_transfer, residual);
1617				}
1618#endif
1619				data_transfer += residual;
1620
1621				if(data_transfer != 0) {
1622					int count; 
1623					__u32 pAddr;
1624
1625					SGcount--;
1626
1627					count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1628					DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1629					slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1630					slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1631					pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1632					pAddr += (count - data_transfer);
1633#ifdef NCR_700_DEBUG
1634					if(pAddr != naddr) {
1635						printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1636					}
1637#endif
1638					slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1639				}
1640				/* set the executed moves to nops */
1641				for(i=0; i<SGcount; i++) {
1642					slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1643					slot->SG[i].pAddr = 0;
1644				}
1645				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1646				/* and pretend we disconnected after
1647				 * the command phase */
1648				resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1649				/* make sure all the data is flushed */
1650				NCR_700_flush_fifo(host);
1651			} else {
1652				__u8 sbcl = NCR_700_readb(host, SBCL_REG);
1653				printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1654				       host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1655				NCR_700_internal_bus_reset(host);
1656			}
1657
1658		} else if(sstat0 & SCSI_GROSS_ERROR) {
1659			printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1660			       host->host_no, pun, lun);
1661			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1662		} else if(sstat0 & PARITY_ERROR) {
1663			printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1664			       host->host_no, pun, lun);
1665			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1666		} else if(dstat & SCRIPT_INT_RECEIVED) {
1667			DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1668			       host->host_no, pun, lun));
1669			resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1670		} else if(dstat & (ILGL_INST_DETECTED)) {
1671			printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1672			       "         Please email James.Bottomley@HansenPartnership.com with the details\n",
1673			       host->host_no, pun, lun,
1674			       dsp, dsp - hostdata->pScript);
1675			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1676		} else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1677			printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1678			       host->host_no, pun, lun, dstat);
1679			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1680		}
1681
1682		
1683		/* NOTE: selection interrupt processing MUST occur
1684		 * after script interrupt processing to correctly cope
1685		 * with the case where we process a disconnect and
1686		 * then get reselected before we process the
1687		 * disconnection */
1688		if(sstat0 & SELECTED) {
1689			/* FIXME: It currently takes at least FOUR
1690			 * interrupts to complete a command that
1691			 * disconnects: one for the disconnect, one
1692			 * for the reselection, one to get the
1693			 * reselection data and one to complete the
1694			 * command.  If we guess the reselected
1695			 * command here and prepare it, we only need
1696			 * to get a reselection data interrupt if we
1697			 * guessed wrongly.  Since the interrupt
1698			 * overhead is much greater than the command
1699			 * setup, this would be an efficient
1700			 * optimisation particularly as we probably
1701			 * only have one outstanding command on a
1702			 * target most of the time */
1703
1704			resume_offset = process_selection(host, dsp);
1705
1706		}
1707
1708	}
1709
1710	if(resume_offset) {
1711		if(hostdata->state != NCR_700_HOST_BUSY) {
1712			printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1713			       host->host_no, resume_offset, resume_offset - hostdata->pScript);
1714			hostdata->state = NCR_700_HOST_BUSY;
1715		}
1716
1717		DEBUG(("Attempting to resume at %x\n", resume_offset));
1718		NCR_700_clear_fifo(host);
1719		NCR_700_writel(resume_offset, host, DSP_REG);
1720	} 
1721	/* There is probably a technical no-no about this: if we're on a
1722	 * shared interrupt line and we got this interrupt because the
1723	 * other device needs servicing, not us, we're still going to
1724	 * check our queued commands here---of course, there shouldn't
1725	 * be any outstanding. */
1726	if(hostdata->state == NCR_700_HOST_FREE) {
1727		int i;
1728
1729		for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1730			/* fairness: always run the queue from the last
1731			 * position we left off */
1732			int j = (i + hostdata->saved_slot_position)
1733				% NCR_700_COMMAND_SLOTS_PER_HOST;
1734			
1735			if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1736				continue;
1737			if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1738				DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1739				       host->host_no, &hostdata->slots[j],
1740				       hostdata->slots[j].cmnd));
1741				hostdata->saved_slot_position = j + 1;
1742			}
1743
1744			break;
1745		}
1746	}
1747 out_unlock:
1748	spin_unlock_irqrestore(host->host_lock, flags);
1749	return IRQ_RETVAL(handled);
1750}
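/* A board wrapper (the NCR_D700 driver, for instance) registers this
 * handler itself.  Since the routine casts dev_id straight back to a
 * struct Scsi_Host pointer and returns IRQ_RETVAL(handled), it will
 * only behave on a shared line if it was registered with the
 * Scsi_Host as the dev_id cookie.  A minimal sketch, assuming "irq"
 * and "host" come from the wrapper's own probe code:
 *
 *	if (request_irq(irq, NCR_700_intr, IRQF_SHARED, "53c700", host))
 *		goto out_put_host;
 */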
1751
1752static int
1753NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1754{
1755	struct NCR_700_Host_Parameters *hostdata = 
1756		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1757	__u32 move_ins;
1758	enum dma_data_direction direction;
1759	struct NCR_700_command_slot *slot;
1760
1761	if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1762		/* We're over our allocation, this should never happen
1763		 * since we report the max allocation to the mid layer */
1764		printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1765		return 1;
1766	}
1767	/* check for untagged commands.  We can only accept an untagged
1768	 * command if we have no other commands outstanding.  A command
1769	 * could be untagged because:
1770	 * - The tag negotiated bitmap is clear
1771	 * - The blk layer sent an untagged command
1772	 */
1773	if(NCR_700_get_depth(SCp->device) != 0
1774	   && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1775	       || !blk_rq_tagged(SCp->request))) {
1776		CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1777		       NCR_700_get_depth(SCp->device));
1778		return SCSI_MLQUEUE_DEVICE_BUSY;
1779	}
1780	if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1781		CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1782		       NCR_700_get_depth(SCp->device));
1783		return SCSI_MLQUEUE_DEVICE_BUSY;
1784	}
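	/* Returning SCSI_MLQUEUE_DEVICE_BUSY above just hands the command
	 * back to the midlayer, which will retry it once the commands
	 * already outstanding on this device have completed. */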
1785	NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1786
1787	/* begin the command here */
1788	/* no need to check for NULL, test for command_slot_count above
1789	 * ensures a slot is free */
1790	slot = find_empty_slot(hostdata);
1791
1792	slot->cmnd = SCp;
1793
1794	SCp->scsi_done = done;
1795	SCp->host_scribble = (unsigned char *)slot;
1796	SCp->SCp.ptr = NULL;
1797	SCp->SCp.buffer = NULL;
1798
1799#ifdef NCR_700_DEBUG
1800	printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1801	scsi_print_command(SCp);
1802#endif
1803	if(blk_rq_tagged(SCp->request)
1804	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1805	   && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1806		scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1807		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1808		NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1809	}
1810
1811	/* here we may have to process an untagged command.  The gate
1812	 * above ensures that this will be the only one outstanding,
1813	 * so clear the tag negotiated bit.
1814	 *
1815	 * FIXME: This will royally screw up on multiple LUN devices
1816	 * */
1817	if(!blk_rq_tagged(SCp->request)
1818	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1819		scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1820		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1821	}
1822
1823	if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
1824	   && scsi_get_tag_type(SCp->device)) {
1825		slot->tag = SCp->request->tag;
1826		CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1827		       slot->tag, slot);
1828	} else {
1829		slot->tag = SCSI_NO_TAG;
1830		/* must populate current_cmnd for scsi_find_tag to work */
1831		SCp->device->current_cmnd = SCp;
1832	}
1833	/* sanity check: some of the commands generated by the mid-layer
1834	 * have an eccentric idea of their sc_data_direction */
1835	if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1836	   SCp->sc_data_direction != DMA_NONE) {
1837#ifdef NCR_700_DEBUG
1838		printk("53c700: Command");
1839		scsi_print_command(SCp);
1840		printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1841#endif
1842		SCp->sc_data_direction = DMA_NONE;
1843	}
1844
1845	switch (SCp->cmnd[0]) {
1846	case REQUEST_SENSE:
1847		/* clear the internal sense magic */
1848		SCp->cmnd[6] = 0;
1849		/* fall through */
1850	default:
1851		/* OK, get it from the command */
1852		switch(SCp->sc_data_direction) {
1853		case DMA_BIDIRECTIONAL:
1854		default:
1855			printk(KERN_ERR "53c700: Unknown command for data direction ");
1856			scsi_print_command(SCp);
1857			
1858			move_ins = 0;
1859			break;
1860		case DMA_NONE:
1861			move_ins = 0;
1862			break;
1863		case DMA_FROM_DEVICE:
1864			move_ins = SCRIPT_MOVE_DATA_IN;
1865			break;
1866		case DMA_TO_DEVICE:
1867			move_ins = SCRIPT_MOVE_DATA_OUT;
1868			break;
1869		}
1870	}
1871
1872	/* now build the scatter gather list */
1873	direction = SCp->sc_data_direction;
1874	if(move_ins != 0) {
1875		int i;
1876		int sg_count;
1877		dma_addr_t vPtr = 0;
1878		struct scatterlist *sg;
1879		__u32 count = 0;
1880
1881		sg_count = scsi_dma_map(SCp);
1882		BUG_ON(sg_count < 0);
1883
1884		scsi_for_each_sg(SCp, sg, sg_count, i) {
1885			vPtr = sg_dma_address(sg);
1886			count = sg_dma_len(sg);
1887
1888			slot->SG[i].ins = bS_to_host(move_ins | count);
1889			DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1890			       i, count, slot->SG[i].ins, (unsigned long)vPtr));
1891			slot->SG[i].pAddr = bS_to_host(vPtr);
1892		}
1893		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1894		slot->SG[i].pAddr = 0;
1895		dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1896		DEBUG((" SETTING %08lx to %x\n",
1897		       (&slot->pSG[i].ins),
1898		       slot->SG[i].ins));
1899	}
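	/* The loop above turns the scatterlist into a run of script MOVE
	 * instructions terminated by a SCRIPT_RETURN in the slot's SG
	 * array; NCR_700_start_command() then patches
	 * SGScriptStartAddress to point at pSG[0] so the chip can walk
	 * the list on its own. */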
1900	slot->resume_offset = 0;
1901	slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1902				    MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1903	NCR_700_start_command(SCp);
1904	return 0;
1905}
1906
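/* DEF_SCSI_QCMD() (from <scsi/scsi_host.h>) generates the unlocked
 * entry point NCR_700_queuecommand(struct Scsi_Host *, struct
 * scsi_cmnd *) named in the host template; broadly, it takes
 * host->host_lock with interrupts disabled and then calls the _lck
 * routine above, which is why that routine can touch hostdata without
 * any locking of its own. */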
1907STATIC DEF_SCSI_QCMD(NCR_700_queuecommand)
1908
1909STATIC int
1910NCR_700_abort(struct scsi_cmnd * SCp)
1911{
1912	struct NCR_700_command_slot *slot;
1913
1914	scmd_printk(KERN_INFO, SCp,
1915		"New error handler wants to abort command\n\t");
1916	scsi_print_command(SCp);
1917
1918	slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1919
1920	if(slot == NULL)
1921		/* no outstanding command to abort */
1922		return SUCCESS;
1923	if(SCp->cmnd[0] == TEST_UNIT_READY) {
1924		/* FIXME: This is because of a problem in the new
1925		 * error handler.  When it is in error recovery, it
1926		 * will send a TUR to a device it thinks may still be
1927		 * showing a problem.  If the TUR isn't responded to,
1928		 * it will abort it and mark the device off line.
1929		 * Unfortunately, it does no other error recovery, so
1930		 * this would leave us with an outstanding command
1931		 * occupying a slot.  Rather than allow this to
1932		 * happen, we issue a bus reset to force all
1933		 * outstanding commands to terminate here. */
1934		NCR_700_internal_bus_reset(SCp->device->host);
1935		/* still drop through and return failed */
1936	}
1937	return FAILED;
1938
1939}
1940
1941STATIC int
1942NCR_700_bus_reset(struct scsi_cmnd * SCp)
1943{
1944	DECLARE_COMPLETION_ONSTACK(complete);
1945	struct NCR_700_Host_Parameters *hostdata = 
1946		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1947
1948	scmd_printk(KERN_INFO, SCp,
1949		"New error handler wants BUS reset, cmd %p\n\t", SCp);
1950	scsi_print_command(SCp);
1951
1952	/* In theory, eh_complete should always be null because the
1953	 * eh is single threaded, but just in case we're handling a
1954	 * reset via sg or something */
1955	spin_lock_irq(SCp->device->host->host_lock);
1956	while (hostdata->eh_complete != NULL) {
1957		spin_unlock_irq(SCp->device->host->host_lock);
1958		msleep_interruptible(100);
1959		spin_lock_irq(SCp->device->host->host_lock);
1960	}
1961
1962	hostdata->eh_complete = &complete;
1963	NCR_700_internal_bus_reset(SCp->device->host);
1964
1965	spin_unlock_irq(SCp->device->host->host_lock);
1966	wait_for_completion(&complete);
1967	spin_lock_irq(SCp->device->host->host_lock);
1968
1969	hostdata->eh_complete = NULL;
1970	/* Revalidate the transport parameters of the failing device */
1971	if(hostdata->fast)
1972		spi_schedule_dv_device(SCp->device);
1973
1974	spin_unlock_irq(SCp->device->host->host_lock);
1975	return SUCCESS;
1976}
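/* The wait_for_completion() above pairs with the complete() call in
 * NCR_700_intr's SCSI_RESET_DETECTED branch, so this handler only
 * returns once the chip has actually raised and processed the reset
 * interrupt. */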
1977
1978STATIC int
1979NCR_700_host_reset(struct scsi_cmnd * SCp)
1980{
1981	scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
1982	scsi_print_command(SCp);
1983
1984	spin_lock_irq(SCp->device->host->host_lock);
1985
1986	NCR_700_internal_bus_reset(SCp->device->host);
1987	NCR_700_chip_reset(SCp->device->host);
1988
1989	spin_unlock_irq(SCp->device->host->host_lock);
1990
1991	return SUCCESS;
1992}
1993
1994STATIC void
1995NCR_700_set_period(struct scsi_target *STp, int period)
1996{
1997	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1998	struct NCR_700_Host_Parameters *hostdata = 
1999		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2000	
2001	if(!hostdata->fast)
2002		return;
2003
2004	if(period < hostdata->min_period)
2005		period = hostdata->min_period;
2006
2007	spi_period(STp) = period;
2008	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2009			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2010	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2011}
2012
2013STATIC void
2014NCR_700_set_offset(struct scsi_target *STp, int offset)
2015{
2016	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2017	struct NCR_700_Host_Parameters *hostdata = 
2018		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2019	int max_offset = hostdata->chip710
2020		? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2021	
2022	if(!hostdata->fast)
2023		return;
2024
2025	if(offset > max_offset)
2026		offset = max_offset;
2027
2028	/* if we're currently async, make sure the period is reasonable */
2029	if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2030				    spi_period(STp) > 0xff))
2031		spi_period(STp) = hostdata->min_period;
2032
2033	spi_offset(STp) = offset;
2034	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2035			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2036	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2037}
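/* Both spi transport hooks above (wired up in
 * NCR_700_transport_functions below) only update the cached
 * period/offset and clear the negotiation flags; the new values are
 * actually negotiated over an SDTR message the next time a command is
 * started for the target. */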
2038
2039STATIC int
2040NCR_700_slave_alloc(struct scsi_device *SDp)
2041{
2042	SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2043				GFP_KERNEL);
2044
2045	if (!SDp->hostdata)
2046		return -ENOMEM;
2047
2048	return 0;
2049}
2050
2051STATIC int
2052NCR_700_slave_configure(struct scsi_device *SDp)
2053{
2054	struct NCR_700_Host_Parameters *hostdata = 
2055		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2056
2057	/* to do here: allocate memory; build a queue_full list */
2058	if(SDp->tagged_supported) {
2059		scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
2060		scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
2061		NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2062	} else {
2063		/* initialise to default depth */
2064		scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2065	}
2066	if(hostdata->fast) {
2067		/* Find the correct offset and period via domain validation */
2068		if (!spi_initial_dv(SDp->sdev_target))
2069			spi_dv_device(SDp);
2070	} else {
2071		spi_offset(SDp->sdev_target) = 0;
2072		spi_period(SDp->sdev_target) = 0;
2073	}
2074	return 0;
2075}
2076
2077STATIC void
2078NCR_700_slave_destroy(struct scsi_device *SDp)
2079{
2080	kfree(SDp->hostdata);
2081	SDp->hostdata = NULL;
2082}
2083
2084static int
2085NCR_700_change_queue_depth(struct scsi_device *SDp, int depth, int reason)
2086{
2087	if (reason != SCSI_QDEPTH_DEFAULT)
2088		return -EOPNOTSUPP;
2089
2090	if (depth > NCR_700_MAX_TAGS)
2091		depth = NCR_700_MAX_TAGS;
2092
2093	scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
2094	return depth;
2095}
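/* This hook is typically reached via the device's sysfs queue_depth
 * attribute; a request for, say, 512 would be clamped to
 * NCR_700_MAX_TAGS above before being applied. */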
2096
2097static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2098{
2099	int change_tag = ((tag_type == 0 && scsi_get_tag_type(SDp) != 0)
2100			  || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2101	struct NCR_700_Host_Parameters *hostdata = 
2102		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2103
2104	scsi_set_tag_type(SDp, tag_type);
2105
2106	/* We have a global (per target) flag to track whether TCQ is
2107	 * enabled, so we'll be turning it off for the entire target here.
2108	 * Our tag algorithm will fail if we mix tagged and untagged
2109	 * commands, so quiesce the device before making the change. */
2110	if (change_tag)
2111		scsi_target_quiesce(SDp->sdev_target);
2112
2113	if (!tag_type) {
2114		/* shift back to the default unqueued number of commands
2115		 * (the user can still raise this) */
2116		scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
2117		hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
2118	} else {
2119		/* Here, we cleared the negotiation flag above, so this
2120		 * will force the driver to renegotiate */
2121		scsi_activate_tcq(SDp, SDp->queue_depth);
2122		if (change_tag)
2123			NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2124	}
2125	if (change_tag)
2126		scsi_target_resume(SDp->sdev_target);
2127
2128	return tag_type;
2129}
2130
2131static ssize_t
2132NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2133{
2134	struct scsi_device *SDp = to_scsi_device(dev);
2135
2136	return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2137}
2138
2139static struct device_attribute NCR_700_active_tags_attr = {
2140	.attr = {
2141		.name =		"active_tags",
2142		.mode =		S_IRUGO,
2143	},
2144	.show = NCR_700_show_active_tags,
2145};
2146
2147STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2148	&NCR_700_active_tags_attr,
2149	NULL,
2150};
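/* NCR_700_detect() plugs this array into the host template's
 * sdev_attrs, so every device attached to a 53c700 host should gain a
 * read-only "active_tags" file in sysfs reporting the depth tracked by
 * NCR_700_get_depth(). */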
2151
2152EXPORT_SYMBOL(NCR_700_detect);
2153EXPORT_SYMBOL(NCR_700_release);
2154EXPORT_SYMBOL(NCR_700_intr);
2155
2156static struct spi_function_template NCR_700_transport_functions =  {
2157	.set_period	= NCR_700_set_period,
2158	.show_period	= 1,
2159	.set_offset	= NCR_700_set_offset,
2160	.show_offset	= 1,
2161};
2162
2163static int __init NCR_700_init(void)
2164{
2165	NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2166	if(!NCR_700_transport_template)
2167		return -ENODEV;
2168	return 0;
2169}
2170
2171static void __exit NCR_700_exit(void)
2172{
2173	spi_release_transport(NCR_700_transport_template);
2174}
2175
2176module_init(NCR_700_init);
2177module_exit(NCR_700_exit);
2178
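/* The three exported entry points (NCR_700_detect, NCR_700_release and
 * NCR_700_intr) are the whole interface a board wrapper needs.  The
 * fragment below is only a rough sketch of how a wrapper might glue
 * them together: the register window size, IRQ number and clock value
 * are board specific and made up here, and error unwinding is reduced
 * to the bare minimum.  See the NCR_D700 driver for a real example. */

#if 0	/* illustrative sketch only, never built */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <scsi/scsi_host.h>
#include "53c700.h"

static struct scsi_host_template example_53c700_template = {
	.module		= THIS_MODULE,
	.name		= "example 53c700 board",
	.proc_name	= "ex53c700",
	.this_id	= 7,
};

static struct Scsi_Host *
example_53c700_probe(struct device *dev, unsigned long mem_base, int irq)
{
	struct NCR_700_Host_Parameters *hostdata;
	struct Scsi_Host *host;

	hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
	if (!hostdata)
		return NULL;

	/* tell the core driver where the chip lives and how it is clocked */
	hostdata->dev = dev;			/* used for the DMA mappings */
	hostdata->base = ioremap(mem_base, 0x40);
	if (!hostdata->base)
		goto out_free;
	hostdata->clock = 50;			/* SCSI clock in MHz */
	hostdata->differential = 0;

	host = NCR_700_detect(&example_53c700_template, hostdata, dev);
	if (!host)
		goto out_free;

	/* shared line: NCR_700_intr expects the Scsi_Host as dev_id */
	if (request_irq(irq, NCR_700_intr, IRQF_SHARED, "ex53c700", host))
		goto out_release;

	host->irq = irq;
	return host;

 out_release:
	scsi_remove_host(host);
	NCR_700_release(host);
	scsi_host_put(host);
 out_free:
	if (hostdata->base)
		iounmap(hostdata->base);
	kfree(hostdata);
	return NULL;
}
#endif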
  82 * buffers to begin support for 64 bit drivers.
  83 *
  84 * Version 2.4
  85 *
  86 * Added support for the 53c710 chip (in 53c700 emulation mode only---no 
  87 * special 53c710 instructions or registers are used).
  88 *
  89 * Version 2.3
  90 *
  91 * More endianness/cache coherency changes.
  92 *
  93 * Better bad device handling (handles devices lying about tag
  94 * queueing support and devices which fail to provide sense data on
  95 * contingent allegiance conditions)
  96 *
  97 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
  98 * debugging this driver on the parisc architecture and suggesting
  99 * many improvements and bug fixes.
 100 *
 101 * Thanks also go to Linuxcare Inc. for providing several PARISC
 102 * machines for me to debug the driver on.
 103 *
 104 * Version 2.2
 105 *
 106 * Made the driver mem or io mapped; added endian invariance; added
 107 * dma cache flushing operations for architectures which need it;
 108 * added support for more varied clocking speeds.
 109 *
 110 * Version 2.1
 111 *
 112 * Initial modularisation from the D700.  See NCR_D700.c for the rest of
 113 * the changelog.
 114 * */
 115#define NCR_700_VERSION "2.8"
 116
 117#include <linux/kernel.h>
 118#include <linux/types.h>
 119#include <linux/string.h>
 120#include <linux/slab.h>
 121#include <linux/ioport.h>
 122#include <linux/delay.h>
 123#include <linux/spinlock.h>
 124#include <linux/completion.h>
 125#include <linux/init.h>
 126#include <linux/proc_fs.h>
 127#include <linux/blkdev.h>
 128#include <linux/module.h>
 129#include <linux/interrupt.h>
 130#include <linux/device.h>
 131#include <asm/dma.h>
 
 132#include <asm/io.h>
 133#include <asm/pgtable.h>
 134#include <asm/byteorder.h>
 135
 136#include <scsi/scsi.h>
 137#include <scsi/scsi_cmnd.h>
 138#include <scsi/scsi_dbg.h>
 139#include <scsi/scsi_eh.h>
 140#include <scsi/scsi_host.h>
 141#include <scsi/scsi_tcq.h>
 142#include <scsi/scsi_transport.h>
 143#include <scsi/scsi_transport_spi.h>
 144
 145#include "53c700.h"
 146
 147/* NOTE: For 64 bit drivers there are points in the code where we use
 148 * a non dereferenceable pointer to point to a structure in dma-able
 149 * memory (which is 32 bits) so that we can use all of the structure
 150 * operations but take the address at the end.  This macro allows us
 151 * to truncate the 64 bit pointer down to 32 bits without the compiler
 152 * complaining */
 153#define to32bit(x)	((__u32)((unsigned long)(x)))
 154
 155#ifdef NCR_700_DEBUG
 156#define STATIC
 157#else
 158#define STATIC static
 159#endif
 160
 161MODULE_AUTHOR("James Bottomley");
 162MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
 163MODULE_LICENSE("GPL");
 164
 165/* This is the script */
 166#include "53c700_d.h"
 167
 168
 169STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
 170STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
 171STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
 172STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
 173STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
 174STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
 175STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
 176STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
 177STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
 178static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth, int reason);
 179static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
 180
 181STATIC struct device_attribute *NCR_700_dev_attrs[];
 182
 183STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
 184
 185static char *NCR_700_phase[] = {
 186	"",
 187	"after selection",
 188	"before command phase",
 189	"after command phase",
 190	"after status phase",
 191	"after data in phase",
 192	"after data out phase",
 193	"during data phase",
 194};
 195
 196static char *NCR_700_condition[] = {
 197	"",
 198	"NOT MSG_OUT",
 199	"UNEXPECTED PHASE",
 200	"NOT MSG_IN",
 201	"UNEXPECTED MSG",
 202	"MSG_IN",
 203	"SDTR_MSG RECEIVED",
 204	"REJECT_MSG RECEIVED",
 205	"DISCONNECT_MSG RECEIVED",
 206	"MSG_OUT",
 207	"DATA_IN",
 208	
 209};
 210
 211static char *NCR_700_fatal_messages[] = {
 212	"unexpected message after reselection",
 213	"still MSG_OUT after message injection",
 214	"not MSG_IN after selection",
 215	"Illegal message length received",
 216};
 217
 218static char *NCR_700_SBCL_bits[] = {
 219	"IO ",
 220	"CD ",
 221	"MSG ",
 222	"ATN ",
 223	"SEL ",
 224	"BSY ",
 225	"ACK ",
 226	"REQ ",
 227};
 228
 229static char *NCR_700_SBCL_to_phase[] = {
 230	"DATA_OUT",
 231	"DATA_IN",
 232	"CMD_OUT",
 233	"STATE",
 234	"ILLEGAL PHASE",
 235	"ILLEGAL PHASE",
 236	"MSG OUT",
 237	"MSG IN",
 238};
 239
 240/* This translates the SDTR message offset and period to a value
 241 * which can be loaded into the SXFER_REG.
 242 *
 243 * NOTE: According to SCSI-2, the true transfer period (in ns) is
 244 *       actually four times this period value */
 245static inline __u8
 246NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
 247			       __u8 offset, __u8 period)
 248{
 249	int XFERP;
 250
 251	__u8 min_xferp = (hostdata->chip710
 252			  ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
 253	__u8 max_offset = (hostdata->chip710
 254			   ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
 255
 256	if(offset == 0)
 257		return 0;
 258
 259	if(period < hostdata->min_period) {
 260		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, NCR_700_MIN_PERIOD*4);
 261		period = hostdata->min_period;
 262	}
 263	XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
 264	if(offset > max_offset) {
 265		printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
 266		       offset, max_offset);
 267		offset = max_offset;
 268	}
 269	if(XFERP < min_xferp) {
 270		XFERP =  min_xferp;
 271	}
 272	return (offset & 0x0f) | (XFERP & 0x07)<<4;
 273}
 274
 275static inline __u8
 276NCR_700_get_SXFER(struct scsi_device *SDp)
 277{
 278	struct NCR_700_Host_Parameters *hostdata = 
 279		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
 280
 281	return NCR_700_offset_period_to_sxfer(hostdata,
 282					      spi_offset(SDp->sdev_target),
 283					      spi_period(SDp->sdev_target));
 284}
 285
 286struct Scsi_Host *
 287NCR_700_detect(struct scsi_host_template *tpnt,
 288	       struct NCR_700_Host_Parameters *hostdata, struct device *dev)
 289{
 290	dma_addr_t pScript, pSlots;
 291	__u8 *memory;
 292	__u32 *script;
 293	struct Scsi_Host *host;
 294	static int banner = 0;
 295	int j;
 296
 297	if(tpnt->sdev_attrs == NULL)
 298		tpnt->sdev_attrs = NCR_700_dev_attrs;
 299
 300	memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
 301				       &pScript, GFP_KERNEL);
 302	if(memory == NULL) {
 303		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detatching\n");
 304		return NULL;
 305	}
 306
 307	script = (__u32 *)memory;
 308	hostdata->msgin = memory + MSGIN_OFFSET;
 309	hostdata->msgout = memory + MSGOUT_OFFSET;
 310	hostdata->status = memory + STATUS_OFFSET;
 311	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
 312	hostdata->dev = dev;
 313
 314	pSlots = pScript + SLOTS_OFFSET;
 315
 316	/* Fill in the missing routines from the host template */
 317	tpnt->queuecommand = NCR_700_queuecommand;
 318	tpnt->eh_abort_handler = NCR_700_abort;
 319	tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
 320	tpnt->eh_host_reset_handler = NCR_700_host_reset;
 321	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
 322	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
 323	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
 324	tpnt->use_clustering = ENABLE_CLUSTERING;
 325	tpnt->slave_configure = NCR_700_slave_configure;
 326	tpnt->slave_destroy = NCR_700_slave_destroy;
 327	tpnt->slave_alloc = NCR_700_slave_alloc;
 328	tpnt->change_queue_depth = NCR_700_change_queue_depth;
 329	tpnt->change_queue_type = NCR_700_change_queue_type;
 330
 331	if(tpnt->name == NULL)
 332		tpnt->name = "53c700";
 333	if(tpnt->proc_name == NULL)
 334		tpnt->proc_name = "53c700";
 335
 336	host = scsi_host_alloc(tpnt, 4);
 337	if (!host)
 338		return NULL;
 339	memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
 340	       * NCR_700_COMMAND_SLOTS_PER_HOST);
 341	for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
 342		dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
 343					  - (unsigned long)&hostdata->slots[0].SG[0]);
 344		hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
 345		if(j == 0)
 346			hostdata->free_list = &hostdata->slots[j];
 347		else
 348			hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
 349		hostdata->slots[j].state = NCR_700_SLOT_FREE;
 350	}
 351
 352	for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
 353		script[j] = bS_to_host(SCRIPT[j]);
 354
 355	/* adjust all labels to be bus physical */
 356	for (j = 0; j < PATCHES; j++)
 357		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
 358	/* now patch up fixed addresses. */
 359	script_patch_32(hostdata->dev, script, MessageLocation,
 360			pScript + MSGOUT_OFFSET);
 361	script_patch_32(hostdata->dev, script, StatusAddress,
 362			pScript + STATUS_OFFSET);
 363	script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
 364			pScript + MSGIN_OFFSET);
 365
 366	hostdata->script = script;
 367	hostdata->pScript = pScript;
 368	dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
 369	hostdata->state = NCR_700_HOST_FREE;
 370	hostdata->cmd = NULL;
 371	host->max_id = 8;
 372	host->max_lun = NCR_700_MAX_LUNS;
 373	BUG_ON(NCR_700_transport_template == NULL);
 374	host->transportt = NCR_700_transport_template;
 375	host->unique_id = (unsigned long)hostdata->base;
 376	hostdata->eh_complete = NULL;
 377	host->hostdata[0] = (unsigned long)hostdata;
 378	/* kick the chip */
 379	NCR_700_writeb(0xff, host, CTEST9_REG);
 380	if (hostdata->chip710)
 381		hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
 382	else
 383		hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
 384	hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
 385	if (banner == 0) {
 386		printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
 387		banner = 1;
 388	}
 389	printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
 390	       hostdata->chip710 ? "53c710" :
 391	       (hostdata->fast ? "53c700-66" : "53c700"),
 392	       hostdata->rev, hostdata->differential ?
 393	       "(Differential)" : "");
 394	/* reset the chip */
 395	NCR_700_chip_reset(host);
 396
 397	if (scsi_add_host(host, dev)) {
 398		dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
 399		scsi_host_put(host);
 400		return NULL;
 401	}
 402
 403	spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
 404		SPI_SIGNAL_SE;
 405
 406	return host;
 407}
 408
 409int
 410NCR_700_release(struct Scsi_Host *host)
 411{
 412	struct NCR_700_Host_Parameters *hostdata = 
 413		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 414
 415	dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
 416			       hostdata->script, hostdata->pScript);
 417	return 1;
 418}
 419
 420static inline __u8
 421NCR_700_identify(int can_disconnect, __u8 lun)
 422{
 423	return IDENTIFY_BASE |
 424		((can_disconnect) ? 0x40 : 0) |
 425		(lun & NCR_700_LUN_MASK);
 426}
 427
 428/*
 429 * Function : static int data_residual (Scsi_Host *host)
 430 *
 431 * Purpose : return residual data count of what's in the chip.  If you
 432 * really want to know what this function is doing, it's almost a
 433 * direct transcription of the algorithm described in the 53c710
 434 * guide, except that the DBC and DFIFO registers are only 6 bits
 435 * wide on a 53c700.
 436 *
 437 * Inputs : host - SCSI host */
 438static inline int
 439NCR_700_data_residual (struct Scsi_Host *host) {
 440	struct NCR_700_Host_Parameters *hostdata = 
 441		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 442	int count, synchronous = 0;
 443	unsigned int ddir;
 444
 445	if(hostdata->chip710) {
 446		count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
 447			 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
 448	} else {
 449		count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
 450			 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
 451	}
 452	
 453	if(hostdata->fast)
 454		synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
 455	
 456	/* get the data direction */
 457	ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
 458
 459	if (ddir) {
 460		/* Receive */
 461		if (synchronous) 
 462			count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
 463		else
 464			if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
 465				++count;
 466	} else {
 467		/* Send */
 468		__u8 sstat = NCR_700_readb(host, SSTAT1_REG);
 469		if (sstat & SODL_REG_FULL)
 470			++count;
 471		if (synchronous && (sstat & SODR_REG_FULL))
 472			++count;
 473	}
 474#ifdef NCR_700_DEBUG
 475	if(count)
 476		printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
 477#endif
 478	return count;
 479}
 480
 481/* print out the SCSI wires and corresponding phase from the SBCL register
 482 * in the chip */
 483static inline char *
 484sbcl_to_string(__u8 sbcl)
 485{
 486	int i;
 487	static char ret[256];
 488
 489	ret[0]='\0';
 490	for(i=0; i<8; i++) {
 491		if((1<<i) & sbcl) 
 492			strcat(ret, NCR_700_SBCL_bits[i]);
 493	}
 494	strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
 495	return ret;
 496}
 497
 498static inline __u8
 499bitmap_to_number(__u8 bitmap)
 500{
 501	__u8 i;
 502
 503	for(i=0; i<8 && !(bitmap &(1<<i)); i++)
 504		;
 505	return i;
 506}
 507
 508/* Pull a slot off the free list */
 509STATIC struct NCR_700_command_slot *
 510find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
 511{
 512	struct NCR_700_command_slot *slot = hostdata->free_list;
 513
 514	if(slot == NULL) {
 515		/* sanity check */
 516		if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
 517			printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
 518		return NULL;
 519	}
 520
 521	if(slot->state != NCR_700_SLOT_FREE)
 522		/* should panic! */
 523		printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
 524		
 525
 526	hostdata->free_list = slot->ITL_forw;
 527	slot->ITL_forw = NULL;
 528
 529
 530	/* NOTE: set the state to busy here, not queued, since this
 531	 * indicates the slot is in use and cannot be run by the IRQ
 532	 * finish routine.  If we cannot queue the command when it
 533	 * is properly build, we then change to NCR_700_SLOT_QUEUED */
 534	slot->state = NCR_700_SLOT_BUSY;
 535	slot->flags = 0;
 536	hostdata->command_slot_count++;
 537	
 538	return slot;
 539}
 540
 541STATIC void 
 542free_slot(struct NCR_700_command_slot *slot,
 543	  struct NCR_700_Host_Parameters *hostdata)
 544{
 545	if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
 546		printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
 547	}
 548	if(slot->state == NCR_700_SLOT_FREE) {
 549		printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
 550	}
 551	
 552	slot->resume_offset = 0;
 553	slot->cmnd = NULL;
 554	slot->state = NCR_700_SLOT_FREE;
 555	slot->ITL_forw = hostdata->free_list;
 556	hostdata->free_list = slot;
 557	hostdata->command_slot_count--;
 558}
 559
 560
 561/* This routine really does very little.  The command is indexed on
 562   the ITL and (if tagged) the ITLQ lists in _queuecommand */
 563STATIC void
 564save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
 565		     struct scsi_cmnd *SCp, __u32 dsp)
 566{
 567	/* Its just possible that this gets executed twice */
 568	if(SCp != NULL) {
 569		struct NCR_700_command_slot *slot =
 570			(struct NCR_700_command_slot *)SCp->host_scribble;
 571
 572		slot->resume_offset = dsp;
 573	}
 574	hostdata->state = NCR_700_HOST_FREE;
 575	hostdata->cmd = NULL;
 576}
 577
 578STATIC inline void
 579NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
 580	      struct NCR_700_command_slot *slot)
 581{
 582	if(SCp->sc_data_direction != DMA_NONE &&
 583	   SCp->sc_data_direction != DMA_BIDIRECTIONAL)
 584		scsi_dma_unmap(SCp);
 585}
 586
 587STATIC inline void
 588NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
 589	       struct scsi_cmnd *SCp, int result)
 590{
 591	hostdata->state = NCR_700_HOST_FREE;
 592	hostdata->cmd = NULL;
 593
 594	if(SCp != NULL) {
 595		struct NCR_700_command_slot *slot = 
 596			(struct NCR_700_command_slot *)SCp->host_scribble;
 597		
 598		dma_unmap_single(hostdata->dev, slot->pCmd,
 599				 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
 600		if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
 601			char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
 602#ifdef NCR_700_DEBUG
 603			printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
 604			       SCp, SCp->cmnd[7], result);
 605			scsi_print_sense("53c700", SCp);
 606
 607#endif
 608			dma_unmap_single(hostdata->dev, slot->dma_handle,
 609					 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 610			/* restore the old result if the request sense was
 611			 * successful */
 612			if (result == 0)
 613				result = cmnd[7];
 614			/* restore the original length */
 615			SCp->cmd_len = cmnd[8];
 616		} else
 617			NCR_700_unmap(hostdata, SCp, slot);
 618
 619		free_slot(slot, hostdata);
 620#ifdef NCR_700_DEBUG
 621		if(NCR_700_get_depth(SCp->device) == 0 ||
 622		   NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
 623			printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
 624			       NCR_700_get_depth(SCp->device));
 625#endif /* NCR_700_DEBUG */
 626		NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
 627
 628		SCp->host_scribble = NULL;
 629		SCp->result = result;
 630		SCp->scsi_done(SCp);
 631	} else {
 632		printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
 633	}
 634}
 635
 636
 637STATIC void
 638NCR_700_internal_bus_reset(struct Scsi_Host *host)
 639{
 640	/* Bus reset */
 641	NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
 642	udelay(50);
 643	NCR_700_writeb(0, host, SCNTL1_REG);
 644
 645}
 646
 647STATIC void
 648NCR_700_chip_setup(struct Scsi_Host *host)
 649{
 650	struct NCR_700_Host_Parameters *hostdata = 
 651		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 652	__u8 min_period;
 653	__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
 654
 655	if(hostdata->chip710) {
 656		__u8 burst_disable = 0;
 657		__u8 burst_length = 0;
 658
 659		switch (hostdata->burst_length) {
 660			case 1:
 661			        burst_length = BURST_LENGTH_1;
 662			        break;
 663			case 2:
 664			        burst_length = BURST_LENGTH_2;
 665			        break;
 666			case 4:
 667			        burst_length = BURST_LENGTH_4;
 668			        break;
 669			case 8:
 670			        burst_length = BURST_LENGTH_8;
 671			        break;
 672			default:
 673			        burst_disable = BURST_DISABLE;
 674			        break;
 675		}
 676		hostdata->dcntl_extra |= COMPAT_700_MODE;
 677
 678		NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
 679		NCR_700_writeb(burst_length | hostdata->dmode_extra,
 680			       host, DMODE_710_REG);
 681		NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
 682			       (hostdata->differential ? DIFF : 0),
 683			       host, CTEST7_REG);
 684		NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
 685		NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
 686			       | AUTO_ATN, host, SCNTL0_REG);
 687	} else {
 688		NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
 689			       host, DMODE_700_REG);
 690		NCR_700_writeb(hostdata->differential ? 
 691			       DIFF : 0, host, CTEST7_REG);
 692		if(hostdata->fast) {
 693			/* this is for 700-66, does nothing on 700 */
 694			NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION 
 695				       | GENERATE_RECEIVE_PARITY, host,
 696				       CTEST8_REG);
 697		} else {
 698			NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
 699				       | PARITY | AUTO_ATN, host, SCNTL0_REG);
 700		}
 701	}
 702
 703	NCR_700_writeb(1 << host->this_id, host, SCID_REG);
 704	NCR_700_writeb(0, host, SBCL_REG);
 705	NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
 706
 707	NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
 708	     | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
 709
 710	NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
 711	NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
 712	if(hostdata->clock > 75) {
 713		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75Mhz is the maximum this chip can be driven at\n", hostdata->clock);
 714		/* do the best we can, but the async clock will be out
 715		 * of spec: sync divider 2, async divider 3 */
 716		DEBUG(("53c700: sync 2 async 3\n"));
 717		NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
 718		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 719		hostdata->sync_clock = hostdata->clock/2;
 720	} else	if(hostdata->clock > 50  && hostdata->clock <= 75) {
 721		/* sync divider 1.5, async divider 3 */
 722		DEBUG(("53c700: sync 1.5 async 3\n"));
 723		NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
 724		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 725		hostdata->sync_clock = hostdata->clock*2;
 726		hostdata->sync_clock /= 3;
 727		
 728	} else if(hostdata->clock > 37 && hostdata->clock <= 50) {
 729		/* sync divider 1, async divider 2 */
 730		DEBUG(("53c700: sync 1 async 2\n"));
 731		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
 732		NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 733		hostdata->sync_clock = hostdata->clock;
 734	} else if(hostdata->clock > 25 && hostdata->clock <=37) {
 735		/* sync divider 1, async divider 1.5 */
 736		DEBUG(("53c700: sync 1 async 1.5\n"));
 737		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
 738		NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
 739		hostdata->sync_clock = hostdata->clock;
 740	} else {
 741		DEBUG(("53c700: sync 1 async 1\n"));
 742		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
 743		NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 744		/* sync divider 1, async divider 1 */
 745		hostdata->sync_clock = hostdata->clock;
 746	}
 747	/* Calculate the actual minimum period that can be supported
 748	 * by our synchronous clock speed.  See the 710 manual for
 749	 * exact details of this calculation which is based on a
 750	 * setting of the SXFER register */
 751	min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
 752	hostdata->min_period = NCR_700_MIN_PERIOD;
 753	if(min_period > NCR_700_MIN_PERIOD)
 754		hostdata->min_period = min_period;
 755}
 756
 757STATIC void
 758NCR_700_chip_reset(struct Scsi_Host *host)
 759{
 760	struct NCR_700_Host_Parameters *hostdata = 
 761		(struct NCR_700_Host_Parameters *)host->hostdata[0];
 762	if(hostdata->chip710) {
 763		NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
 764		udelay(100);
 765
 766		NCR_700_writeb(0, host, ISTAT_REG);
 767	} else {
 768		NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
 769		udelay(100);
 770		
 771		NCR_700_writeb(0, host, DCNTL_REG);
 772	}
 773
 774	mdelay(1000);
 775
 776	NCR_700_chip_setup(host);
 777}
 778
 779/* The heart of the message processing engine is that the instruction
 780 * immediately after the INT is the normal case (and so must be CLEAR
 781 * ACK).  If we want to do something else, we call that routine in
 782 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
 783 * ACK) so that the routine returns correctly to resume its activity
 784 * */
 785STATIC __u32
 786process_extended_message(struct Scsi_Host *host, 
 787			 struct NCR_700_Host_Parameters *hostdata,
 788			 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
 789{
 790	__u32 resume_offset = dsp, temp = dsp + 8;
 791	__u8 pun = 0xff, lun = 0xff;
 792
 793	if(SCp != NULL) {
 794		pun = SCp->device->id;
 795		lun = SCp->device->lun;
 796	}
 797
 798	switch(hostdata->msgin[2]) {
 799	case A_SDTR_MSG:
 800		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
 801			struct scsi_target *starget = SCp->device->sdev_target;
 802			__u8 period = hostdata->msgin[3];
 803			__u8 offset = hostdata->msgin[4];
 804
 805			if(offset == 0 || period == 0) {
 806				offset = 0;
 807				period = 0;
 808			}
 809
 810			spi_offset(starget) = offset;
 811			spi_period(starget) = period;
 812			
 813			if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
 814				spi_display_xfer_agreement(starget);
 815				NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
 816			}
 817			
 818			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
 819			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
 820			
 821			NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
 822				       host, SXFER_REG);
 823
 824		} else {
 825			/* SDTR message out of the blue, reject it */
 826			shost_printk(KERN_WARNING, host,
 827				"Unexpected SDTR msg\n");
 828			hostdata->msgout[0] = A_REJECT_MSG;
 829			dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
 830			script_patch_16(hostdata->dev, hostdata->script,
 831			                MessageCount, 1);
 832			/* SendMsgOut returns, so set up the return
 833			 * address */
 834			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 835		}
 836		break;
 837	
 838	case A_WDTR_MSG:
 839		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
 840		       host->host_no, pun, lun);
 841		hostdata->msgout[0] = A_REJECT_MSG;
 842		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
 843		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
 844		                1);
 845		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 846
 847		break;
 848
 849	default:
 850		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
 851		       host->host_no, pun, lun,
 852		       NCR_700_phase[(dsps & 0xf00) >> 8]);
 853		spi_print_msg(hostdata->msgin);
 854		printk("\n");
 855		/* just reject it */
 856		hostdata->msgout[0] = A_REJECT_MSG;
 857		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
 858		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
 859		                1);
 860		/* SendMsgOut returns, so set up the return
 861		 * address */
 862		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 863	}
 864	NCR_700_writel(temp, host, TEMP_REG);
 865	return resume_offset;
 866}
 867
 868STATIC __u32
 869process_message(struct Scsi_Host *host,	struct NCR_700_Host_Parameters *hostdata,
 870		struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
 871{
 872	/* work out where to return to */
 873	__u32 temp = dsp + 8, resume_offset = dsp;
 874	__u8 pun = 0xff, lun = 0xff;
 875
 876	if(SCp != NULL) {
 877		pun = SCp->device->id;
 878		lun = SCp->device->lun;
 879	}
 880
 881#ifdef NCR_700_DEBUG
 882	printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
 883	       NCR_700_phase[(dsps & 0xf00) >> 8]);
 884	spi_print_msg(hostdata->msgin);
 885	printk("\n");
 886#endif
 887
 888	switch(hostdata->msgin[0]) {
 889
 890	case A_EXTENDED_MSG:
 891		resume_offset =  process_extended_message(host, hostdata, SCp,
 892							  dsp, dsps);
 893		break;
 894
 895	case A_REJECT_MSG:
 896		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
 897			/* Rejected our sync negotiation attempt */
 898			spi_period(SCp->device->sdev_target) =
 899				spi_offset(SCp->device->sdev_target) = 0;
 900			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
 901			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
 902		} else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
 903			/* rejected our first simple tag message */
 904			scmd_printk(KERN_WARNING, SCp,
 905				"Rejected first tag queue attempt, turning off tag queueing\n");
 906			/* we're done negotiating */
 907			NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
 908			hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
 909			SCp->device->tagged_supported = 0;
 910			scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
 911		} else {
 912			shost_printk(KERN_WARNING, host,
 913				"(%d:%d) Unexpected REJECT Message %s\n",
 914			       pun, lun,
 915			       NCR_700_phase[(dsps & 0xf00) >> 8]);
 916			/* however, just ignore it */
 917		}
 918		break;
 919
 920	case A_PARITY_ERROR_MSG:
 921		printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
 922		       pun, lun);
 923		NCR_700_internal_bus_reset(host);
 924		break;
 925	case A_SIMPLE_TAG_MSG:
 926		printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
 927		       pun, lun, hostdata->msgin[1],
 928		       NCR_700_phase[(dsps & 0xf00) >> 8]);
 929		/* just ignore it */
 930		break;
 931	default:
 932		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
 933		       host->host_no, pun, lun,
 934		       NCR_700_phase[(dsps & 0xf00) >> 8]);
 935
 936		spi_print_msg(hostdata->msgin);
 937		printk("\n");
 938		/* just reject it */
 939		hostdata->msgout[0] = A_REJECT_MSG;
 940		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
 941		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
 942		                1);
 943		/* SendMsgOut returns, so set up the return
 944		 * address */
 945		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 946
 947		break;
 948	}
 949	NCR_700_writel(temp, host, TEMP_REG);
 950	/* set us up to receive another message */
 951	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
 952	return resume_offset;
 953}
 954
 955STATIC __u32
 956process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 957			 struct Scsi_Host *host,
 958			 struct NCR_700_Host_Parameters *hostdata)
 959{
 960	__u32 resume_offset = 0;
 961	__u8 pun = 0xff, lun=0xff;
 962
 963	if(SCp != NULL) {
 964		pun = SCp->device->id;
 965		lun = SCp->device->lun;
 966	}
 967
 968	if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
 969		DEBUG(("  COMMAND COMPLETE, status=%02x\n",
 970		       hostdata->status[0]));
 971		/* OK, if TCQ still under negotiation, we now know it works */
 972		if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
 973			NCR_700_set_tag_neg_state(SCp->device,
 974						  NCR_700_FINISHED_TAG_NEGOTIATION);
 975			
 976		/* check for contingent allegiance conditions */
 977		if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
 978		   status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
 979			struct NCR_700_command_slot *slot =
 980				(struct NCR_700_command_slot *)SCp->host_scribble;
 981			if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
 982				/* OOPS: bad device, returning another
 983				 * contingent allegiance condition */
 984				scmd_printk(KERN_ERR, SCp,
 985					"broken device is looping in contingent allegiance: ignoring\n");
 986				NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
 987			} else {
 988				char *cmnd =
 989					NCR_700_get_sense_cmnd(SCp->device);
 990#ifdef NCR_DEBUG
 991				scsi_print_command(SCp);
 992				printk("  cmd %p has status %d, requesting sense\n",
 993				       SCp, hostdata->status[0]);
 994#endif
 995				/* we can destroy the command here
 996				 * because the contingent allegiance
 997				 * condition will cause a retry which
 998				 * will re-copy the command from the
 999				 * saved data_cmnd.  We also unmap any
1000				 * data associated with the command
1001				 * here */
1002				NCR_700_unmap(hostdata, SCp, slot);
1003				dma_unmap_single(hostdata->dev, slot->pCmd,
1004						 MAX_COMMAND_SIZE,
1005						 DMA_TO_DEVICE);
1006
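    				/* build the 6-byte REQUEST SENSE CDB by hand:
    				 * byte 1 carries the LUN in bits 5-7 (the old
    				 * pre-SCSI-3 CDB addressing) and byte 4 is the
    				 * allocation length */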
1007				cmnd[0] = REQUEST_SENSE;
1008				cmnd[1] = (SCp->device->lun & 0x7) << 5;
1009				cmnd[2] = 0;
1010				cmnd[3] = 0;
1011				cmnd[4] = SCSI_SENSE_BUFFERSIZE;
1012				cmnd[5] = 0;
1013				/* Here's a quiet hack: the
1014				 * REQUEST_SENSE command is six bytes,
1015				 * so store a flag indicating that
1016				 * this was an internal sense request
1017				 * and the original status at the end
1018				 * of the command */
1019				cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1020				cmnd[7] = hostdata->status[0];
1021				cmnd[8] = SCp->cmd_len;
1022				SCp->cmd_len = 6; /* command length for
1023						   * REQUEST_SENSE */
1024				slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1025				slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1026				slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
1027				slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1028				slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1029				slot->SG[1].pAddr = 0;
1030				slot->resume_offset = hostdata->pScript;
1031				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1032				dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1033
1034				/* queue the command for reissue */
1035				slot->state = NCR_700_SLOT_QUEUED;
1036				slot->flags = NCR_700_FLAG_AUTOSENSE;
1037				hostdata->state = NCR_700_HOST_FREE;
1038				hostdata->cmd = NULL;
1039			}
1040		} else {
1041			// Currently rely on the mid layer evaluation
1042			// of the tag queuing capability
1043			//
1044			//if(status_byte(hostdata->status[0]) == GOOD &&
1045			//   SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1046			//	/* Piggy back the tag queueing support
1047			//	 * on this command */
1048			//	dma_sync_single_for_cpu(hostdata->dev,
1049			//			    slot->dma_handle,
1050			//			    SCp->request_bufflen,
1051			//			    DMA_FROM_DEVICE);
1052			//	if(((char *)SCp->request_buffer)[7] & 0x02) {
1053			//		scmd_printk(KERN_INFO, SCp,
1054			//		     "Enabling Tag Command Queuing\n");
1055			//		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1056			//		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1057			//	} else {
1058			//		NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1059			//		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1060			//	}
1061			//}
1062			NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1063		}
1064	} else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1065		__u8 i = (dsps & 0xf00) >> 8;
1066
1067		scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1068		       NCR_700_phase[i],
1069		       sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1070		scmd_printk(KERN_ERR, SCp, "         len = %d, cmd =",
1071			SCp->cmd_len);
1072		scsi_print_command(SCp);
1073
1074		NCR_700_internal_bus_reset(host);
1075	} else if((dsps & 0xfffff000) == A_FATAL) {
1076		int i = (dsps & 0xfff);
1077
1078		printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1079		       host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1080		if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1081			printk(KERN_ERR "     msg begins %02x %02x\n",
1082			       hostdata->msgin[0], hostdata->msgin[1]);
1083		}
1084		NCR_700_internal_bus_reset(host);
1085	} else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1086#ifdef NCR_700_DEBUG
1087		__u8 i = (dsps & 0xf00) >> 8;
1088
1089		printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1090		       host->host_no, pun, lun,
1091		       i, NCR_700_phase[i]);
1092#endif
1093		save_for_reselection(hostdata, SCp, dsp);
1094
1095	} else if(dsps == A_RESELECTION_IDENTIFIED) {
1096		__u8 lun;
1097		struct NCR_700_command_slot *slot;
1098		__u8 reselection_id = hostdata->reselection_id;
1099		struct scsi_device *SDp;
1100
1101		lun = hostdata->msgin[0] & 0x1f;
1102
1103		/* clear the reselection indicator */
1104		hostdata->reselection_id = 0xff;
1105		DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1106		       host->host_no, reselection_id, lun));
1107		SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1108		if(unlikely(SDp == NULL)) {
1109			printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1110			       host->host_no, reselection_id, lun);
1111			BUG();
1112		}
1113		if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1114			struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1115			if(unlikely(SCp == NULL)) {
1116				printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n", 
1117				       host->host_no, reselection_id, lun, hostdata->msgin[2]);
1118				BUG();
1119			}
1120
1121			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1122			DDEBUG(KERN_DEBUG, SDp,
1123				"reselection is tag %d, slot %p(%d)\n",
1124				hostdata->msgin[2], slot, slot->tag);
1125		} else {
1126			struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1127			if(unlikely(SCp == NULL)) {
1128				sdev_printk(KERN_ERR, SDp,
1129					"no saved request for untagged cmd\n");
1130				BUG();
1131			}
1132			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1133		}
1134
1135		if(slot == NULL) {
1136			printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1137			       host->host_no, reselection_id, lun,
1138			       hostdata->msgin[0], hostdata->msgin[1],
1139			       hostdata->msgin[2]);
1140		} else {
1141			if(hostdata->state != NCR_700_HOST_BUSY)
1142				printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1143				       host->host_no);
1144			resume_offset = slot->resume_offset;
1145			hostdata->cmd = slot->cmnd;
1146
1147			/* re-patch for this command */
1148			script_patch_32_abs(hostdata->dev, hostdata->script,
1149			                    CommandAddress, slot->pCmd);
1150			script_patch_16(hostdata->dev, hostdata->script,
1151					CommandCount, slot->cmnd->cmd_len);
1152			script_patch_32_abs(hostdata->dev, hostdata->script,
1153			                    SGScriptStartAddress,
1154					    to32bit(&slot->pSG[0].ins));
1155
1156			/* Note: setting SXFER only works if we're
1157			 * still in the MESSAGE phase, so it is vital
1158			 * that ACK is still asserted when we process
1159			 * the reselection message.  The resume offset
1160			 * should therefore always clear ACK */
1161			NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1162				       host, SXFER_REG);
1163			dma_cache_sync(hostdata->dev, hostdata->msgin,
1164				       MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1165			dma_cache_sync(hostdata->dev, hostdata->msgout,
1166				       MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1167			/* I'm just being paranoid here, the command should
1168			 * already have been flushed from the cache */
1169			dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
1170				       slot->cmnd->cmd_len, DMA_TO_DEVICE);
1171
1172
1173			
1174		}
1175	} else if(dsps == A_RESELECTED_DURING_SELECTION) {
1176
1177		/* This section is full of debugging code because I've
1178		 * never managed to reach it.  I think what happens is
1179		 * that, because the 700 runs with selection interrupts
1180		 * enabled the whole time, we take a selection interrupt
1181		 * before we manage to get to the reselected script
1182		 * interrupt */
1183
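    		/* the value read below is a bitmap: the reselecting target
    		 * asserts its own ID bit as well as ours, which is why our
    		 * ID is masked back out */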
1184		__u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1185		struct NCR_700_command_slot *slot;
1186		
1187		/* Take out our own ID */
1188		reselection_id &= ~(1<<host->this_id);
1189		
1190		/* I've never seen this happen, so keep this as a printk rather
1191		 * than a debug */
1192		printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1193		       host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1194
1195		{
1196			/* FIXME: DEBUGGING CODE */
1197			__u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1198			int i;
1199
1200			for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1201				if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1202				   && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1203					break;
1204			}
1205			printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1206			SCp =  hostdata->slots[i].cmnd;
1207		}
1208
1209		if(SCp != NULL) {
1210			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1211			/* change slot from busy to queued to redo command */
1212			slot->state = NCR_700_SLOT_QUEUED;
1213		}
1214		hostdata->cmd = NULL;
1215		
1216		if(reselection_id == 0) {
1217			if(hostdata->reselection_id == 0xff) {
1218				printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1219				return 0;
1220			} else {
1221				printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1222				       host->host_no);
1223				reselection_id = hostdata->reselection_id;
1224			}
1225		} else {
1226			
1227			/* convert to real ID */
1228			reselection_id = bitmap_to_number(reselection_id);
1229		}
1230		hostdata->reselection_id = reselection_id;
1231		/* just in case we have a stale simple tag message, clear it */
1232		hostdata->msgin[1] = 0;
1233		dma_cache_sync(hostdata->dev, hostdata->msgin,
1234			       MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1235		if(hostdata->tag_negotiated & (1<<reselection_id)) {
1236			resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1237		} else {
1238			resume_offset = hostdata->pScript + Ent_GetReselectionData;
1239		}
1240	} else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1241		/* we've just disconnected from the bus, do nothing since
1242		 * a return here will re-run the queued command slot
1243		 * that may have been interrupted by the initial selection */
1244		DEBUG((" SELECTION COMPLETED\n"));
1245	} else if((dsps & 0xfffff0f0) == A_MSG_IN) { 
1246		resume_offset = process_message(host, hostdata, SCp,
1247						dsp, dsps);
1248	} else if((dsps &  0xfffff000) == 0) {
1249		__u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1250		printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1251		       host->host_no, pun, lun, NCR_700_condition[i],
1252		       NCR_700_phase[j], dsp - hostdata->pScript);
1253		if(SCp != NULL) {
1254			struct scatterlist *sg;
1255
1256			scsi_print_command(SCp);
1257			scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
1258				printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1259			}
1260		}
1261		NCR_700_internal_bus_reset(host);
1262	} else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1263		printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1264		       host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1265		resume_offset = dsp;
1266	} else {
1267		printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1268		       host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1269		NCR_700_internal_bus_reset(host);
1270	}
1271	return resume_offset;
1272}
1273
1274/* We run the 53c700 with selection interrupts always enabled.  This
1275 * means that the chip may be selected as soon as the bus frees.  On a
1276 * busy bus, this can be before the scripts engine finishes its
1277 * processing.  Therefore, part of the selection processing has to be
1278 * to find out what the scripts engine is doing and complete the
1279 * function if necessary (i.e. process the pending disconnect or save
1280 * the interrupted initial selection) */
1281STATIC inline __u32
1282process_selection(struct Scsi_Host *host, __u32 dsp)
1283{
1284	__u8 id = 0;	/* Squash compiler warning */
1285	int count = 0;
1286	__u32 resume_offset = 0;
1287	struct NCR_700_Host_Parameters *hostdata =
1288		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1289	struct scsi_cmnd *SCp = hostdata->cmd;
1290	__u8 sbcl;
1291
1292	for(count = 0; count < 5; count++) {
1293		id = NCR_700_readb(host, hostdata->chip710 ?
1294				   CTEST9_REG : SFBR_REG);
1295
1296		/* Take out our own ID */
1297		id &= ~(1<<host->this_id);
1298		if(id != 0) 
1299			break;
1300		udelay(5);
1301	}
1302	sbcl = NCR_700_readb(host, SBCL_REG);
1303	if((sbcl & SBCL_IO) == 0) {
1304		/* mark as having been selected rather than reselected */
1305		id = 0xff;
1306	} else {
1307		/* convert to real ID */
1308		hostdata->reselection_id = id = bitmap_to_number(id);
1309		DEBUG(("scsi%d:  Reselected by %d\n",
1310		       host->host_no, id));
1311	}
1312	if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1313		struct NCR_700_command_slot *slot =
1314			(struct NCR_700_command_slot *)SCp->host_scribble;
1315		DEBUG(("  ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1316		
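    		/* the scripts engine was interrupted part way through a
    		 * disconnect or command-complete sequence, so finish that
    		 * work on its behalf before servicing the new selection */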
1317		switch(dsp - hostdata->pScript) {
1318		case Ent_Disconnect1:
1319		case Ent_Disconnect2:
1320			save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1321			break;
1322		case Ent_Disconnect3:
1323		case Ent_Disconnect4:
1324			save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1325			break;
1326		case Ent_Disconnect5:
1327		case Ent_Disconnect6:
1328			save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1329			break;
1330		case Ent_Disconnect7:
1331		case Ent_Disconnect8:
1332			save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1333			break;
1334		case Ent_Finish1:
1335		case Ent_Finish2:
1336			process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1337			break;
1338			
1339		default:
1340			slot->state = NCR_700_SLOT_QUEUED;
1341			break;
1342		}
1343	}
1344	hostdata->state = NCR_700_HOST_BUSY;
1345	hostdata->cmd = NULL;
1346	/* clear any stale simple tag message */
1347	hostdata->msgin[1] = 0;
1348	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1349		       DMA_BIDIRECTIONAL);
1350
1351	if(id == 0xff) {
1352		/* Selected as target, ignore */
1353		resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1354	} else if(hostdata->tag_negotiated & (1<<id)) {
1355		resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1356	} else {
1357		resume_offset = hostdata->pScript + Ent_GetReselectionData;
1358	}
1359	return resume_offset;
1360}
1361
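    /* Throw away whatever is sitting in the DMA FIFO.  The register used
     * differs between the 710 (CTEST8) and the 700/700-66 (DFIFO). */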
1362static inline void
1363NCR_700_clear_fifo(struct Scsi_Host *host) {
1364	const struct NCR_700_Host_Parameters *hostdata
1365		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1366	if(hostdata->chip710) {
1367		NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1368	} else {
1369		NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1370	}
1371}
1372
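    /* Push any data still held in the DMA FIFO out to memory, then drop
     * the flush bit again; the short delay gives the flush time to
     * complete. */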
1373static inline void
1374NCR_700_flush_fifo(struct Scsi_Host *host) {
1375	const struct NCR_700_Host_Parameters *hostdata
1376		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1377	if(hostdata->chip710) {
1378		NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1379		udelay(10);
1380		NCR_700_writeb(0, host, CTEST8_REG);
1381	} else {
1382		NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1383		udelay(10);
1384		NCR_700_writeb(0, host, DFIFO_REG);
1385	}
1386}
1387
1388
1389/* The queue lock with interrupts disabled must be held on entry to
1390 * this function */
1391STATIC int
1392NCR_700_start_command(struct scsi_cmnd *SCp)
1393{
1394	struct NCR_700_command_slot *slot =
1395		(struct NCR_700_command_slot *)SCp->host_scribble;
1396	struct NCR_700_Host_Parameters *hostdata =
1397		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1398	__u16 count = 1;	/* for IDENTIFY message */
1399	
1400	if(hostdata->state != NCR_700_HOST_FREE) {
1401		/* keep this inside the lock to close the race window where
1402		 * the running command finishes on another CPU before we have
1403		 * changed this slot's state to queued */
1404		slot->state = NCR_700_SLOT_QUEUED;
1405
1406		DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1407		       SCp->device->host->host_no, slot->cmnd, slot));
1408		return 0;
1409	}
1410	hostdata->state = NCR_700_HOST_BUSY;
1411	hostdata->cmd = SCp;
1412	slot->state = NCR_700_SLOT_BUSY;
1413	/* keep interrupts disabled until we have the command correctly
1414	 * set up so we cannot take a selection interrupt */
1415
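    	/* the first byte of msgout is the IDENTIFY message; disconnection
    	 * is only granted for ordinary commands, never for REQUEST SENSE
    	 * or internally generated autosense */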
1416	hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1417						slot->flags != NCR_700_FLAG_AUTOSENSE),
1418					       SCp->device->lun);
1419	/* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1420	 * if the negotiated transfer parameters still hold, so
1421	 * always renegotiate them */
1422	if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1423	   slot->flags == NCR_700_FLAG_AUTOSENSE) {
1424		NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1425	}
1426
1427	/* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1428	 * If a contingent allegiance condition exists, the device
1429	 * will refuse all tags, so send the request sense as untagged
1430	 * */
1431	if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1432	   && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1433	       slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1434		count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
1435	}
1436
1437	if(hostdata->fast &&
1438	   NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1439		count += spi_populate_sync_msg(&hostdata->msgout[count],
1440				spi_period(SCp->device->sdev_target),
1441				spi_offset(SCp->device->sdev_target));
1442		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1443	}
1444
1445	script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
1446
1447
1448	script_patch_ID(hostdata->dev, hostdata->script,
1449			Device_ID, 1<<scmd_id(SCp));
1450
1451	script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
1452			    slot->pCmd);
1453	script_patch_16(hostdata->dev, hostdata->script, CommandCount,
1454	                SCp->cmd_len);
1455	/* finally plumb the beginning of the SG list into the script
1456	 * */
1457	script_patch_32_abs(hostdata->dev, hostdata->script,
1458	                    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1459	NCR_700_clear_fifo(SCp->device->host);
1460
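    	/* a slot that has never been started begins at the top of the
    	 * script; otherwise keep whatever resume point it already has */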
1461	if(slot->resume_offset == 0)
1462		slot->resume_offset = hostdata->pScript;
1463	/* now perform all the writebacks and invalidates */
1464	dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
1465	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1466		       DMA_FROM_DEVICE);
1467	dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1468	dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
1469
1470	/* set the synchronous period/offset */
1471	NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1472		       SCp->device->host, SXFER_REG);
1473	NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1474	NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1475
1476	return 1;
1477}
1478
1479irqreturn_t
1480NCR_700_intr(int irq, void *dev_id)
1481{
1482	struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1483	struct NCR_700_Host_Parameters *hostdata =
1484		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1485	__u8 istat;
1486	__u32 resume_offset = 0;
1487	__u8 pun = 0xff, lun = 0xff;
1488	unsigned long flags;
1489	int handled = 0;
1490
1491	/* Use the host lock to serialise access to the 53c700
1492	 * hardware.  Note: In future, we may need to take the queue
1493	 * lock to enter the done routines.  When that happens, we
1494	 * need to ensure that for this driver, the host lock and the
1495	 * queue lock point to the same thing. */
1496	spin_lock_irqsave(host->host_lock, flags);
1497	if((istat = NCR_700_readb(host, ISTAT_REG))
1498	      & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1499		__u32 dsps;
1500		__u8 sstat0 = 0, dstat = 0;
1501		__u32 dsp;
1502		struct scsi_cmnd *SCp = hostdata->cmd;
1503		enum NCR_700_Host_State state;
1504
1505		handled = 1;
1506		state = hostdata->state;
1507		SCp = hostdata->cmd;
1508
1509		if(istat & SCSI_INT_PENDING) {
1510			udelay(10);
1511
1512			sstat0 = NCR_700_readb(host, SSTAT0_REG);
1513		}
1514
1515		if(istat & DMA_INT_PENDING) {
1516			udelay(10);
1517
1518			dstat = NCR_700_readb(host, DSTAT_REG);
1519		}
1520
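    		/* DSPS holds the vector supplied by the scripts INT
    		 * instruction; DSP is the script address at which the
    		 * engine stopped */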
1521		dsps = NCR_700_readl(host, DSPS_REG);
1522		dsp = NCR_700_readl(host, DSP_REG);
1523
1524		DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1525		       host->host_no, istat, sstat0, dstat,
1526		       (dsp - (__u32)(hostdata->pScript))/4,
1527		       dsp, dsps));
1528
1529		if(SCp != NULL) {
1530			pun = SCp->device->id;
1531			lun = SCp->device->lun;
1532		}
1533
1534		if(sstat0 & SCSI_RESET_DETECTED) {
1535			struct scsi_device *SDp;
1536			int i;
1537
1538			hostdata->state = NCR_700_HOST_BUSY;
1539
1540			printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1541			       host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1542
1543			scsi_report_bus_reset(host, 0);
1544
1545			/* clear all the negotiated parameters */
1546			__shost_for_each_device(SDp, host)
1547				NCR_700_clear_flag(SDp, ~0);
1548			
1549			/* clear all the slots and their pending commands */
1550			for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1551				struct scsi_cmnd *SCp;
1552				struct NCR_700_command_slot *slot =
1553					&hostdata->slots[i];
1554
1555				if(slot->state == NCR_700_SLOT_FREE)
1556					continue;
1557				
1558				SCp = slot->cmnd;
1559				printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1560				       slot, SCp);
1561				free_slot(slot, hostdata);
1562				SCp->host_scribble = NULL;
1563				NCR_700_set_depth(SCp->device, 0);
1564				/* NOTE: deadlock potential here: we
1565				 * rely on mid-layer guarantees that
1566				 * scsi_done won't try to issue the
1567				 * command again otherwise we'll
1568				 * deadlock on the
1569				 * hostdata->state_lock */
1570				SCp->result = DID_RESET << 16;
1571				SCp->scsi_done(SCp);
1572			}
1573			mdelay(25);
1574			NCR_700_chip_setup(host);
1575
1576			hostdata->state = NCR_700_HOST_FREE;
1577			hostdata->cmd = NULL;
1578			/* signal back if this was an eh induced reset */
1579			if(hostdata->eh_complete != NULL)
1580				complete(hostdata->eh_complete);
1581			goto out_unlock;
1582		} else if(sstat0 & SELECTION_TIMEOUT) {
1583			DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1584			       host->host_no, pun, lun));
1585			NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1586		} else if(sstat0 & PHASE_MISMATCH) {
1587			struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1588				(struct NCR_700_command_slot *)SCp->host_scribble;
1589
1590			if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1591				/* It wants to reply to some part of
1592				 * our message */
1593#ifdef NCR_700_DEBUG
1594				__u32 temp = NCR_700_readl(host, TEMP_REG);
1595				int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1596				printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1597#endif
1598				resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1599			} else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1600				  dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1601				int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1602				int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1603				int residual = NCR_700_data_residual(host);
1604				int i;
1605#ifdef NCR_700_DEBUG
1606				__u32 naddr = NCR_700_readl(host, DNAD_REG);
1607
1608				printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1609				       host->host_no, pun, lun,
1610				       SGcount, data_transfer);
1611				scsi_print_command(SCp);
1612				if(residual) {
1613					printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1614				       host->host_no, pun, lun,
1615				       SGcount, data_transfer, residual);
1616				}
1617#endif
1618				data_transfer += residual;
1619
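    				/* the interrupted scatter element was only
    				 * partially executed: rewrite its MOVE to
    				 * cover just the outstanding bytes and step
    				 * its address past the data already moved */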
1620				if(data_transfer != 0) {
1621					int count; 
1622					__u32 pAddr;
1623
1624					SGcount--;
1625
1626					count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1627					DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1628					slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1629					slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1630					pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1631					pAddr += (count - data_transfer);
1632#ifdef NCR_700_DEBUG
1633					if(pAddr != naddr) {
1634						printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1635					}
1636#endif
1637					slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1638				}
1639				/* set the executed moves to nops */
1640				for(i=0; i<SGcount; i++) {
1641					slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1642					slot->SG[i].pAddr = 0;
1643				}
1644				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1645				/* and pretend we disconnected after
1646				 * the command phase */
1647				resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1648				/* make sure all the data is flushed */
1649				NCR_700_flush_fifo(host);
1650			} else {
1651				__u8 sbcl = NCR_700_readb(host, SBCL_REG);
1652				printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1653				       host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1654				NCR_700_internal_bus_reset(host);
1655			}
1656
1657		} else if(sstat0 & SCSI_GROSS_ERROR) {
1658			printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1659			       host->host_no, pun, lun);
1660			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1661		} else if(sstat0 & PARITY_ERROR) {
1662			printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1663			       host->host_no, pun, lun);
1664			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1665		} else if(dstat & SCRIPT_INT_RECEIVED) {
1666			DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1667			       host->host_no, pun, lun));
1668			resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1669		} else if(dstat & (ILGL_INST_DETECTED)) {
1670			printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1671			       "         Please email James.Bottomley@HansenPartnership.com with the details\n",
1672			       host->host_no, pun, lun,
1673			       dsp, dsp - hostdata->pScript);
1674			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1675		} else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1676			printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1677			       host->host_no, pun, lun, dstat);
1678			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1679		}
1680
1681		
1682		/* NOTE: selection interrupt processing MUST occur
1683		 * after script interrupt processing to correctly cope
1684		 * with the case where we process a disconnect and
1685		 * then get reselected before we process the
1686		 * disconnection */
1687		if(sstat0 & SELECTED) {
1688			/* FIXME: It currently takes at least FOUR
1689			 * interrupts to complete a command that
1690			 * disconnects: one for the disconnect, one
1691			 * for the reselection, one to get the
1692			 * reselection data and one to complete the
1693			 * command.  If we guess the reselected
1694			 * command here and prepare it, we only need
1695			 * to get a reselection data interrupt if we
1696			 * guessed wrongly.  Since the interrupt
1697			 * overhead is much greater than the command
1698			 * setup, this would be an efficient
1699			 * optimisation particularly as we probably
1700			 * only have one outstanding command on a
1701			 * target most of the time */
1702
1703			resume_offset = process_selection(host, dsp);
1704
1705		}
1706
1707	}
1708
1709	if(resume_offset) {
1710		if(hostdata->state != NCR_700_HOST_BUSY) {
1711			printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1712			       host->host_no, resume_offset, resume_offset - hostdata->pScript);
1713			hostdata->state = NCR_700_HOST_BUSY;
1714		}
1715
1716		DEBUG(("Attempting to resume at %x\n", resume_offset));
1717		NCR_700_clear_fifo(host);
1718		NCR_700_writel(resume_offset, host, DSP_REG);
1719	} 
1720	/* There is probably a technical no-no about this: if we're a
1721	 * shared interrupt and we got this interrupt because the
1722	 * other device needs servicing, not us, we're still going to
1723	 * check our queued commands here---of course, there shouldn't
1724	 * be any outstanding.... */
1725	if(hostdata->state == NCR_700_HOST_FREE) {
1726		int i;
1727
1728		for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1729			/* fairness: always run the queue from the last
1730			 * position we left off */
1731			int j = (i + hostdata->saved_slot_position)
1732				% NCR_700_COMMAND_SLOTS_PER_HOST;
1733			
1734			if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1735				continue;
1736			if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1737				DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1738				       host->host_no, &hostdata->slots[j],
1739				       hostdata->slots[j].cmnd));
1740				hostdata->saved_slot_position = j + 1;
1741			}
1742
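    			/* the scripts engine can only be started on one
    			 * command at a time, so there is nothing more to do
    			 * on this pass whether or not the command was
    			 * actually issued */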
1743			break;
1744		}
1745	}
1746 out_unlock:
1747	spin_unlock_irqrestore(host->host_lock, flags);
1748	return IRQ_RETVAL(handled);
1749}
1750
1751static int
1752NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1753{
1754	struct NCR_700_Host_Parameters *hostdata = 
1755		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1756	__u32 move_ins;
1757	enum dma_data_direction direction;
1758	struct NCR_700_command_slot *slot;
1759
1760	if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1761		/* We're over our allocation, this should never happen
1762		 * since we report the max allocation to the mid layer */
1763		printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1764		return 1;
1765	}
1766	/* check for untagged commands.  We cannot have any outstanding
1767	 * commands if we accept them.  Commands could be untagged because:
1768	 *
1769	 * - The tag negotiated bitmap is clear
1770	 * - The blk layer sent an untagged command
1771	 */
1772	if(NCR_700_get_depth(SCp->device) != 0
1773	   && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1774	       || !blk_rq_tagged(SCp->request))) {
1775		CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1776		       NCR_700_get_depth(SCp->device));
1777		return SCSI_MLQUEUE_DEVICE_BUSY;
1778	}
1779	if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1780		CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1781		       NCR_700_get_depth(SCp->device));
1782		return SCSI_MLQUEUE_DEVICE_BUSY;
1783	}
1784	NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1785
1786	/* begin the command here */
1787	/* no need to check for NULL, test for command_slot_count above
1788	 * ensures a slot is free */
1789	slot = find_empty_slot(hostdata);
1790
1791	slot->cmnd = SCp;
1792
1793	SCp->scsi_done = done;
1794	SCp->host_scribble = (unsigned char *)slot;
1795	SCp->SCp.ptr = NULL;
1796	SCp->SCp.buffer = NULL;
1797
1798#ifdef NCR_700_DEBUG
1799	printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1800	scsi_print_command(SCp);
1801#endif
1802	if(blk_rq_tagged(SCp->request)
1803	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1804	   && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1805		scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1806		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1807		NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1808	}
1809
1810	/* here we may have to process an untagged command.  The gate
1811	 * above ensures that this will be the only one outstanding,
1812	 * so clear the tag negotiated bit.
1813	 *
1814	 * FIXME: This will royally screw up on multiple LUN devices
1815	 * */
1816	if(!blk_rq_tagged(SCp->request)
1817	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1818		scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1819		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1820	}
1821
1822	if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
1823	   && scsi_get_tag_type(SCp->device)) {
1824		slot->tag = SCp->request->tag;
1825		CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1826		       slot->tag, slot);
1827	} else {
1828		slot->tag = SCSI_NO_TAG;
1829		/* must populate current_cmnd for scsi_find_tag to work */
1830		SCp->device->current_cmnd = SCp;
1831	}
1832	/* sanity check: some of the commands generated by the mid-layer
1833	 * have an eccentric idea of their sc_data_direction */
1834	if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1835	   SCp->sc_data_direction != DMA_NONE) {
1836#ifdef NCR_700_DEBUG
1837		printk("53c700: Command");
1838		scsi_print_command(SCp);
1839		printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1840#endif
1841		SCp->sc_data_direction = DMA_NONE;
1842	}
1843
1844	switch (SCp->cmnd[0]) {
1845	case REQUEST_SENSE:
1846		/* clear the internal sense magic */
1847		SCp->cmnd[6] = 0;
1848		/* fall through */
1849	default:
1850		/* OK, get it from the command */
1851		switch(SCp->sc_data_direction) {
1852		case DMA_BIDIRECTIONAL:
1853		default:
1854			printk(KERN_ERR "53c700: Unknown command for data direction ");
1855			scsi_print_command(SCp);
1856			
1857			move_ins = 0;
1858			break;
1859		case DMA_NONE:
1860			move_ins = 0;
1861			break;
1862		case DMA_FROM_DEVICE:
1863			move_ins = SCRIPT_MOVE_DATA_IN;
1864			break;
1865		case DMA_TO_DEVICE:
1866			move_ins = SCRIPT_MOVE_DATA_OUT;
1867			break;
1868		}
1869	}
1870
1871	/* now build the scatter gather list */
1872	direction = SCp->sc_data_direction;
1873	if(move_ins != 0) {
1874		int i;
1875		int sg_count;
1876		dma_addr_t vPtr = 0;
1877		struct scatterlist *sg;
1878		__u32 count = 0;
1879
1880		sg_count = scsi_dma_map(SCp);
1881		BUG_ON(sg_count < 0);
1882
1883		scsi_for_each_sg(SCp, sg, sg_count, i) {
1884			vPtr = sg_dma_address(sg);
1885			count = sg_dma_len(sg);
1886
1887			slot->SG[i].ins = bS_to_host(move_ins | count);
1888			DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1889			       i, count, slot->SG[i].ins, (unsigned long)vPtr));
1890			slot->SG[i].pAddr = bS_to_host(vPtr);
1891		}
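    		/* terminate the scatter list with a RETURN so the scripts
    		 * engine drops back into the main script after the final
    		 * data move */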
1892		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1893		slot->SG[i].pAddr = 0;
1894		dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1895		DEBUG((" SETTING %08lx to %x\n",
1896		       (&slot->pSG[i].ins),
1897		       slot->SG[i].ins));
1898	}
1899	slot->resume_offset = 0;
1900	slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1901				    MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1902	NCR_700_start_command(SCp);
1903	return 0;
1904}
1905
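    /* DEF_SCSI_QCMD generates NCR_700_queuecommand() as a wrapper that
     * takes the host lock around NCR_700_queuecommand_lck() */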
1906STATIC DEF_SCSI_QCMD(NCR_700_queuecommand)
1907
1908STATIC int
1909NCR_700_abort(struct scsi_cmnd * SCp)
1910{
1911	struct NCR_700_command_slot *slot;
1912
1913	scmd_printk(KERN_INFO, SCp,
1914		"New error handler wants to abort command\n\t");
1915	scsi_print_command(SCp);
1916
1917	slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1918
1919	if(slot == NULL)
1920		/* no outstanding command to abort */
1921		return SUCCESS;
1922	if(SCp->cmnd[0] == TEST_UNIT_READY) {
1923		/* FIXME: This is because of a problem in the new
1924		 * error handler.  When it is in error recovery, it
1925		 * will send a TUR to a device it thinks may still be
1926		 * showing a problem.  If the TUR isn't responded to,
1927		 * it will abort it and mark the device off line.
1928		 * Unfortunately, it does no other error recovery, so
1929		 * this would leave us with an outstanding command
1930		 * occupying a slot.  Rather than allow this to
1931		 * happen, we issue a bus reset to force all
1932		 * outstanding commands to terminate here. */
1933		NCR_700_internal_bus_reset(SCp->device->host);
1934		/* still drop through and return failed */
1935	}
1936	return FAILED;
1937
1938}
1939
1940STATIC int
1941NCR_700_bus_reset(struct scsi_cmnd * SCp)
1942{
1943	DECLARE_COMPLETION_ONSTACK(complete);
1944	struct NCR_700_Host_Parameters *hostdata = 
1945		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1946
1947	scmd_printk(KERN_INFO, SCp,
1948		"New error handler wants BUS reset, cmd %p\n\t", SCp);
1949	scsi_print_command(SCp);
1950
1951	/* In theory, eh_complete should always be null because the
1952	 * eh is single threaded, but just in case we're handling a
1953	 * reset via sg or something */
1954	spin_lock_irq(SCp->device->host->host_lock);
1955	while (hostdata->eh_complete != NULL) {
1956		spin_unlock_irq(SCp->device->host->host_lock);
1957		msleep_interruptible(100);
1958		spin_lock_irq(SCp->device->host->host_lock);
1959	}
1960
1961	hostdata->eh_complete = &complete;
1962	NCR_700_internal_bus_reset(SCp->device->host);
1963
1964	spin_unlock_irq(SCp->device->host->host_lock);
1965	wait_for_completion(&complete);
1966	spin_lock_irq(SCp->device->host->host_lock);
1967
1968	hostdata->eh_complete = NULL;
1969	/* Revalidate the transport parameters of the failing device */
1970	if(hostdata->fast)
1971		spi_schedule_dv_device(SCp->device);
1972
1973	spin_unlock_irq(SCp->device->host->host_lock);
1974	return SUCCESS;
1975}
1976
1977STATIC int
1978NCR_700_host_reset(struct scsi_cmnd * SCp)
1979{
1980	scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
1981	scsi_print_command(SCp);
1982
1983	spin_lock_irq(SCp->device->host->host_lock);
1984
1985	NCR_700_internal_bus_reset(SCp->device->host);
1986	NCR_700_chip_reset(SCp->device->host);
1987
1988	spin_unlock_irq(SCp->device->host->host_lock);
1989
1990	return SUCCESS;
1991}
1992
1993STATIC void
1994NCR_700_set_period(struct scsi_target *STp, int period)
1995{
1996	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1997	struct NCR_700_Host_Parameters *hostdata = 
1998		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
1999	
2000	if(!hostdata->fast)
2001		return;
2002
2003	if(period < hostdata->min_period)
2004		period = hostdata->min_period;
2005
2006	spi_period(STp) = period;
2007	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2008			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2009	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2010}
2011
2012STATIC void
2013NCR_700_set_offset(struct scsi_target *STp, int offset)
2014{
2015	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2016	struct NCR_700_Host_Parameters *hostdata = 
2017		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2018	int max_offset = hostdata->chip710
2019		? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2020	
2021	if(!hostdata->fast)
2022		return;
2023
2024	if(offset > max_offset)
2025		offset = max_offset;
2026
2027	/* if we're currently async, make sure the period is reasonable */
2028	if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2029				    spi_period(STp) > 0xff))
2030		spi_period(STp) = hostdata->min_period;
2031
2032	spi_offset(STp) = offset;
2033	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2034			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2035	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2036}
2037
2038STATIC int
2039NCR_700_slave_alloc(struct scsi_device *SDp)
2040{
2041	SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2042				GFP_KERNEL);
2043
2044	if (!SDp->hostdata)
2045		return -ENOMEM;
2046
2047	return 0;
2048}
2049
2050STATIC int
2051NCR_700_slave_configure(struct scsi_device *SDp)
2052{
2053	struct NCR_700_Host_Parameters *hostdata = 
2054		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2055
2056	/* to do here: allocate memory; build a queue_full list */
2057	if(SDp->tagged_supported) {
2058		scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
2059		scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
2060		NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2061	} else {
2062		/* initialise to default depth */
2063		scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2064	}
2065	if(hostdata->fast) {
2066		/* Find the correct offset and period via domain validation */
2067		if (!spi_initial_dv(SDp->sdev_target))
2068			spi_dv_device(SDp);
2069	} else {
2070		spi_offset(SDp->sdev_target) = 0;
2071		spi_period(SDp->sdev_target) = 0;
2072	}
2073	return 0;
2074}
2075
2076STATIC void
2077NCR_700_slave_destroy(struct scsi_device *SDp)
2078{
2079	kfree(SDp->hostdata);
2080	SDp->hostdata = NULL;
2081}
2082
2083static int
2084NCR_700_change_queue_depth(struct scsi_device *SDp, int depth, int reason)
2085{
2086	if (reason != SCSI_QDEPTH_DEFAULT)
2087		return -EOPNOTSUPP;
2088
2089	if (depth > NCR_700_MAX_TAGS)
2090		depth = NCR_700_MAX_TAGS;
2091
2092	scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
2093	return depth;
2094}
2095
2096static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2097{
2098	int change_tag = ((tag_type ==0 &&  scsi_get_tag_type(SDp) != 0)
2099			  || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2100	struct NCR_700_Host_Parameters *hostdata = 
2101		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2102
2103	scsi_set_tag_type(SDp, tag_type);
2104
2105	/* We have a global (per target) flag to track whether TCQ is
2106	 * enabled, so we'll be turning it off for the entire target here.
2107	 * Our tag algorithm will fail if we mix tagged and untagged commands,
2108	 * so quiesce the device before doing this */
2109	if (change_tag)
2110		scsi_target_quiesce(SDp->sdev_target);
2111
2112	if (!tag_type) {
2113		/* shift back to the default unqueued number of commands
2114		 * (the user can still raise this) */
2115		scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
2116		hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
2117	} else {
2118		/* Here, we cleared the negotiation flag above, so this
2119		 * will force the driver to renegotiate */
2120		scsi_activate_tcq(SDp, SDp->queue_depth);
2121		if (change_tag)
2122			NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2123	}
2124	if (change_tag)
2125		scsi_target_resume(SDp->sdev_target);
2126
2127	return tag_type;
2128}
2129
2130static ssize_t
2131NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2132{
2133	struct scsi_device *SDp = to_scsi_device(dev);
2134
2135	return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2136}
2137
2138static struct device_attribute NCR_700_active_tags_attr = {
2139	.attr = {
2140		.name =		"active_tags",
2141		.mode =		S_IRUGO,
2142	},
2143	.show = NCR_700_show_active_tags,
2144};
2145
2146STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2147	&NCR_700_active_tags_attr,
2148	NULL,
2149};
2150
2151EXPORT_SYMBOL(NCR_700_detect);
2152EXPORT_SYMBOL(NCR_700_release);
2153EXPORT_SYMBOL(NCR_700_intr);
2154
2155static struct spi_function_template NCR_700_transport_functions =  {
2156	.set_period	= NCR_700_set_period,
2157	.show_period	= 1,
2158	.set_offset	= NCR_700_set_offset,
2159	.show_offset	= 1,
2160};
2161
2162static int __init NCR_700_init(void)
2163{
2164	NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2165	if(!NCR_700_transport_template)
2166		return -ENODEV;
2167	return 0;
2168}
2169
2170static void __exit NCR_700_exit(void)
2171{
2172	spi_release_transport(NCR_700_transport_template);
2173}
2174
2175module_init(NCR_700_init);
2176module_exit(NCR_700_exit);
2177