Linux Audio

Check our new training course

Loading...
Note: File does not exist in v6.8.
   1/***************************************************************************
   2                          dpti.c  -  description
   3                             -------------------
   4    begin                : Thu Sep 7 2000
   5    copyright            : (C) 2000 by Adaptec
   6
   7			   July 30, 2001 First version being submitted
   8			   for inclusion in the kernel.  V2.4
   9
  10    See Documentation/scsi/dpti.txt for history, notes, license info
  11    and credits
  12 ***************************************************************************/
  13
  14/***************************************************************************
  15 *                                                                         *
  16 *   This program is free software; you can redistribute it and/or modify  *
  17 *   it under the terms of the GNU General Public License as published by  *
  18 *   the Free Software Foundation; either version 2 of the License, or     *
  19 *   (at your option) any later version.                                   *
  20 *                                                                         *
  21 ***************************************************************************/
  22/***************************************************************************
  23 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
  24 - Support 2.6 kernel and DMA-mapping
  25 - ioctl fix for raid tools
  26 - use schedule_timeout in long long loop
  27 **************************************************************************/
  28
  29/*#define DEBUG 1 */
  30/*#define UARTDELAY 1 */
  31
  32#include <linux/module.h>
  33
  34MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
  35MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
  36
  37////////////////////////////////////////////////////////////////
  38
  39#include <linux/ioctl.h>	/* For SCSI-Passthrough */
  40#include <linux/uaccess.h>
  41
  42#include <linux/stat.h>
  43#include <linux/slab.h>		/* for kmalloc() */
  44#include <linux/pci.h>		/* for PCI support */
  45#include <linux/proc_fs.h>
  46#include <linux/blkdev.h>
  47#include <linux/delay.h>	/* for udelay */
  48#include <linux/interrupt.h>
  49#include <linux/kernel.h>	/* for printk */
  50#include <linux/sched.h>
  51#include <linux/reboot.h>
  52#include <linux/spinlock.h>
  53#include <linux/dma-mapping.h>
  54
  55#include <linux/timer.h>
  56#include <linux/string.h>
  57#include <linux/ioport.h>
  58#include <linux/mutex.h>
  59
  60#include <asm/processor.h>	/* for boot_cpu_data */
  61#include <asm/pgtable.h>
  62#include <asm/io.h>		/* for virt_to_bus, etc. */
  63
  64#include <scsi/scsi.h>
  65#include <scsi/scsi_cmnd.h>
  66#include <scsi/scsi_device.h>
  67#include <scsi/scsi_host.h>
  68#include <scsi/scsi_tcq.h>
  69
  70#include "dpt/dptsig.h"
  71#include "dpti.h"
  72
  73/*============================================================================
  74 * Create a binary signature - this is read by dptsig
  75 * Needed for our management apps
  76 *============================================================================
  77 */
  78static DEFINE_MUTEX(adpt_mutex);
  79static dpt_sig_S DPTI_sig = {
  80	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
  81#ifdef __i386__
  82	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
  83#elif defined(__ia64__)
  84	PROC_INTEL, PROC_IA64,
  85#elif defined(__sparc__)
  86	PROC_ULTRASPARC, PROC_ULTRASPARC,
  87#elif defined(__alpha__)
  88	PROC_ALPHA, PROC_ALPHA,
  89#else
  90	(-1),(-1),
  91#endif
  92	 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
  93	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
  94	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
  95};
  96
  97
  98
  99
 100/*============================================================================
 101 * Globals
 102 *============================================================================
 103 */
 104
 105static DEFINE_MUTEX(adpt_configuration_lock);
 106
 107static struct i2o_sys_tbl *sys_tbl;
 108static dma_addr_t sys_tbl_pa;
 109static int sys_tbl_ind;
 110static int sys_tbl_len;
 111
 112static adpt_hba* hba_chain = NULL;
 113static int hba_count = 0;
 114
 115static struct class *adpt_sysfs_class;
 116
 117static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
 118#ifdef CONFIG_COMPAT
 119static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
 120#endif
 121
 122static const struct file_operations adpt_fops = {
 123	.unlocked_ioctl	= adpt_unlocked_ioctl,
 124	.open		= adpt_open,
 125	.release	= adpt_close,
 126#ifdef CONFIG_COMPAT
 127	.compat_ioctl	= compat_adpt_ioctl,
 128#endif
 129	.llseek		= noop_llseek,
 130};
 131
 132/* Structures and definitions for synchronous message posting.
 133 * See adpt_i2o_post_wait() for description
 134 * */
 135struct adpt_i2o_post_wait_data
 136{
 137	int status;
 138	u32 id;
 139	adpt_wait_queue_head_t *wq;
 140	struct adpt_i2o_post_wait_data *next;
 141};
 142
 143static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
 144static u32 adpt_post_wait_id = 0;
 145static DEFINE_SPINLOCK(adpt_post_wait_lock);
 146
 147
 148/*============================================================================
 149 * 				Functions
 150 *============================================================================
 151 */
 152
 153static inline int dpt_dma64(adpt_hba *pHba)
 154{
 155	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
 156}
 157
 158static inline u32 dma_high(dma_addr_t addr)
 159{
 160	return upper_32_bits(addr);
 161}
 162
 163static inline u32 dma_low(dma_addr_t addr)
 164{
 165	return (u32)addr;
 166}
 167
 168static u8 adpt_read_blink_led(adpt_hba* host)
 169{
 170	if (host->FwDebugBLEDflag_P) {
 171		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
 172			return readb(host->FwDebugBLEDvalue_P);
 173		}
 174	}
 175	return 0;
 176}
 177
 178/*============================================================================
 179 * Scsi host template interface functions
 180 *============================================================================
 181 */
 182
 183#ifdef MODULE
 184static struct pci_device_id dptids[] = {
 185	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 186	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 187	{ 0, }
 188};
 189#endif
 190
 191MODULE_DEVICE_TABLE(pci,dptids);
 192
 193static int adpt_detect(struct scsi_host_template* sht)
 194{
 195	struct pci_dev *pDev = NULL;
 196	adpt_hba *pHba;
 197	adpt_hba *next;
 198
 199	PINFO("Detecting Adaptec I2O RAID controllers...\n");
 200
 201        /* search for all Adatpec I2O RAID cards */
 202	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
 203		if(pDev->device == PCI_DPT_DEVICE_ID ||
 204		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
 205			if(adpt_install_hba(sht, pDev) ){
 206				PERROR("Could not Init an I2O RAID device\n");
 207				PERROR("Will not try to detect others.\n");
 208				return hba_count-1;
 209			}
 210			pci_dev_get(pDev);
 211		}
 212	}
 213
 214	/* In INIT state, Activate IOPs */
 215	for (pHba = hba_chain; pHba; pHba = next) {
 216		next = pHba->next;
 217		// Activate does get status , init outbound, and get hrt
 218		if (adpt_i2o_activate_hba(pHba) < 0) {
 219			adpt_i2o_delete_hba(pHba);
 220		}
 221	}
 222
 223
 224	/* Active IOPs in HOLD state */
 225
 226rebuild_sys_tab:
 227	if (hba_chain == NULL) 
 228		return 0;
 229
 230	/*
 231	 * If build_sys_table fails, we kill everything and bail
 232	 * as we can't init the IOPs w/o a system table
 233	 */	
 234	if (adpt_i2o_build_sys_table() < 0) {
 235		adpt_i2o_sys_shutdown();
 236		return 0;
 237	}
 238
 239	PDEBUG("HBA's in HOLD state\n");
 240
 241	/* If IOP don't get online, we need to rebuild the System table */
 242	for (pHba = hba_chain; pHba; pHba = pHba->next) {
 243		if (adpt_i2o_online_hba(pHba) < 0) {
 244			adpt_i2o_delete_hba(pHba);	
 245			goto rebuild_sys_tab;
 246		}
 247	}
 248
 249	/* Active IOPs now in OPERATIONAL state */
 250	PDEBUG("HBA's in OPERATIONAL state\n");
 251
 252	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
 253	for (pHba = hba_chain; pHba; pHba = next) {
 254		next = pHba->next;
 255		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
 256		if (adpt_i2o_lct_get(pHba) < 0){
 257			adpt_i2o_delete_hba(pHba);
 258			continue;
 259		}
 260
 261		if (adpt_i2o_parse_lct(pHba) < 0){
 262			adpt_i2o_delete_hba(pHba);
 263			continue;
 264		}
 265		adpt_inquiry(pHba);
 266	}
 267
 268	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
 269	if (IS_ERR(adpt_sysfs_class)) {
 270		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
 271		adpt_sysfs_class = NULL;
 272	}
 273
 274	for (pHba = hba_chain; pHba; pHba = next) {
 275		next = pHba->next;
 276		if (adpt_scsi_host_alloc(pHba, sht) < 0){
 277			adpt_i2o_delete_hba(pHba);
 278			continue;
 279		}
 280		pHba->initialized = TRUE;
 281		pHba->state &= ~DPTI_STATE_RESET;
 282		if (adpt_sysfs_class) {
 283			struct device *dev = device_create(adpt_sysfs_class,
 284				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
 285				"dpti%d", pHba->unit);
 286			if (IS_ERR(dev)) {
 287				printk(KERN_WARNING"dpti%d: unable to "
 288					"create device in dpt_i2o class\n",
 289					pHba->unit);
 290			}
 291		}
 292	}
 293
 294	// Register our control device node
 295	// nodes will need to be created in /dev to access this
 296	// the nodes can not be created from within the driver
 297	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
 298		adpt_i2o_sys_shutdown();
 299		return 0;
 300	}
 301	return hba_count;
 302}
 303
 304
 305static void adpt_release(adpt_hba *pHba)
 306{
 307	struct Scsi_Host *shost = pHba->host;
 308
 309	scsi_remove_host(shost);
 310//	adpt_i2o_quiesce_hba(pHba);
 311	adpt_i2o_delete_hba(pHba);
 312	scsi_host_put(shost);
 313}
 314
 315
 316static void adpt_inquiry(adpt_hba* pHba)
 317{
 318	u32 msg[17]; 
 319	u32 *mptr;
 320	u32 *lenptr;
 321	int direction;
 322	int scsidir;
 323	u32 len;
 324	u32 reqlen;
 325	u8* buf;
 326	dma_addr_t addr;
 327	u8  scb[16];
 328	s32 rcode;
 329
 330	memset(msg, 0, sizeof(msg));
 331	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
 332	if(!buf){
 333		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
 334		return;
 335	}
 336	memset((void*)buf, 0, 36);
 337	
 338	len = 36;
 339	direction = 0x00000000;	
 340	scsidir  =0x40000000;	// DATA IN  (iop<--dev)
 341
 342	if (dpt_dma64(pHba))
 343		reqlen = 17;		// SINGLE SGE, 64 bit
 344	else
 345		reqlen = 14;		// SINGLE SGE, 32 bit
 346	/* Stick the headers on */
 347	msg[0] = reqlen<<16 | SGL_OFFSET_12;
 348	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
 349	msg[2] = 0;
 350	msg[3]  = 0;
 351	// Adaptec/DPT Private stuff 
 352	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
 353	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
 354	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
 355	// I2O_SCB_FLAG_ENABLE_DISCONNECT | 
 356	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | 
 357	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
 358	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
 359
 360	mptr=msg+7;
 361
 362	memset(scb, 0, sizeof(scb));
 363	// Write SCSI command into the message - always 16 byte block 
 364	scb[0] = INQUIRY;
 365	scb[1] = 0;
 366	scb[2] = 0;
 367	scb[3] = 0;
 368	scb[4] = 36;
 369	scb[5] = 0;
 370	// Don't care about the rest of scb
 371
 372	memcpy(mptr, scb, sizeof(scb));
 373	mptr+=4;
 374	lenptr=mptr++;		/* Remember me - fill in when we know */
 375
 376	/* Now fill in the SGList and command */
 377	*lenptr = len;
 378	if (dpt_dma64(pHba)) {
 379		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
 380		*mptr++ = 1 << PAGE_SHIFT;
 381		*mptr++ = 0xD0000000|direction|len;
 382		*mptr++ = dma_low(addr);
 383		*mptr++ = dma_high(addr);
 384	} else {
 385		*mptr++ = 0xD0000000|direction|len;
 386		*mptr++ = addr;
 387	}
 388
 389	// Send it on it's way
 390	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
 391	if (rcode != 0) {
 392		sprintf(pHba->detail, "Adaptec I2O RAID");
 393		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
 394		if (rcode != -ETIME && rcode != -EINTR)
 395			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
 396	} else {
 397		memset(pHba->detail, 0, sizeof(pHba->detail));
 398		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
 399		memcpy(&(pHba->detail[16]), " Model: ", 8);
 400		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
 401		memcpy(&(pHba->detail[40]), " FW: ", 4);
 402		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
 403		pHba->detail[48] = '\0';	/* precautionary */
 404		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
 405	}
 406	adpt_i2o_status_get(pHba);
 407	return ;
 408}
 409
 410
 411static int adpt_slave_configure(struct scsi_device * device)
 412{
 413	struct Scsi_Host *host = device->host;
 414	adpt_hba* pHba;
 415
 416	pHba = (adpt_hba *) host->hostdata[0];
 417
 418	if (host->can_queue && device->tagged_supported) {
 419		scsi_change_queue_depth(device,
 420				host->can_queue - 1);
 421	}
 422	return 0;
 423}
 424
 425static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
 426{
 427	adpt_hba* pHba = NULL;
 428	struct adpt_device* pDev = NULL;	/* dpt per device information */
 429
 430	cmd->scsi_done = done;
 431	/*
 432	 * SCSI REQUEST_SENSE commands will be executed automatically by the 
 433	 * Host Adapter for any errors, so they should not be executed 
 434	 * explicitly unless the Sense Data is zero indicating that no error 
 435	 * occurred.
 436	 */
 437
 438	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
 439		cmd->result = (DID_OK << 16);
 440		cmd->scsi_done(cmd);
 441		return 0;
 442	}
 443
 444	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
 445	if (!pHba) {
 446		return FAILED;
 447	}
 448
 449	rmb();
 450	if ((pHba->state) & DPTI_STATE_RESET)
 451		return SCSI_MLQUEUE_HOST_BUSY;
 452
 453	// TODO if the cmd->device if offline then I may need to issue a bus rescan
 454	// followed by a get_lct to see if the device is there anymore
 455	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
 456		/*
 457		 * First command request for this device.  Set up a pointer
 458		 * to the device structure.  This should be a TEST_UNIT_READY
 459		 * command from scan_scsis_single.
 460		 */
 461		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
 462			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response 
 463			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
 464			cmd->result = (DID_NO_CONNECT << 16);
 465			cmd->scsi_done(cmd);
 466			return 0;
 467		}
 468		cmd->device->hostdata = pDev;
 469	}
 470	pDev->pScsi_dev = cmd->device;
 471
 472	/*
 473	 * If we are being called from when the device is being reset, 
 474	 * delay processing of the command until later.
 475	 */
 476	if (pDev->state & DPTI_DEV_RESET ) {
 477		return FAILED;
 478	}
 479	return adpt_scsi_to_i2o(pHba, cmd, pDev);
 480}
 481
 482static DEF_SCSI_QCMD(adpt_queue)
 483
 484static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
 485		sector_t capacity, int geom[])
 486{
 487	int heads=-1;
 488	int sectors=-1;
 489	int cylinders=-1;
 490
 491	// *** First lets set the default geometry ****
 492	
 493	// If the capacity is less than ox2000
 494	if (capacity < 0x2000 ) {	// floppy
 495		heads = 18;
 496		sectors = 2;
 497	} 
 498	// else if between 0x2000 and 0x20000
 499	else if (capacity < 0x20000) {
 500		heads = 64;
 501		sectors = 32;
 502	}
 503	// else if between 0x20000 and 0x40000
 504	else if (capacity < 0x40000) {
 505		heads = 65;
 506		sectors = 63;
 507	}
 508	// else if between 0x4000 and 0x80000
 509	else if (capacity < 0x80000) {
 510		heads = 128;
 511		sectors = 63;
 512	}
 513	// else if greater than 0x80000
 514	else {
 515		heads = 255;
 516		sectors = 63;
 517	}
 518	cylinders = sector_div(capacity, heads * sectors);
 519
 520	// Special case if CDROM
 521	if(sdev->type == 5) {  // CDROM
 522		heads = 252;
 523		sectors = 63;
 524		cylinders = 1111;
 525	}
 526
 527	geom[0] = heads;
 528	geom[1] = sectors;
 529	geom[2] = cylinders;
 530	
 531	PDEBUG("adpt_bios_param: exit\n");
 532	return 0;
 533}
 534
 535
 536static const char *adpt_info(struct Scsi_Host *host)
 537{
 538	adpt_hba* pHba;
 539
 540	pHba = (adpt_hba *) host->hostdata[0];
 541	return (char *) (pHba->detail);
 542}
 543
 544static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
 545{
 546	struct adpt_device* d;
 547	int id;
 548	int chan;
 549	adpt_hba* pHba;
 550	int unit;
 551
 552	// Find HBA (host bus adapter) we are looking for
 553	mutex_lock(&adpt_configuration_lock);
 554	for (pHba = hba_chain; pHba; pHba = pHba->next) {
 555		if (pHba->host == host) {
 556			break;	/* found adapter */
 557		}
 558	}
 559	mutex_unlock(&adpt_configuration_lock);
 560	if (pHba == NULL) {
 561		return 0;
 562	}
 563	host = pHba->host;
 564
 565	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
 566	seq_printf(m, "%s\n", pHba->detail);
 567	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n", 
 568			pHba->host->host_no, pHba->name, host->irq);
 569	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
 570			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
 571
 572	seq_puts(m, "Devices:\n");
 573	for(chan = 0; chan < MAX_CHANNEL; chan++) {
 574		for(id = 0; id < MAX_ID; id++) {
 575			d = pHba->channel[chan].device[id];
 576			while(d) {
 577				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
 578				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
 579
 580				unit = d->pI2o_dev->lct_data.tid;
 581				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
 582					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
 583					       scsi_device_online(d->pScsi_dev)? "online":"offline"); 
 584				d = d->next_lun;
 585			}
 586		}
 587	}
 588	return 0;
 589}
 590
 591/*
 592 *	Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 593 */
 594static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
 595{
 596	return (u32)cmd->serial_number;
 597}
 598
 599/*
 600 *	Go from a u32 'context' to a struct scsi_cmnd * .
 601 *	This could probably be made more efficient.
 602 */
 603static struct scsi_cmnd *
 604	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
 605{
 606	struct scsi_cmnd * cmd;
 607	struct scsi_device * d;
 608
 609	if (context == 0)
 610		return NULL;
 611
 612	spin_unlock(pHba->host->host_lock);
 613	shost_for_each_device(d, pHba->host) {
 614		unsigned long flags;
 615		spin_lock_irqsave(&d->list_lock, flags);
 616		list_for_each_entry(cmd, &d->cmd_list, list) {
 617			if (((u32)cmd->serial_number == context)) {
 618				spin_unlock_irqrestore(&d->list_lock, flags);
 619				scsi_device_put(d);
 620				spin_lock(pHba->host->host_lock);
 621				return cmd;
 622			}
 623		}
 624		spin_unlock_irqrestore(&d->list_lock, flags);
 625	}
 626	spin_lock(pHba->host->host_lock);
 627
 628	return NULL;
 629}
 630
 631/*
 632 *	Turn a pointer to ioctl reply data into an u32 'context'
 633 */
 634static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
 635{
 636#if BITS_PER_LONG == 32
 637	return (u32)(unsigned long)reply;
 638#else
 639	ulong flags = 0;
 640	u32 nr, i;
 641
 642	spin_lock_irqsave(pHba->host->host_lock, flags);
 643	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
 644	for (i = 0; i < nr; i++) {
 645		if (pHba->ioctl_reply_context[i] == NULL) {
 646			pHba->ioctl_reply_context[i] = reply;
 647			break;
 648		}
 649	}
 650	spin_unlock_irqrestore(pHba->host->host_lock, flags);
 651	if (i >= nr) {
 652		printk(KERN_WARNING"%s: Too many outstanding "
 653				"ioctl commands\n", pHba->name);
 654		return (u32)-1;
 655	}
 656
 657	return i;
 658#endif
 659}
 660
 661/*
 662 *	Go from an u32 'context' to a pointer to ioctl reply data.
 663 */
 664static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
 665{
 666#if BITS_PER_LONG == 32
 667	return (void *)(unsigned long)context;
 668#else
 669	void *p = pHba->ioctl_reply_context[context];
 670	pHba->ioctl_reply_context[context] = NULL;
 671
 672	return p;
 673#endif
 674}
 675
 676/*===========================================================================
 677 * Error Handling routines
 678 *===========================================================================
 679 */
 680
 681static int adpt_abort(struct scsi_cmnd * cmd)
 682{
 683	adpt_hba* pHba = NULL;	/* host bus adapter structure */
 684	struct adpt_device* dptdevice;	/* dpt per device information */
 685	u32 msg[5];
 686	int rcode;
 687
 688	if(cmd->serial_number == 0){
 689		return FAILED;
 690	}
 691	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
 692	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
 693	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
 694		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
 695		return FAILED;
 696	}
 697
 698	memset(msg, 0, sizeof(msg));
 699	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
 700	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
 701	msg[2] = 0;
 702	msg[3]= 0; 
 703	msg[4] = adpt_cmd_to_context(cmd);
 704	if (pHba->host)
 705		spin_lock_irq(pHba->host->host_lock);
 706	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
 707	if (pHba->host)
 708		spin_unlock_irq(pHba->host->host_lock);
 709	if (rcode != 0) {
 710		if(rcode == -EOPNOTSUPP ){
 711			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
 712			return FAILED;
 713		}
 714		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
 715		return FAILED;
 716	} 
 717	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
 718	return SUCCESS;
 719}
 720
 721
 722#define I2O_DEVICE_RESET 0x27
 723// This is the same for BLK and SCSI devices
 724// NOTE this is wrong in the i2o.h definitions
 725// This is not currently supported by our adapter but we issue it anyway
 726static int adpt_device_reset(struct scsi_cmnd* cmd)
 727{
 728	adpt_hba* pHba;
 729	u32 msg[4];
 730	u32 rcode;
 731	int old_state;
 732	struct adpt_device* d = cmd->device->hostdata;
 733
 734	pHba = (void*) cmd->device->host->hostdata[0];
 735	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
 736	if (!d) {
 737		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
 738		return FAILED;
 739	}
 740	memset(msg, 0, sizeof(msg));
 741	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
 742	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
 743	msg[2] = 0;
 744	msg[3] = 0;
 745
 746	if (pHba->host)
 747		spin_lock_irq(pHba->host->host_lock);
 748	old_state = d->state;
 749	d->state |= DPTI_DEV_RESET;
 750	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
 751	d->state = old_state;
 752	if (pHba->host)
 753		spin_unlock_irq(pHba->host->host_lock);
 754	if (rcode != 0) {
 755		if(rcode == -EOPNOTSUPP ){
 756			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
 757			return FAILED;
 758		}
 759		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
 760		return FAILED;
 761	} else {
 762		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
 763		return SUCCESS;
 764	}
 765}
 766
 767
 768#define I2O_HBA_BUS_RESET 0x87
 769// This version of bus reset is called by the eh_error handler
 770static int adpt_bus_reset(struct scsi_cmnd* cmd)
 771{
 772	adpt_hba* pHba;
 773	u32 msg[4];
 774	u32 rcode;
 775
 776	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
 777	memset(msg, 0, sizeof(msg));
 778	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
 779	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
 780	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
 781	msg[2] = 0;
 782	msg[3] = 0;
 783	if (pHba->host)
 784		spin_lock_irq(pHba->host->host_lock);
 785	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
 786	if (pHba->host)
 787		spin_unlock_irq(pHba->host->host_lock);
 788	if (rcode != 0) {
 789		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
 790		return FAILED;
 791	} else {
 792		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
 793		return SUCCESS;
 794	}
 795}
 796
 797// This version of reset is called by the eh_error_handler
 798static int __adpt_reset(struct scsi_cmnd* cmd)
 799{
 800	adpt_hba* pHba;
 801	int rcode;
 802	char name[32];
 803
 804	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
 805	strncpy(name, pHba->name, sizeof(name));
 806	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
 807	rcode =  adpt_hba_reset(pHba);
 808	if(rcode == 0){
 809		printk(KERN_WARNING"%s: HBA reset complete\n", name);
 810		return SUCCESS;
 811	} else {
 812		printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
 813		return FAILED;
 814	}
 815}
 816
 817static int adpt_reset(struct scsi_cmnd* cmd)
 818{
 819	int rc;
 820
 821	spin_lock_irq(cmd->device->host->host_lock);
 822	rc = __adpt_reset(cmd);
 823	spin_unlock_irq(cmd->device->host->host_lock);
 824
 825	return rc;
 826}
 827
 828// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
 829static int adpt_hba_reset(adpt_hba* pHba)
 830{
 831	int rcode;
 832
 833	pHba->state |= DPTI_STATE_RESET;
 834
 835	// Activate does get status , init outbound, and get hrt
 836	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
 837		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
 838		adpt_i2o_delete_hba(pHba);
 839		return rcode;
 840	}
 841
 842	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
 843		adpt_i2o_delete_hba(pHba);
 844		return rcode;
 845	}
 846	PDEBUG("%s: in HOLD state\n",pHba->name);
 847
 848	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
 849		adpt_i2o_delete_hba(pHba);	
 850		return rcode;
 851	}
 852	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
 853
 854	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
 855		adpt_i2o_delete_hba(pHba);
 856		return rcode;
 857	}
 858
 859	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
 860		adpt_i2o_delete_hba(pHba);
 861		return rcode;
 862	}
 863	pHba->state &= ~DPTI_STATE_RESET;
 864
 865	adpt_fail_posted_scbs(pHba);
 866	return 0;	/* return success */
 867}
 868
 869/*===========================================================================
 870 * 
 871 *===========================================================================
 872 */
 873
 874
 875static void adpt_i2o_sys_shutdown(void)
 876{
 877	adpt_hba *pHba, *pNext;
 878	struct adpt_i2o_post_wait_data *p1, *old;
 879
 880	 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
 881	 printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
 882	/* Delete all IOPs from the controller chain */
 883	/* They should have already been released by the
 884	 * scsi-core
 885	 */
 886	for (pHba = hba_chain; pHba; pHba = pNext) {
 887		pNext = pHba->next;
 888		adpt_i2o_delete_hba(pHba);
 889	}
 890
 891	/* Remove any timedout entries from the wait queue.  */
 892//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
 893	/* Nothing should be outstanding at this point so just
 894	 * free them 
 895	 */
 896	for(p1 = adpt_post_wait_queue; p1;) {
 897		old = p1;
 898		p1 = p1->next;
 899		kfree(old);
 900	}
 901//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
 902	adpt_post_wait_queue = NULL;
 903
 904	 printk(KERN_INFO "Adaptec I2O controllers down.\n");
 905}
 906
 907static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
 908{
 909
 910	adpt_hba* pHba = NULL;
 911	adpt_hba* p = NULL;
 912	ulong base_addr0_phys = 0;
 913	ulong base_addr1_phys = 0;
 914	u32 hba_map0_area_size = 0;
 915	u32 hba_map1_area_size = 0;
 916	void __iomem *base_addr_virt = NULL;
 917	void __iomem *msg_addr_virt = NULL;
 918	int dma64 = 0;
 919
 920	int raptorFlag = FALSE;
 921
 922	if(pci_enable_device(pDev)) {
 923		return -EINVAL;
 924	}
 925
 926	if (pci_request_regions(pDev, "dpt_i2o")) {
 927		PERROR("dpti: adpt_config_hba: pci request region failed\n");
 928		return -EINVAL;
 929	}
 930
 931	pci_set_master(pDev);
 932
 933	/*
 934	 *	See if we should enable dma64 mode.
 935	 */
 936	if (sizeof(dma_addr_t) > 4 &&
 937	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
 938		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
 939			dma64 = 1;
 940	}
 941	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
 942		return -EINVAL;
 943
 944	/* adapter only supports message blocks below 4GB */
 945	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));
 946
 947	base_addr0_phys = pci_resource_start(pDev,0);
 948	hba_map0_area_size = pci_resource_len(pDev,0);
 949
 950	// Check if standard PCI card or single BAR Raptor
 951	if(pDev->device == PCI_DPT_DEVICE_ID){
 952		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
 953			// Raptor card with this device id needs 4M
 954			hba_map0_area_size = 0x400000;
 955		} else { // Not Raptor - it is a PCI card
 956			if(hba_map0_area_size > 0x100000 ){ 
 957				hba_map0_area_size = 0x100000;
 958			}
 959		}
 960	} else {// Raptor split BAR config
 961		// Use BAR1 in this configuration
 962		base_addr1_phys = pci_resource_start(pDev,1);
 963		hba_map1_area_size = pci_resource_len(pDev,1);
 964		raptorFlag = TRUE;
 965	}
 966
 967#if BITS_PER_LONG == 64
 968	/*
 969	 *	The original Adaptec 64 bit driver has this comment here:
 970	 *	"x86_64 machines need more optimal mappings"
 971	 *
 972	 *	I assume some HBAs report ridiculously large mappings
 973	 *	and we need to limit them on platforms with IOMMUs.
 974	 */
 975	if (raptorFlag == TRUE) {
 976		if (hba_map0_area_size > 128)
 977			hba_map0_area_size = 128;
 978		if (hba_map1_area_size > 524288)
 979			hba_map1_area_size = 524288;
 980	} else {
 981		if (hba_map0_area_size > 524288)
 982			hba_map0_area_size = 524288;
 983	}
 984#endif
 985
 986	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
 987	if (!base_addr_virt) {
 988		pci_release_regions(pDev);
 989		PERROR("dpti: adpt_config_hba: io remap failed\n");
 990		return -EINVAL;
 991	}
 992
 993        if(raptorFlag == TRUE) {
 994		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
 995		if (!msg_addr_virt) {
 996			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
 997			iounmap(base_addr_virt);
 998			pci_release_regions(pDev);
 999			return -EINVAL;
1000		}
1001	} else {
1002		msg_addr_virt = base_addr_virt;
1003	}
1004	
1005	// Allocate and zero the data structure
1006	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
1007	if (!pHba) {
1008		if (msg_addr_virt != base_addr_virt)
1009			iounmap(msg_addr_virt);
1010		iounmap(base_addr_virt);
1011		pci_release_regions(pDev);
1012		return -ENOMEM;
1013	}
1014
1015	mutex_lock(&adpt_configuration_lock);
1016
1017	if(hba_chain != NULL){
1018		for(p = hba_chain; p->next; p = p->next);
1019		p->next = pHba;
1020	} else {
1021		hba_chain = pHba;
1022	}
1023	pHba->next = NULL;
1024	pHba->unit = hba_count;
1025	sprintf(pHba->name, "dpti%d", hba_count);
1026	hba_count++;
1027	
1028	mutex_unlock(&adpt_configuration_lock);
1029
1030	pHba->pDev = pDev;
1031	pHba->base_addr_phys = base_addr0_phys;
1032
1033	// Set up the Virtual Base Address of the I2O Device
1034	pHba->base_addr_virt = base_addr_virt;
1035	pHba->msg_addr_virt = msg_addr_virt;
1036	pHba->irq_mask = base_addr_virt+0x30;
1037	pHba->post_port = base_addr_virt+0x40;
1038	pHba->reply_port = base_addr_virt+0x44;
1039
1040	pHba->hrt = NULL;
1041	pHba->lct = NULL;
1042	pHba->lct_size = 0;
1043	pHba->status_block = NULL;
1044	pHba->post_count = 0;
1045	pHba->state = DPTI_STATE_RESET;
1046	pHba->pDev = pDev;
1047	pHba->devices = NULL;
1048	pHba->dma64 = dma64;
1049
1050	// Initializing the spinlocks
1051	spin_lock_init(&pHba->state_lock);
1052	spin_lock_init(&adpt_post_wait_lock);
1053
1054	if(raptorFlag == 0){
1055		printk(KERN_INFO "Adaptec I2O RAID controller"
1056				 " %d at %p size=%x irq=%d%s\n", 
1057			hba_count-1, base_addr_virt,
1058			hba_map0_area_size, pDev->irq,
1059			dma64 ? " (64-bit DMA)" : "");
1060	} else {
1061		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1062			hba_count-1, pDev->irq,
1063			dma64 ? " (64-bit DMA)" : "");
1064		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1065		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1066	}
1067
1068	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
1069		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1070		adpt_i2o_delete_hba(pHba);
1071		return -EINVAL;
1072	}
1073
1074	return 0;
1075}
1076
1077
1078static void adpt_i2o_delete_hba(adpt_hba* pHba)
1079{
1080	adpt_hba* p1;
1081	adpt_hba* p2;
1082	struct i2o_device* d;
1083	struct i2o_device* next;
1084	int i;
1085	int j;
1086	struct adpt_device* pDev;
1087	struct adpt_device* pNext;
1088
1089
1090	mutex_lock(&adpt_configuration_lock);
1091	if(pHba->host){
1092		free_irq(pHba->host->irq, pHba);
1093	}
1094	p2 = NULL;
1095	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1096		if(p1 == pHba) {
1097			if(p2) {
1098				p2->next = p1->next;
1099			} else {
1100				hba_chain = p1->next;
1101			}
1102			break;
1103		}
1104	}
1105
1106	hba_count--;
1107	mutex_unlock(&adpt_configuration_lock);
1108
1109	iounmap(pHba->base_addr_virt);
1110	pci_release_regions(pHba->pDev);
1111	if(pHba->msg_addr_virt != pHba->base_addr_virt){
1112		iounmap(pHba->msg_addr_virt);
1113	}
1114	if(pHba->FwDebugBuffer_P)
1115	   	iounmap(pHba->FwDebugBuffer_P);
1116	if(pHba->hrt) {
1117		dma_free_coherent(&pHba->pDev->dev,
1118			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
1119			pHba->hrt, pHba->hrt_pa);
1120	}
1121	if(pHba->lct) {
1122		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
1123			pHba->lct, pHba->lct_pa);
1124	}
1125	if(pHba->status_block) {
1126		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
1127			pHba->status_block, pHba->status_block_pa);
1128	}
1129	if(pHba->reply_pool) {
1130		dma_free_coherent(&pHba->pDev->dev,
1131			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
1132			pHba->reply_pool, pHba->reply_pool_pa);
1133	}
1134
1135	for(d = pHba->devices; d ; d = next){
1136		next = d->next;
1137		kfree(d);
1138	}
1139	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1140		for(j = 0; j < MAX_ID; j++){
1141			if(pHba->channel[i].device[j] != NULL){
1142				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1143					pNext = pDev->next_lun;
1144					kfree(pDev);
1145				}
1146			}
1147		}
1148	}
1149	pci_dev_put(pHba->pDev);
1150	if (adpt_sysfs_class)
1151		device_destroy(adpt_sysfs_class,
1152				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
1153	kfree(pHba);
1154
1155	if(hba_count <= 0){
1156		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);   
1157		if (adpt_sysfs_class) {
1158			class_destroy(adpt_sysfs_class);
1159			adpt_sysfs_class = NULL;
1160		}
1161	}
1162}
1163
1164static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
1165{
1166	struct adpt_device* d;
1167
1168	if(chan < 0 || chan >= MAX_CHANNEL)
1169		return NULL;
1170	
1171	d = pHba->channel[chan].device[id];
1172	if(!d || d->tid == 0) {
1173		return NULL;
1174	}
1175
1176	/* If it is the only lun at that address then this should match*/
1177	if(d->scsi_lun == lun){
1178		return d;
1179	}
1180
1181	/* else we need to look through all the luns */
1182	for(d=d->next_lun ; d ; d = d->next_lun){
1183		if(d->scsi_lun == lun){
1184			return d;
1185		}
1186	}
1187	return NULL;
1188}
1189
1190
1191static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1192{
1193	// I used my own version of the WAIT_QUEUE_HEAD
1194	// to handle some version differences
1195	// When embedded in the kernel this could go back to the vanilla one
1196	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1197	int status = 0;
1198	ulong flags = 0;
1199	struct adpt_i2o_post_wait_data *p1, *p2;
1200	struct adpt_i2o_post_wait_data *wait_data =
1201		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
1202	DECLARE_WAITQUEUE(wait, current);
1203
1204	if (!wait_data)
1205		return -ENOMEM;
1206
1207	/*
1208	 * The spin locking is needed to keep anyone from playing
1209	 * with the queue pointers and id while we do the same
1210	 */
1211	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1212       // TODO we need a MORE unique way of getting ids
1213       // to support async LCT get
1214	wait_data->next = adpt_post_wait_queue;
1215	adpt_post_wait_queue = wait_data;
1216	adpt_post_wait_id++;
1217	adpt_post_wait_id &= 0x7fff;
1218	wait_data->id =  adpt_post_wait_id;
1219	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1220
1221	wait_data->wq = &adpt_wq_i2o_post;
1222	wait_data->status = -ETIMEDOUT;
1223
1224	add_wait_queue(&adpt_wq_i2o_post, &wait);
1225
1226	msg[2] |= 0x80000000 | ((u32)wait_data->id);
1227	timeout *= HZ;
1228	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1229		set_current_state(TASK_INTERRUPTIBLE);
1230		if(pHba->host)
1231			spin_unlock_irq(pHba->host->host_lock);
1232		if (!timeout)
1233			schedule();
1234		else{
1235			timeout = schedule_timeout(timeout);
1236			if (timeout == 0) {
1237				// I/O issued, but cannot get result in
1238				// specified time. Freeing resorces is
1239				// dangerous.
1240				status = -ETIME;
1241			}
1242		}
1243		if(pHba->host)
1244			spin_lock_irq(pHba->host->host_lock);
1245	}
1246	remove_wait_queue(&adpt_wq_i2o_post, &wait);
1247
1248	if(status == -ETIMEDOUT){
1249		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1250		// We will have to free the wait_data memory during shutdown
1251		return status;
1252	}
1253
1254	/* Remove the entry from the queue.  */
1255	p2 = NULL;
1256	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1257	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1258		if(p1 == wait_data) {
1259			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1260				status = -EOPNOTSUPP;
1261			}
1262			if(p2) {
1263				p2->next = p1->next;
1264			} else {
1265				adpt_post_wait_queue = p1->next;
1266			}
1267			break;
1268		}
1269	}
1270	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1271
1272	kfree(wait_data);
1273
1274	return status;
1275}
1276
1277
1278static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1279{
1280
1281	u32 m = EMPTY_QUEUE;
1282	u32 __iomem *msg;
1283	ulong timeout = jiffies + 30*HZ;
1284	do {
1285		rmb();
1286		m = readl(pHba->post_port);
1287		if (m != EMPTY_QUEUE) {
1288			break;
1289		}
1290		if(time_after(jiffies,timeout)){
1291			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1292			return -ETIMEDOUT;
1293		}
1294		schedule_timeout_uninterruptible(1);
1295	} while(m == EMPTY_QUEUE);
1296		
1297	msg = pHba->msg_addr_virt + m;
1298	memcpy_toio(msg, data, len);
1299	wmb();
1300
1301	//post message
1302	writel(m, pHba->post_port);
1303	wmb();
1304
1305	return 0;
1306}
1307
1308
1309static void adpt_i2o_post_wait_complete(u32 context, int status)
1310{
1311	struct adpt_i2o_post_wait_data *p1 = NULL;
1312	/*
1313	 * We need to search through the adpt_post_wait
1314	 * queue to see if the given message is still
1315	 * outstanding.  If not, it means that the IOP
1316	 * took longer to respond to the message than we
1317	 * had allowed and timer has already expired.
1318	 * Not much we can do about that except log
1319	 * it for debug purposes, increase timeout, and recompile
1320	 *
1321	 * Lock needed to keep anyone from moving queue pointers
1322	 * around while we're looking through them.
1323	 */
1324
1325	context &= 0x7fff;
1326
1327	spin_lock(&adpt_post_wait_lock);
1328	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1329		if(p1->id == context) {
1330			p1->status = status;
1331			spin_unlock(&adpt_post_wait_lock);
1332			wake_up_interruptible(p1->wq);
1333			return;
1334		}
1335	}
1336	spin_unlock(&adpt_post_wait_lock);
1337        // If this happens we lose commands that probably really completed
1338	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1339	printk(KERN_DEBUG"      Tasks in wait queue:\n");
1340	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1341		printk(KERN_DEBUG"           %d\n",p1->id);
1342	}
1343	return;
1344}
1345
1346static s32 adpt_i2o_reset_hba(adpt_hba* pHba)			
1347{
1348	u32 msg[8];
1349	u8* status;
1350	dma_addr_t addr;
1351	u32 m = EMPTY_QUEUE ;
1352	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1353
1354	if(pHba->initialized  == FALSE) {	// First time reset should be quick
1355		timeout = jiffies + (25*HZ);
1356	} else {
1357		adpt_i2o_quiesce_hba(pHba);
1358	}
1359
1360	do {
1361		rmb();
1362		m = readl(pHba->post_port);
1363		if (m != EMPTY_QUEUE) {
1364			break;
1365		}
1366		if(time_after(jiffies,timeout)){
1367			printk(KERN_WARNING"Timeout waiting for message!\n");
1368			return -ETIMEDOUT;
1369		}
1370		schedule_timeout_uninterruptible(1);
1371	} while (m == EMPTY_QUEUE);
1372
1373	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
1374	if(status == NULL) {
1375		adpt_send_nop(pHba, m);
1376		printk(KERN_ERR"IOP reset failed - no free memory.\n");
1377		return -ENOMEM;
1378	}
1379	memset(status,0,4);
1380
1381	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1382	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1383	msg[2]=0;
1384	msg[3]=0;
1385	msg[4]=0;
1386	msg[5]=0;
1387	msg[6]=dma_low(addr);
1388	msg[7]=dma_high(addr);
1389
1390	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1391	wmb();
1392	writel(m, pHba->post_port);
1393	wmb();
1394
1395	while(*status == 0){
1396		if(time_after(jiffies,timeout)){
1397			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1398			/* We lose 4 bytes of "status" here, but we cannot
1399			   free these because controller may awake and corrupt
1400			   those bytes at any time */
1401			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1402			return -ETIMEDOUT;
1403		}
1404		rmb();
1405		schedule_timeout_uninterruptible(1);
1406	}
1407
1408	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1409		PDEBUG("%s: Reset in progress...\n", pHba->name);
1410		// Here we wait for message frame to become available
1411		// indicated that reset has finished
1412		do {
1413			rmb();
1414			m = readl(pHba->post_port);
1415			if (m != EMPTY_QUEUE) {
1416				break;
1417			}
1418			if(time_after(jiffies,timeout)){
1419				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1420				/* We lose 4 bytes of "status" here, but we
1421				   cannot free these because controller may
1422				   awake and corrupt those bytes at any time */
1423				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1424				return -ETIMEDOUT;
1425			}
1426			schedule_timeout_uninterruptible(1);
1427		} while (m == EMPTY_QUEUE);
1428		// Flush the offset
1429		adpt_send_nop(pHba, m);
1430	}
1431	adpt_i2o_status_get(pHba);
1432	if(*status == 0x02 ||
1433			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1434		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1435				pHba->name);
1436	} else {
1437		PDEBUG("%s: Reset completed.\n", pHba->name);
1438	}
1439
1440	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
1441#ifdef UARTDELAY
1442	// This delay is to allow someone attached to the card through the debug UART to 
1443	// set up the dump levels that they want before the rest of the initialization sequence
1444	adpt_delay(20000);
1445#endif
1446	return 0;
1447}
1448
1449
1450static int adpt_i2o_parse_lct(adpt_hba* pHba)
1451{
1452	int i;
1453	int max;
1454	int tid;
1455	struct i2o_device *d;
1456	i2o_lct *lct = pHba->lct;
1457	u8 bus_no = 0;
1458	s16 scsi_id;
1459	u64 scsi_lun;
1460	u32 buf[10]; // larger than 7, or 8 ...
1461	struct adpt_device* pDev; 
1462	
1463	if (lct == NULL) {
1464		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1465		return -1;
1466	}
1467	
1468	max = lct->table_size;	
1469	max -= 3;
1470	max /= 9;
1471
1472	for(i=0;i<max;i++) {
1473		if( lct->lct_entry[i].user_tid != 0xfff){
1474			/*
1475			 * If we have hidden devices, we need to inform the upper layers about
1476			 * the possible maximum id reference to handle device access when
1477			 * an array is disassembled. This code has no other purpose but to
1478			 * allow us future access to devices that are currently hidden
1479			 * behind arrays, hotspares or have not been configured (JBOD mode).
1480			 */
1481			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1482			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1483			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1484			    	continue;
1485			}
1486			tid = lct->lct_entry[i].tid;
1487			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1488			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1489				continue;
1490			}
1491			bus_no = buf[0]>>16;
1492			scsi_id = buf[1];
1493			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1494			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1495				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1496				continue;
1497			}
1498			if (scsi_id >= MAX_ID){
1499				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1500				continue;
1501			}
1502			if(bus_no > pHba->top_scsi_channel){
1503				pHba->top_scsi_channel = bus_no;
1504			}
1505			if(scsi_id > pHba->top_scsi_id){
1506				pHba->top_scsi_id = scsi_id;
1507			}
1508			if(scsi_lun > pHba->top_scsi_lun){
1509				pHba->top_scsi_lun = scsi_lun;
1510			}
1511			continue;
1512		}
1513		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1514		if(d==NULL)
1515		{
1516			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1517			return -ENOMEM;
1518		}
1519		
1520		d->controller = pHba;
1521		d->next = NULL;
1522
1523		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1524
1525		d->flags = 0;
1526		tid = d->lct_data.tid;
1527		adpt_i2o_report_hba_unit(pHba, d);
1528		adpt_i2o_install_device(pHba, d);
1529	}
1530	bus_no = 0;
1531	for(d = pHba->devices; d ; d = d->next) {
1532		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
1533		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
1534			tid = d->lct_data.tid;
1535			// TODO get the bus_no from hrt-but for now they are in order
1536			//bus_no = 
1537			if(bus_no > pHba->top_scsi_channel){
1538				pHba->top_scsi_channel = bus_no;
1539			}
1540			pHba->channel[bus_no].type = d->lct_data.class_id;
1541			pHba->channel[bus_no].tid = tid;
1542			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1543			{
1544				pHba->channel[bus_no].scsi_id = buf[1];
1545				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1546			}
1547			// TODO remove - this is just until we get from hrt
1548			bus_no++;
1549			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1550				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1551				break;
1552			}
1553		}
1554	}
1555
1556	// Setup adpt_device table
1557	for(d = pHba->devices; d ; d = d->next) {
1558		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1559		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
1560		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1561
1562			tid = d->lct_data.tid;
1563			scsi_id = -1;
1564			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1565			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1566				bus_no = buf[0]>>16;
1567				scsi_id = buf[1];
1568				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1569				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1570					continue;
1571				}
1572				if (scsi_id >= MAX_ID) {
1573					continue;
1574				}
1575				if( pHba->channel[bus_no].device[scsi_id] == NULL){
1576					pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1577					if(pDev == NULL) {
1578						return -ENOMEM;
1579					}
1580					pHba->channel[bus_no].device[scsi_id] = pDev;
1581				} else {
1582					for( pDev = pHba->channel[bus_no].device[scsi_id];	
1583							pDev->next_lun; pDev = pDev->next_lun){
1584					}
1585					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1586					if(pDev->next_lun == NULL) {
1587						return -ENOMEM;
1588					}
1589					pDev = pDev->next_lun;
1590				}
1591				pDev->tid = tid;
1592				pDev->scsi_channel = bus_no;
1593				pDev->scsi_id = scsi_id;
1594				pDev->scsi_lun = scsi_lun;
1595				pDev->pI2o_dev = d;
1596				d->owner = pDev;
1597				pDev->type = (buf[0])&0xff;
1598				pDev->flags = (buf[0]>>8)&0xff;
1599				if(scsi_id > pHba->top_scsi_id){
1600					pHba->top_scsi_id = scsi_id;
1601				}
1602				if(scsi_lun > pHba->top_scsi_lun){
1603					pHba->top_scsi_lun = scsi_lun;
1604				}
1605			}
1606			if(scsi_id == -1){
1607				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1608						d->lct_data.identity_tag);
1609			}
1610		}
1611	}
1612	return 0;
1613}
1614
1615
1616/*
1617 *	Each I2O controller has a chain of devices on it - these match
1618 *	the useful parts of the LCT of the board.
1619 */
1620 
1621static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1622{
1623	mutex_lock(&adpt_configuration_lock);
1624	d->controller=pHba;
1625	d->owner=NULL;
1626	d->next=pHba->devices;
1627	d->prev=NULL;
1628	if (pHba->devices != NULL){
1629		pHba->devices->prev=d;
1630	}
1631	pHba->devices=d;
1632	*d->dev_name = 0;
1633
1634	mutex_unlock(&adpt_configuration_lock);
1635	return 0;
1636}
1637
1638static int adpt_open(struct inode *inode, struct file *file)
1639{
1640	int minor;
1641	adpt_hba* pHba;
1642
1643	mutex_lock(&adpt_mutex);
1644	//TODO check for root access
1645	//
1646	minor = iminor(inode);
1647	if (minor >= hba_count) {
1648		mutex_unlock(&adpt_mutex);
1649		return -ENXIO;
1650	}
1651	mutex_lock(&adpt_configuration_lock);
1652	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1653		if (pHba->unit == minor) {
1654			break;	/* found adapter */
1655		}
1656	}
1657	if (pHba == NULL) {
1658		mutex_unlock(&adpt_configuration_lock);
1659		mutex_unlock(&adpt_mutex);
1660		return -ENXIO;
1661	}
1662
1663//	if(pHba->in_use){
1664	//	mutex_unlock(&adpt_configuration_lock);
1665//		return -EBUSY;
1666//	}
1667
1668	pHba->in_use = 1;
1669	mutex_unlock(&adpt_configuration_lock);
1670	mutex_unlock(&adpt_mutex);
1671
1672	return 0;
1673}
1674
1675static int adpt_close(struct inode *inode, struct file *file)
1676{
1677	int minor;
1678	adpt_hba* pHba;
1679
1680	minor = iminor(inode);
1681	if (minor >= hba_count) {
1682		return -ENXIO;
1683	}
1684	mutex_lock(&adpt_configuration_lock);
1685	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1686		if (pHba->unit == minor) {
1687			break;	/* found adapter */
1688		}
1689	}
1690	mutex_unlock(&adpt_configuration_lock);
1691	if (pHba == NULL) {
1692		return -ENXIO;
1693	}
1694
1695	pHba->in_use = 0;
1696
1697	return 0;
1698}
1699
1700
1701static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1702{
1703	u32 msg[MAX_MESSAGE_SIZE];
1704	u32* reply = NULL;
1705	u32 size = 0;
1706	u32 reply_size = 0;
1707	u32 __user *user_msg = arg;
1708	u32 __user * user_reply = NULL;
1709	void *sg_list[pHba->sg_tablesize];
1710	u32 sg_offset = 0;
1711	u32 sg_count = 0;
1712	int sg_index = 0;
1713	u32 i = 0;
1714	u32 rcode = 0;
1715	void *p = NULL;
1716	dma_addr_t addr;
1717	ulong flags = 0;
1718
1719	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1720	// get user msg size in u32s 
1721	if(get_user(size, &user_msg[0])){
1722		return -EFAULT;
1723	}
1724	size = size>>16;
1725
1726	user_reply = &user_msg[size];
1727	if(size > MAX_MESSAGE_SIZE){
1728		return -EFAULT;
1729	}
1730	size *= 4; // Convert to bytes
1731
1732	/* Copy in the user's I2O command */
1733	if(copy_from_user(msg, user_msg, size)) {
1734		return -EFAULT;
1735	}
1736	get_user(reply_size, &user_reply[0]);
1737	reply_size = reply_size>>16;
1738	if(reply_size > REPLY_FRAME_SIZE){
1739		reply_size = REPLY_FRAME_SIZE;
1740	}
1741	reply_size *= 4;
1742	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1743	if(reply == NULL) {
1744		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1745		return -ENOMEM;
1746	}
1747	sg_offset = (msg[0]>>4)&0xf;
1748	msg[2] = 0x40000000; // IOCTL context
1749	msg[3] = adpt_ioctl_to_context(pHba, reply);
1750	if (msg[3] == (u32)-1) {
1751		kfree(reply);
1752		return -EBUSY;
1753	}
1754
1755	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1756	if(sg_offset) {
1757		// TODO add 64 bit API
1758		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
1759		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1760		if (sg_count > pHba->sg_tablesize){
1761			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1762			kfree (reply);
1763			return -EINVAL;
1764		}
1765
1766		for(i = 0; i < sg_count; i++) {
1767			int sg_size;
1768
1769			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1770				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
1771				rcode = -EINVAL;
1772				goto cleanup;
1773			}
1774			sg_size = sg[i].flag_count & 0xffffff;      
1775			/* Allocate memory for the transfer */
1776			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1777			if(!p) {
1778				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1779						pHba->name,sg_size,i,sg_count);
1780				rcode = -ENOMEM;
1781				goto cleanup;
1782			}
1783			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1784			/* Copy in the user's SG buffer if necessary */
1785			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1786				// sg_simple_element API is 32 bit
1787				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1788					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1789					rcode = -EFAULT;
1790					goto cleanup;
1791				}
1792			}
1793			/* sg_simple_element API is 32 bit, but addr < 4GB */
1794			sg[i].addr_bus = addr;
1795		}
1796	}
1797
1798	do {
1799		/*
1800		 * Stop any new commands from enterring the
1801		 * controller while processing the ioctl
1802		 */
1803		if (pHba->host) {
1804			scsi_block_requests(pHba->host);
1805			spin_lock_irqsave(pHba->host->host_lock, flags);
1806		}
1807		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1808		if (rcode != 0)
1809			printk("adpt_i2o_passthru: post wait failed %d %p\n",
1810					rcode, reply);
1811		if (pHba->host) {
1812			spin_unlock_irqrestore(pHba->host->host_lock, flags);
1813			scsi_unblock_requests(pHba->host);
1814		}
1815	} while (rcode == -ETIMEDOUT);
1816
1817	if(rcode){
1818		goto cleanup;
1819	}
1820
1821	if(sg_offset) {
1822	/* Copy back the Scatter Gather buffers back to user space */
1823		u32 j;
1824		// TODO add 64 bit API
1825		struct sg_simple_element* sg;
1826		int sg_size;
1827
1828		// re-acquire the original message to handle correctly the sg copy operation
1829		memset(&msg, 0, MAX_MESSAGE_SIZE*4); 
1830		// get user msg size in u32s 
1831		if(get_user(size, &user_msg[0])){
1832			rcode = -EFAULT; 
1833			goto cleanup; 
1834		}
1835		size = size>>16;
1836		size *= 4;
1837		if (size > MAX_MESSAGE_SIZE) {
1838			rcode = -EINVAL;
1839			goto cleanup;
1840		}
1841		/* Copy in the user's I2O command */
1842		if (copy_from_user (msg, user_msg, size)) {
1843			rcode = -EFAULT;
1844			goto cleanup;
1845		}
1846		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1847
1848		// TODO add 64 bit API
1849		sg 	 = (struct sg_simple_element*)(msg + sg_offset);
1850		for (j = 0; j < sg_count; j++) {
1851			/* Copy out the SG list to user's buffer if necessary */
1852			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1853				sg_size = sg[j].flag_count & 0xffffff; 
1854				// sg_simple_element API is 32 bit
1855				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1856					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1857					rcode = -EFAULT;
1858					goto cleanup;
1859				}
1860			}
1861		}
1862	} 
1863
1864	/* Copy back the reply to user space */
1865	if (reply_size) {
1866		// we wrote our own values for context - now restore the user supplied ones
1867		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1868			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1869			rcode = -EFAULT;
1870		}
1871		if(copy_to_user(user_reply, reply, reply_size)) {
1872			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1873			rcode = -EFAULT;
1874		}
1875	}
1876
1877
1878cleanup:
1879	if (rcode != -ETIME && rcode != -EINTR) {
1880		struct sg_simple_element *sg =
1881				(struct sg_simple_element*) (msg +sg_offset);
1882		kfree (reply);
1883		while(sg_index) {
1884			if(sg_list[--sg_index]) {
1885				dma_free_coherent(&pHba->pDev->dev,
1886					sg[sg_index].flag_count & 0xffffff,
1887					sg_list[sg_index],
1888					sg[sg_index].addr_bus);
1889			}
1890		}
1891	}
1892	return rcode;
1893}
1894
1895#if defined __ia64__ 
1896static void adpt_ia64_info(sysInfo_S* si)
1897{
1898	// This is all the info we need for now
1899	// We will add more info as our new
1900	// management utility requires it
1901	si->processorType = PROC_IA64;
1902}
1903#endif
1904
1905#if defined __sparc__ 
1906static void adpt_sparc_info(sysInfo_S* si)
1907{
1908	// This is all the info we need for now
1909	// We will add more info as our new
1910	// management utility requires it
1911	si->processorType = PROC_ULTRASPARC;
1912}
1913#endif
1914#if defined __alpha__ 
1915static void adpt_alpha_info(sysInfo_S* si)
1916{
1917	// This is all the info we need for now
1918	// We will add more info as our new
1919	// management utility requires it
1920	si->processorType = PROC_ALPHA;
1921}
1922#endif
1923
1924#if defined __i386__
1925
1926#include <uapi/asm/vm86.h>
1927
1928static void adpt_i386_info(sysInfo_S* si)
1929{
1930	// This is all the info we need for now
1931	// We will add more info as our new
1932	// management utility requires it
1933	switch (boot_cpu_data.x86) {
1934	case CPU_386:
1935		si->processorType = PROC_386;
1936		break;
1937	case CPU_486:
1938		si->processorType = PROC_486;
1939		break;
1940	case CPU_586:
1941		si->processorType = PROC_PENTIUM;
1942		break;
1943	default:  // Just in case 
1944		si->processorType = PROC_PENTIUM;
1945		break;
1946	}
1947}
1948#endif
1949
1950/*
1951 * This routine returns information about the system.  This does not affect
1952 * any logic and, if the info is wrong, it doesn't matter.
1953 */
1954
1955/* Get all the info we cannot get from kernel services */
1956static int adpt_system_info(void __user *buffer)
1957{
1958	sysInfo_S si;
1959
1960	memset(&si, 0, sizeof(si));
1961
1962	si.osType = OS_LINUX;
1963	si.osMajorVersion = 0;
1964	si.osMinorVersion = 0;
1965	si.osRevision = 0;
1966	si.busType = SI_PCI_BUS;
1967	si.processorFamily = DPTI_sig.dsProcessorFamily;
1968
1969#if defined __i386__
1970	adpt_i386_info(&si);
1971#elif defined (__ia64__)
1972	adpt_ia64_info(&si);
1973#elif defined(__sparc__)
1974	adpt_sparc_info(&si);
1975#elif defined (__alpha__)
1976	adpt_alpha_info(&si);
1977#else
1978	si.processorType = 0xff ;
1979#endif
1980	if (copy_to_user(buffer, &si, sizeof(si))){
1981		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1982		return -EFAULT;
1983	}
1984
1985	return 0;
1986}
1987
1988static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
1989{
1990	int minor;
1991	int error = 0;
1992	adpt_hba* pHba;
1993	ulong flags = 0;
1994	void __user *argp = (void __user *)arg;
1995
1996	minor = iminor(inode);
1997	if (minor >= DPTI_MAX_HBA){
1998		return -ENXIO;
1999	}
2000	mutex_lock(&adpt_configuration_lock);
2001	for (pHba = hba_chain; pHba; pHba = pHba->next) {
2002		if (pHba->unit == minor) {
2003			break;	/* found adapter */
2004		}
2005	}
2006	mutex_unlock(&adpt_configuration_lock);
2007	if(pHba == NULL){
2008		return -ENXIO;
2009	}
2010
2011	while((volatile u32) pHba->state & DPTI_STATE_RESET )
2012		schedule_timeout_uninterruptible(2);
2013
2014	switch (cmd) {
2015	// TODO: handle 3 cases
2016	case DPT_SIGNATURE:
2017		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2018			return -EFAULT;
2019		}
2020		break;
2021	case I2OUSRCMD:
2022		return adpt_i2o_passthru(pHba, argp);
2023
2024	case DPT_CTRLINFO:{
2025		drvrHBAinfo_S HbaInfo;
2026
2027#define FLG_OSD_PCI_VALID 0x0001
2028#define FLG_OSD_DMA	  0x0002
2029#define FLG_OSD_I2O	  0x0004
2030		memset(&HbaInfo, 0, sizeof(HbaInfo));
2031		HbaInfo.drvrHBAnum = pHba->unit;
2032		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2033		HbaInfo.blinkState = adpt_read_blink_led(pHba);
2034		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
2035		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn); 
2036		HbaInfo.Interrupt = pHba->pDev->irq; 
2037		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2038		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2039			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2040			return -EFAULT;
2041		}
2042		break;
2043		}
2044	case DPT_SYSINFO:
2045		return adpt_system_info(argp);
2046	case DPT_BLINKLED:{
2047		u32 value;
2048		value = (u32)adpt_read_blink_led(pHba);
2049		if (copy_to_user(argp, &value, sizeof(value))) {
2050			return -EFAULT;
2051		}
2052		break;
2053		}
2054	case I2ORESETCMD: {
2055		struct Scsi_Host *shost = pHba->host;
2056
2057		if (shost)
2058			spin_lock_irqsave(shost->host_lock, flags);
2059		adpt_hba_reset(pHba);
2060		if (shost)
2061			spin_unlock_irqrestore(shost->host_lock, flags);
2062		break;
2063	}
2064	case I2ORESCANCMD:
2065		adpt_rescan(pHba);
2066		break;
2067	default:
2068		return -EINVAL;
2069	}
2070
2071	return error;
2072}
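
/*
 * Minimal userspace sketch of driving this interface (illustrative only;
 * it assumes a /dev/dptiN character-device node for this driver -- the
 * node name is not taken from this file):
 *
 *	int fd = open("/dev/dpti0", O_RDWR);
 *	unsigned int blink;
 *
 *	if (fd >= 0 && ioctl(fd, DPT_BLINKLED, &blink) == 0)
 *		printf("blink LED state: 0x%x\n", blink);
 *
 * DPT_SIGNATURE, DPT_CTRLINFO and DPT_SYSINFO copy a structure back to the
 * caller the same way; I2OUSRCMD hands a raw I2O message to
 * adpt_i2o_passthru().
 */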
2073
2074static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2075{
2076	struct inode *inode;
2077	long ret;
2078 
2079	inode = file_inode(file);
2080 
2081	mutex_lock(&adpt_mutex);
2082	ret = adpt_ioctl(inode, file, cmd, arg);
2083	mutex_unlock(&adpt_mutex);
2084
2085	return ret;
2086}
2087
2088#ifdef CONFIG_COMPAT
2089static long compat_adpt_ioctl(struct file *file,
2090				unsigned int cmd, unsigned long arg)
2091{
2092	struct inode *inode;
2093	long ret;
2094 
2095	inode = file_inode(file);
2096 
2097	mutex_lock(&adpt_mutex);
2098 
2099	switch(cmd) {
2100		case DPT_SIGNATURE:
2101		case I2OUSRCMD:
2102		case DPT_CTRLINFO:
2103		case DPT_SYSINFO:
2104		case DPT_BLINKLED:
2105		case I2ORESETCMD:
2106		case I2ORESCANCMD:
2107		case (DPT_TARGET_BUSY & 0xFFFF):
2108		case DPT_TARGET_BUSY:
2109			ret = adpt_ioctl(inode, file, cmd, arg);
2110			break;
2111		default:
2112			ret =  -ENOIOCTLCMD;
2113	}
2114 
2115	mutex_unlock(&adpt_mutex);
2116 
2117	return ret;
2118}
2119#endif
2120
2121static irqreturn_t adpt_isr(int irq, void *dev_id)
2122{
2123	struct scsi_cmnd* cmd;
2124	adpt_hba* pHba = dev_id;
2125	u32 m;
2126	void __iomem *reply;
2127	u32 status=0;
2128	u32 context;
2129	ulong flags = 0;
2130	int handled = 0;
2131
2132	if (pHba == NULL){
2133		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2134		return IRQ_NONE;
2135	}
2136	if(pHba->host)
2137		spin_lock_irqsave(pHba->host->host_lock, flags);
2138
2139	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2140		m = readl(pHba->reply_port);
2141		if(m == EMPTY_QUEUE){
2142			// Try twice then give up
2143			rmb();
2144			m = readl(pHba->reply_port);
2145			if(m == EMPTY_QUEUE){ 
2146				// This really should not happen
2147				printk(KERN_ERR"dpti: Could not get reply frame\n");
2148				goto out;
2149			}
2150		}
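		/*
		 * 'm' is the bus address of the reply frame; translate it
		 * back into a virtual address within our coherent reply pool.
		 */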
2151		if (pHba->reply_pool_pa <= m &&
2152		    m < pHba->reply_pool_pa +
2153			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2154			reply = (u8 *)pHba->reply_pool +
2155						(m - pHba->reply_pool_pa);
2156		} else {
2157			/* Ick, we should *never* be here */
2158			printk(KERN_ERR "dpti: reply frame not from pool\n");
2159			reply = (u8 *)bus_to_virt(m);
2160		}
2161
2162		if (readl(reply) & MSG_FAIL) {
2163			u32 old_m = readl(reply+28); 
2164			void __iomem *msg;
2165			u32 old_context;
2166			PDEBUG("%s: Failed message\n",pHba->name);
2167			if(old_m >= 0x100000){
2168				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2169				writel(m,pHba->reply_port);
2170				continue;
2171			}
2172			// Transaction context is 0 in failed reply frame
2173			msg = pHba->msg_addr_virt + old_m;
2174			old_context = readl(msg+12);
2175			writel(old_context, reply+12);
2176			adpt_send_nop(pHba, old_m);
2177		} 
2178		context = readl(reply+8);
2179		if(context & 0x40000000){ // IOCTL
2180			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2181			if( p != NULL) {
2182				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2183			}
2184			// All IOCTLs will also be post wait
2185		}
2186		if(context & 0x80000000){ // Post wait message
2187			status = readl(reply+16);
2188			if(status  >> 24){
2189				status &=  0xffff; /* Get detail status */
2190			} else {
2191				status = I2O_POST_WAIT_OK;
2192			}
2193			if(!(context & 0x40000000)) {
2194				cmd = adpt_cmd_from_context(pHba,
2195							readl(reply+12));
2196				if(cmd != NULL) {
2197					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2198				}
2199			}
2200			adpt_i2o_post_wait_complete(context, status);
2201		} else { // SCSI message
2202			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2203			if(cmd != NULL){
2204				scsi_dma_unmap(cmd);
2205				if(cmd->serial_number != 0) { // If not timedout
2206					adpt_i2o_to_scsi(reply, cmd);
2207				}
2208			}
2209		}
2210		writel(m, pHba->reply_port);
2211		wmb();
2212		rmb();
2213	}
2214	handled = 1;
2215out:	if(pHba->host)
2216		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2217	return IRQ_RETVAL(handled);
2218}
2219
2220static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2221{
2222	int i;
2223	u32 msg[MAX_MESSAGE_SIZE];
2224	u32* mptr;
2225	u32* lptr;
2226	u32 *lenptr;
2227	int direction;
2228	int scsidir;
2229	int nseg;
2230	u32 len;
2231	u32 reqlen;
2232	s32 rcode;
2233	dma_addr_t addr;
2234
2235	memset(msg, 0 , sizeof(msg));
2236	len = scsi_bufflen(cmd);
2237	direction = 0x00000000;	
2238	
2239	scsidir = 0x00000000;			// DATA NO XFER
2240	if(len) {
2241		/*
2242		 * Set SCBFlags to indicate if data is being transferred
2243		 * in or out, or no data transfer
2244		 * Note:  No need to check for a negative index since
2245		 * cmd->cmnd[0] is an unsigned char
2246		 */
2247		switch(cmd->sc_data_direction){
2248		case DMA_FROM_DEVICE:
2249			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2250			break;
2251		case DMA_TO_DEVICE:
2252			direction=0x04000000;	// SGL OUT
2253			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
2254			break;
2255		case DMA_NONE:
2256			break;
2257		case DMA_BIDIRECTIONAL:
2258			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2259			// Assume In - and continue;
2260			break;
2261		default:
2262			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2263			     pHba->name, cmd->cmnd[0]);
2264			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2265			cmd->scsi_done(cmd);
2266			return 	0;
2267		}
2268	}
2269	// msg[0] is set later
2270	// I2O_CMD_SCSI_EXEC
2271	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2272	msg[2] = 0;
2273	msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
2274	// Our cards use the transaction context as the tag for queueing
2275	// Adaptec/DPT Private stuff 
2276	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2277	msg[5] = d->tid;
2278	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
2279	// I2O_SCB_FLAG_ENABLE_DISCONNECT | 
2280	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | 
2281	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2282	msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2283
2284	mptr=msg+7;
2285
2286	// Write SCSI command into the message - always 16 byte block 
2287	memset(mptr, 0,  16);
2288	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2289	mptr+=4;
2290	lenptr=mptr++;		/* Remember me - fill in when we know */
2291	if (dpt_dma64(pHba)) {
2292		reqlen = 16;		// SINGLE SGE
2293		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2294		*mptr++ = 1 << PAGE_SHIFT;
2295	} else {
2296		reqlen = 14;		// SINGLE SGE
2297	}
2298	/* Now fill in the SGList and command */
2299
2300	nseg = scsi_dma_map(cmd);
2301	BUG_ON(nseg < 0);
2302	if (nseg) {
2303		struct scatterlist *sg;
2304
2305		len = 0;
2306		scsi_for_each_sg(cmd, sg, nseg, i) {
2307			lptr = mptr;
2308			*mptr++ = direction|0x10000000|sg_dma_len(sg);
2309			len+=sg_dma_len(sg);
2310			addr = sg_dma_address(sg);
2311			*mptr++ = dma_low(addr);
2312			if (dpt_dma64(pHba))
2313				*mptr++ = dma_high(addr);
2314			/* Make this an end of list */
2315			if (i == nseg - 1)
2316				*lptr = direction|0xD0000000|sg_dma_len(sg);
2317		}
2318		reqlen = mptr - msg;
2319		*lenptr = len;
2320		
2321		if(cmd->underflow && len != cmd->underflow){
2322			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2323				len, cmd->underflow);
2324		}
2325	} else {
2326		*lenptr = len = 0;
2327		reqlen = 12;
2328	}
2329	
2330	/* Stick the headers on */
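	/* msg[0]: request length in 32-bit words (upper 16 bits) | SGL offset */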
2331	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2332	
2333	// Send it on its way
2334	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2335	if (rcode == 0) {
2336		return 0;
2337	}
2338	return rcode;
2339}
2340
2341
2342static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2343{
2344	struct Scsi_Host *host;
2345
2346	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2347	if (host == NULL) {
2348		printk(KERN_WARNING "%s: scsi_host_alloc returned NULL\n", pHba->name);
2349		return -1;
2350	}
2351	host->hostdata[0] = (unsigned long)pHba;
2352	pHba->host = host;
2353
2354	host->irq = pHba->pDev->irq;
2355	/* no IO ports, so don't have to set host->io_port and
2356	 * host->n_io_port
2357	 */
2358	host->io_port = 0;
2359	host->n_io_port = 0;
2360				/* see comments in scsi_host.h */
2361	host->max_id = 16;
2362	host->max_lun = 256;
2363	host->max_channel = pHba->top_scsi_channel + 1;
2364	host->cmd_per_lun = 1;
2365	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2366	host->sg_tablesize = pHba->sg_tablesize;
2367	host->can_queue = pHba->post_fifo_size;
2368	host->use_cmd_list = 1;
2369
2370	return 0;
2371}
2372
2373
2374static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2375{
2376	adpt_hba* pHba;
2377	u32 hba_status;
2378	u32 dev_status;
2379	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits 
2380	// This would look cleaner if we just read bytes, but the model
2381	// used for all the rest of the I/O is 4-byte words, so we keep
2382	// that model here
2383	u16 detailed_status = readl(reply+16) &0xffff;
2384	dev_status = (detailed_status & 0xff);
2385	hba_status = detailed_status >> 8;
2386
2387	// calculate resid for sg 
2388	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2389
2390	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2391
2392	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
2393
2394	if(!(reply_flags & MSG_FAIL)) {
2395		switch(detailed_status & I2O_SCSI_DSC_MASK) {
2396		case I2O_SCSI_DSC_SUCCESS:
2397			cmd->result = (DID_OK << 16);
2398			// handle underflow
2399			if (readl(reply+20) < cmd->underflow) {
2400				cmd->result = (DID_ERROR <<16);
2401				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2402			}
2403			break;
2404		case I2O_SCSI_DSC_REQUEST_ABORTED:
2405			cmd->result = (DID_ABORT << 16);
2406			break;
2407		case I2O_SCSI_DSC_PATH_INVALID:
2408		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2409		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2410		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2411		case I2O_SCSI_DSC_NO_ADAPTER:
2412		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2413			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2414				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2415			cmd->result = (DID_TIME_OUT << 16);
2416			break;
2417		case I2O_SCSI_DSC_ADAPTER_BUSY:
2418		case I2O_SCSI_DSC_BUS_BUSY:
2419			cmd->result = (DID_BUS_BUSY << 16);
2420			break;
2421		case I2O_SCSI_DSC_SCSI_BUS_RESET:
2422		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2423			cmd->result = (DID_RESET << 16);
2424			break;
2425		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2426			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2427			cmd->result = (DID_PARITY << 16);
2428			break;
2429		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2430		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2431		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2432		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2433		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2434		case I2O_SCSI_DSC_DATA_OVERRUN:
2435		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2436		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2437		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2438		case I2O_SCSI_DSC_PROVIDE_FAILURE:
2439		case I2O_SCSI_DSC_REQUEST_TERMINATED:
2440		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2441		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2442		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2443		case I2O_SCSI_DSC_INVALID_CDB:
2444		case I2O_SCSI_DSC_LUN_INVALID:
2445		case I2O_SCSI_DSC_SCSI_TID_INVALID:
2446		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2447		case I2O_SCSI_DSC_NO_NEXUS:
2448		case I2O_SCSI_DSC_CDB_RECEIVED:
2449		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2450		case I2O_SCSI_DSC_QUEUE_FROZEN:
2451		case I2O_SCSI_DSC_REQUEST_INVALID:
2452		default:
2453			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2454				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2455			       hba_status, dev_status, cmd->cmnd[0]);
2456			cmd->result = (DID_ERROR << 16);
2457			break;
2458		}
2459
2460		// copy over the request sense data if it was a check
2461		// condition status
2462		if (dev_status == SAM_STAT_CHECK_CONDITION) {
2463			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2464			// Copy over the sense data
2465			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2466			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ && 
2467			   cmd->sense_buffer[2] == DATA_PROTECT ){
2468				/* This is to handle a failed array */
2469				cmd->result = (DID_TIME_OUT << 16);
2470				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2471					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2472					hba_status, dev_status, cmd->cmnd[0]);
2473
2474			}
2475		}
2476	} else {
2477		/* In this condition we could not talk to the tid;
2478		 * the card rejected it.  We should signal a retry
2479		 * for a limited number of retries.
2480		 */
2481		cmd->result = (DID_TIME_OUT << 16);
2482		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2483			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2484			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2485	}
2486
2487	cmd->result |= (dev_status);
2488
2489	if(cmd->scsi_done != NULL){
2490		cmd->scsi_done(cmd);
2491	} 
2492	return cmd->result;
2493}
2494
2495
2496static s32 adpt_rescan(adpt_hba* pHba)
2497{
2498	s32 rcode;
2499	ulong flags = 0;
2500
2501	if(pHba->host)
2502		spin_lock_irqsave(pHba->host->host_lock, flags);
2503	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2504		goto out;
2505	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2506		goto out;
2507	rcode = 0;
2508out:	if(pHba->host)
2509		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2510	return rcode;
2511}
2512
2513
2514static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2515{
2516	int i;
2517	int max;
2518	int tid;
2519	struct i2o_device *d;
2520	i2o_lct *lct = pHba->lct;
2521	u8 bus_no = 0;
2522	s16 scsi_id;
2523	u64 scsi_lun;
2524	u32 buf[10]; // at least 8 u32's
2525	struct adpt_device* pDev = NULL;
2526	struct i2o_device* pI2o_dev = NULL;
2527	
2528	if (lct == NULL) {
2529		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2530		return -1;
2531	}
2532	
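	/*
	 * table_size is in 32-bit words: a 3-word header followed by
	 * 9-word entries, hence the arithmetic below.
	 */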
2533	max = lct->table_size;	
2534	max -= 3;
2535	max /= 9;
2536
2537	// Mark each drive as unscanned
2538	for (d = pHba->devices; d; d = d->next) {
2539		pDev =(struct adpt_device*) d->owner;
2540		if(!pDev){
2541			continue;
2542		}
2543		pDev->state |= DPTI_DEV_UNSCANNED;
2544	}
2545
2546	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2547	
2548	for(i=0;i<max;i++) {
2549		if( lct->lct_entry[i].user_tid != 0xfff){
2550			continue;
2551		}
2552
2553		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2554		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2555		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2556			tid = lct->lct_entry[i].tid;
2557			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2558				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2559				continue;
2560			}
2561			bus_no = buf[0]>>16;
2562			if (bus_no >= MAX_CHANNEL) {	/* Something wrong, skip it */
2563				printk(KERN_WARNING
2564					"%s: Channel number %d out of range\n",
2565					pHba->name, bus_no);
2566				continue;
2567			}
2568
2569			scsi_id = buf[1];
2570			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
2571			pDev = pHba->channel[bus_no].device[scsi_id];
2572			/* walk the LUN chain for this target looking for a match */
2573			while(pDev) {
2574				if(pDev->scsi_lun == scsi_lun) {
2575					break;
2576				}
2577				pDev = pDev->next_lun;
2578			}
2579			if(!pDev ) { // Something new add it
2580				d = kmalloc(sizeof(struct i2o_device),
2581					    GFP_ATOMIC);
2582				if(d==NULL)
2583				{
2584					printk(KERN_CRIT "Out of memory for I2O device data.\n");
2585					return -ENOMEM;
2586				}
2587				
2588				d->controller = pHba;
2589				d->next = NULL;
2590
2591				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2592
2593				d->flags = 0;
2594				adpt_i2o_report_hba_unit(pHba, d);
2595				adpt_i2o_install_device(pHba, d);
2596	
2597				pDev = pHba->channel[bus_no].device[scsi_id];	
2598				if( pDev == NULL){
2599					pDev =
2600					  kzalloc(sizeof(struct adpt_device),
2601						  GFP_ATOMIC);
2602					if(pDev == NULL) {
2603						return -ENOMEM;
2604					}
2605					pHba->channel[bus_no].device[scsi_id] = pDev;
2606				} else {
2607					while (pDev->next_lun) {
2608						pDev = pDev->next_lun;
2609					}
2610					pDev = pDev->next_lun =
2611					  kzalloc(sizeof(struct adpt_device),
2612						  GFP_ATOMIC);
2613					if(pDev == NULL) {
2614						return -ENOMEM;
2615					}
2616				}
2617				pDev->tid = d->lct_data.tid;
2618				pDev->scsi_channel = bus_no;
2619				pDev->scsi_id = scsi_id;
2620				pDev->scsi_lun = scsi_lun;
2621				pDev->pI2o_dev = d;
2622				d->owner = pDev;
2623				pDev->type = (buf[0])&0xff;
2624				pDev->flags = (buf[0]>>8)&0xff;
2625				// Too late, SCSI system has made up its mind, but what the hey ...
2626				if(scsi_id > pHba->top_scsi_id){
2627					pHba->top_scsi_id = scsi_id;
2628				}
2629				if(scsi_lun > pHba->top_scsi_lun){
2630					pHba->top_scsi_lun = scsi_lun;
2631				}
2632				continue;
2633			} // end of new i2o device
2634
2635			// We found an old device - check it
2636			while(pDev) {
2637				if(pDev->scsi_lun == scsi_lun) {
2638					if(!scsi_device_online(pDev->pScsi_dev)) {
2639						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
2640								pHba->name,bus_no,scsi_id,scsi_lun);
2641						if (pDev->pScsi_dev) {
2642							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2643						}
2644					}
2645					d = pDev->pI2o_dev;
2646					if(d->lct_data.tid != tid) { // something changed
2647						pDev->tid = tid;
2648						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2649						if (pDev->pScsi_dev) {
2650							pDev->pScsi_dev->changed = TRUE;
2651							pDev->pScsi_dev->removable = TRUE;
2652						}
2653					}
2654					// Found it - mark it scanned
2655					pDev->state = DPTI_DEV_ONLINE;
2656					break;
2657				}
2658				pDev = pDev->next_lun;
2659			}
2660		}
2661	}
2662	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2663		pDev =(struct adpt_device*) pI2o_dev->owner;
2664		if(!pDev){
2665			continue;
2666		}
2667		// Take offline any drives that previously existed but could not
2668		// be found in the LCT table
2669		if (pDev->state & DPTI_DEV_UNSCANNED){
2670			pDev->state = DPTI_DEV_OFFLINE;
2671			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2672			if (pDev->pScsi_dev) {
2673				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2674			}
2675		}
2676	}
2677	return 0;
2678}
2679
2680static void adpt_fail_posted_scbs(adpt_hba* pHba)
2681{
2682	struct scsi_cmnd* 	cmd = NULL;
2683	struct scsi_device* 	d = NULL;
2684
2685	shost_for_each_device(d, pHba->host) {
2686		unsigned long flags;
2687		spin_lock_irqsave(&d->list_lock, flags);
2688		list_for_each_entry(cmd, &d->cmd_list, list) {
2689			if(cmd->serial_number == 0){
2690				continue;
2691			}
2692			cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2693			cmd->scsi_done(cmd);
2694		}
2695		spin_unlock_irqrestore(&d->list_lock, flags);
2696	}
2697}
2698
2699
2700/*============================================================================
2701 *  Routines from i2o subsystem
2702 *============================================================================
2703 */
2704
2705
2706
2707/*
2708 *	Bring an I2O controller into HOLD state. See the spec.
2709 */
2710static int adpt_i2o_activate_hba(adpt_hba* pHba)
2711{
2712	int rcode;
2713
2714	if(pHba->initialized ) {
2715		if (adpt_i2o_status_get(pHba) < 0) {
2716			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2717				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2718				return rcode;
2719			}
2720			if (adpt_i2o_status_get(pHba) < 0) {
2721				printk(KERN_INFO "HBA not responding.\n");
2722				return -1;
2723			}
2724		}
2725
2726		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2727			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2728			return -1;
2729		}
2730
2731		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2732		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2733		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2734		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2735			adpt_i2o_reset_hba(pHba);			
2736			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2737				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2738				return -1;
2739			}
2740		}
2741	} else {
2742		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2743			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2744			return rcode;
2745		}
2746
2747	}
2748
2749	if (adpt_i2o_init_outbound_q(pHba) < 0) {
2750		return -1;
2751	}
2752
2753	/* In HOLD state */
2754	
2755	if (adpt_i2o_hrt_get(pHba) < 0) {
2756		return -1;
2757	}
2758
2759	return 0;
2760}
2761
2762/*
2763 *	Bring a controller online into OPERATIONAL state. 
2764 */
2765 
2766static int adpt_i2o_online_hba(adpt_hba* pHba)
2767{
2768	if (adpt_i2o_systab_send(pHba) < 0)
2769		return -1;
2770	/* In READY state */
2771
2772	if (adpt_i2o_enable_hba(pHba) < 0)
2773		return -1;
2774
2775	/* In OPERATIONAL state  */
2776	return 0;
2777}
2778
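/*
 * Post a NOP in message frame 'm'.  This hands a frame back to the IOP
 * when we have claimed one but cannot use it (see the failed-message
 * path in adpt_isr() and the allocation-failure path in
 * adpt_i2o_init_outbound_q()).
 */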
2779static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2780{
2781	u32 __iomem *msg;
2782	ulong timeout = jiffies + 5*HZ;
2783
2784	while(m == EMPTY_QUEUE){
2785		rmb();
2786		m = readl(pHba->post_port);
2787		if(m != EMPTY_QUEUE){
2788			break;
2789		}
2790		if(time_after(jiffies,timeout)){
2791			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2792			return 2;
2793		}
2794		schedule_timeout_uninterruptible(1);
2795	}
2796	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2797	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2798	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2799	writel( 0,&msg[2]);
2800	wmb();
2801
2802	writel(m, pHba->post_port);
2803	wmb();
2804	return 0;
2805}
2806
2807static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2808{
2809	u8 *status;
2810	dma_addr_t addr;
2811	u32 __iomem *msg = NULL;
2812	int i;
2813	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2814	u32 m;
2815
2816	do {
2817		rmb();
2818		m = readl(pHba->post_port);
2819		if (m != EMPTY_QUEUE) {
2820			break;
2821		}
2822
2823		if(time_after(jiffies,timeout)){
2824			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2825			return -ETIMEDOUT;
2826		}
2827		schedule_timeout_uninterruptible(1);
2828	} while(m == EMPTY_QUEUE);
2829
2830	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2831
2832	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2833	if (!status) {
2834		adpt_send_nop(pHba, m);
2835		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2836			pHba->name);
2837		return -ENOMEM;
2838	}
2839	memset(status, 0, 4);
2840
2841	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2842	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2843	writel(0, &msg[2]);
2844	writel(0x0106, &msg[3]);	/* Transaction context */
2845	writel(4096, &msg[4]);		/* Host page frame size */
2846	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
2847	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
2848	writel((u32)addr, &msg[7]);
2849
2850	writel(m, pHba->post_port);
2851	wmb();
2852
2853	// Wait for the reply status to come back
2854	do {
2855		if (*status) {
2856			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2857				break;
2858			}
2859		}
2860		rmb();
2861		if(time_after(jiffies,timeout)){
2862			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2863			/* We lose 4 bytes of "status" here, but we
2864			   cannot free them because the controller may
2865			   wake up and corrupt those bytes at any time */
2866			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2867			return -ETIMEDOUT;
2868		}
2869		schedule_timeout_uninterruptible(1);
2870	} while (1);
2871
2872	// If the command was successful, fill the fifo with our reply
2873	// message packets
2874	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2875		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2876		return -2;
2877	}
2878	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2879
2880	if(pHba->reply_pool != NULL) {
2881		dma_free_coherent(&pHba->pDev->dev,
2882			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2883			pHba->reply_pool, pHba->reply_pool_pa);
2884	}
2885
2886	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2887				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2888				&pHba->reply_pool_pa, GFP_KERNEL);
2889	if (!pHba->reply_pool) {
2890		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2891		return -ENOMEM;
2892	}
2893	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2894
2895	for(i = 0; i < pHba->reply_fifo_size; i++) {
2896		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2897			pHba->reply_port);
2898		wmb();
2899	}
2900	adpt_i2o_status_get(pHba);
2901	return 0;
2902}
2903
2904
2905/*
2906 * I2O System Table.  Contains information about
2907 * all the IOPs in the system.  Used to inform IOPs
2908 * about each other's existence.
2909 *
2910 * sys_tbl_ver is the CurrentChangeIndicator that is
2911 * used by IOPs to track changes.
2912 */
2913
2914
2915
2916static s32 adpt_i2o_status_get(adpt_hba* pHba)
2917{
2918	ulong timeout;
2919	u32 m;
2920	u32 __iomem *msg;
2921	u8 *status_block=NULL;
2922
2923	if(pHba->status_block == NULL) {
2924		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2925					sizeof(i2o_status_block),
2926					&pHba->status_block_pa, GFP_KERNEL);
2927		if(pHba->status_block == NULL) {
2928			printk(KERN_ERR
2929			"dpti%d: Get Status Block failed; Out of memory. \n", 
2930			pHba->unit);
2931			return -ENOMEM;
2932		}
2933	}
2934	memset(pHba->status_block, 0, sizeof(i2o_status_block));
2935	status_block = (u8*)(pHba->status_block);
2936	timeout = jiffies+TMOUT_GETSTATUS*HZ;
2937	do {
2938		rmb();
2939		m = readl(pHba->post_port);
2940		if (m != EMPTY_QUEUE) {
2941			break;
2942		}
2943		if(time_after(jiffies,timeout)){
2944			printk(KERN_ERR "%s: Timeout waiting for message !\n",
2945					pHba->name);
2946			return -ETIMEDOUT;
2947		}
2948		schedule_timeout_uninterruptible(1);
2949	} while(m==EMPTY_QUEUE);
2950
2951	
2952	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2953
2954	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2955	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2956	writel(1, &msg[2]);
2957	writel(0, &msg[3]);
2958	writel(0, &msg[4]);
2959	writel(0, &msg[5]);
2960	writel( dma_low(pHba->status_block_pa), &msg[6]);
2961	writel( dma_high(pHba->status_block_pa), &msg[7]);
2962	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2963
2964	//post message
2965	writel(m, pHba->post_port);
2966	wmb();
2967
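	/*
	 * Wait for the IOP to DMA the status block back: byte 87 is the
	 * last byte of the 88-byte block and reads 0xff once the
	 * transfer has completed.
	 */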
2968	while(status_block[87]!=0xff){
2969		if(time_after(jiffies,timeout)){
2970			printk(KERN_ERR"dpti%d: Get status timeout.\n",
2971				pHba->unit);
2972			return -ETIMEDOUT;
2973		}
2974		rmb();
2975		schedule_timeout_uninterruptible(1);
2976	}
2977
2978	// Set up our number of outbound and inbound messages
2979	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2980	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2981		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2982	}
2983
2984	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2985	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2986		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2987	}
2988
2989	// Calculate the Scatter Gather list size
2990	if (dpt_dma64(pHba)) {
2991		pHba->sg_tablesize
2992		  = ((pHba->status_block->inbound_frame_size * 4
2993		  - 14 * sizeof(u32))
2994		  / (sizeof(struct sg_simple_element) + sizeof(u32)));
2995	} else {
2996		pHba->sg_tablesize
2997		  = ((pHba->status_block->inbound_frame_size * 4
2998		  - 12 * sizeof(u32))
2999		  / sizeof(struct sg_simple_element));
3000	}
3001	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3002		pHba->sg_tablesize = SG_LIST_ELEMENTS;
3003	}
3004
3005
3006#ifdef DEBUG
3007	printk("dpti%d: State = ",pHba->unit);
3008	switch(pHba->status_block->iop_state) {
3009		case 0x01:
3010			printk("INIT\n");
3011			break;
3012		case 0x02:
3013			printk("RESET\n");
3014			break;
3015		case 0x04:
3016			printk("HOLD\n");
3017			break;
3018		case 0x05:
3019			printk("READY\n");
3020			break;
3021		case 0x08:
3022			printk("OPERATIONAL\n");
3023			break;
3024		case 0x10:
3025			printk("FAILED\n");
3026			break;
3027		case 0x11:
3028			printk("FAULTED\n");
3029			break;
3030		default:
3031			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3032	}
3033#endif
3034	return 0;
3035}
3036
3037/*
3038 * Get the IOP's Logical Configuration Table
3039 */
3040static int adpt_i2o_lct_get(adpt_hba* pHba)
3041{
3042	u32 msg[8];
3043	int ret;
3044	u32 buf[16];
3045
3046	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3047		pHba->lct_size = pHba->status_block->expected_lct_size;
3048	}
3049	do {
3050		if (pHba->lct == NULL) {
3051			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3052					pHba->lct_size, &pHba->lct_pa,
3053					GFP_ATOMIC);
3054			if(pHba->lct == NULL) {
3055				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3056					pHba->name);
3057				return -ENOMEM;
3058			}
3059		}
3060		memset(pHba->lct, 0, pHba->lct_size);
3061
3062		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3063		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3064		msg[2] = 0;
3065		msg[3] = 0;
3066		msg[4] = 0xFFFFFFFF;	/* All devices */
3067		msg[5] = 0x00000000;	/* Report now */
3068		msg[6] = 0xD0000000|pHba->lct_size;
3069		msg[7] = (u32)pHba->lct_pa;
3070
3071		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3072			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3073				pHba->name, ret);
3074			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3075			return ret;
3076		}
3077
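		/*
		 * The IOP reported a larger table than we allocated:
		 * grow the buffer and ask again.
		 */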
3078		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3079			pHba->lct_size = pHba->lct->table_size << 2;
3080			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3081					pHba->lct, pHba->lct_pa);
3082			pHba->lct = NULL;
3083		}
3084	} while (pHba->lct == NULL);
3085
3086	PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3087
3088
3089	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3090	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3091		pHba->FwDebugBufferSize = buf[1];
3092		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3093						pHba->FwDebugBufferSize);
3094		if (pHba->FwDebugBuffer_P) {
3095			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
3096							FW_DEBUG_FLAGS_OFFSET;
3097			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3098							FW_DEBUG_BLED_OFFSET;
3099			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
3100			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3101						FW_DEBUG_STR_LENGTH_OFFSET;
3102			pHba->FwDebugBuffer_P += buf[2]; 
3103			pHba->FwDebugFlags = 0;
3104		}
3105	}
3106
3107	return 0;
3108}
3109
3110static int adpt_i2o_build_sys_table(void)
3111{
3112	adpt_hba* pHba = hba_chain;
3113	int count = 0;
3114
3115	if (sys_tbl)
3116		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3117					sys_tbl, sys_tbl_pa);
3118
3119	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
3120				(hba_count) * sizeof(struct i2o_sys_tbl_entry);
3121
3122	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3123				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3124	if (!sys_tbl) {
3125		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");	
3126		return -ENOMEM;
3127	}
3128	memset(sys_tbl, 0, sys_tbl_len);
3129
3130	sys_tbl->num_entries = hba_count;
3131	sys_tbl->version = I2OVERSION;
3132	sys_tbl->change_ind = sys_tbl_ind++;
3133
3134	for(pHba = hba_chain; pHba; pHba = pHba->next) {
3135		u64 addr;
3136		// Get updated Status Block so we have the latest information
3137		if (adpt_i2o_status_get(pHba)) {
3138			sys_tbl->num_entries--;
3139			continue; // try next one	
3140		}
3141
3142		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3143		sys_tbl->iops[count].iop_id = pHba->unit + 2;
3144		sys_tbl->iops[count].seg_num = 0;
3145		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3146		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3147		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3148		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3149		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3150		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3151		addr = pHba->base_addr_phys + 0x40;
3152		sys_tbl->iops[count].inbound_low = dma_low(addr);
3153		sys_tbl->iops[count].inbound_high = dma_high(addr);
3154
3155		count++;
3156	}
3157
3158#ifdef DEBUG
3159{
3160	u32 *table = (u32*)sys_tbl;
3161	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3162	for(count = 0; count < (sys_tbl_len >>2); count++) {
3163		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", 
3164			count, table[count]);
3165	}
3166}
3167#endif
3168
3169	return 0;
3170}
3171
3172
3173/*
3174 *	 Dump the information block associated with a given unit (TID)
3175 */
3176 
3177static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3178{
3179	char buf[64];
3180	int unit = d->lct_data.tid;
3181
3182	printk(KERN_INFO "TID %3.3d ", unit);
3183
3184	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3185	{
3186		buf[16]=0;
3187		printk(" Vendor: %-12.12s", buf);
3188	}
3189	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3190	{
3191		buf[16]=0;
3192		printk(" Device: %-12.12s", buf);
3193	}
3194	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3195	{
3196		buf[8]=0;
3197		printk(" Rev: %-12.12s\n", buf);
3198	}
3199#ifdef DEBUG
3200	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3201	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3202	 printk(KERN_INFO "\tFlags: ");
3203
3204	 if(d->lct_data.device_flags&(1<<0))
3205		  printk("C");	     // ConfigDialog requested
3206	 if(d->lct_data.device_flags&(1<<1))
3207		  printk("U");	     // Multi-user capable
3208	 if(!(d->lct_data.device_flags&(1<<4)))
3209		  printk("P");	     // Peer service enabled!
3210	 if(!(d->lct_data.device_flags&(1<<5)))
3211		  printk("M");	     // Mgmt service enabled!
3212	 printk("\n");
3213#endif
3214}
3215
3216#ifdef DEBUG
3217/*
3218 *	Do i2o class name lookup
3219 */
3220static const char *adpt_i2o_get_class_name(int class)
3221{
3222	int idx = 16;
3223	static char *i2o_class_name[] = {
3224		"Executive",
3225		"Device Driver Module",
3226		"Block Device",
3227		"Tape Device",
3228		"LAN Interface",
3229		"WAN Interface",
3230		"Fibre Channel Port",
3231		"Fibre Channel Device",
3232		"SCSI Device",
3233		"ATE Port",
3234		"ATE Device",
3235		"Floppy Controller",
3236		"Floppy Device",
3237		"Secondary Bus Port",
3238		"Peer Transport Agent",
3239		"Peer Transport",
3240		"Unknown"
3241	};
3242	
3243	switch(class&0xFFF) {
3244	case I2O_CLASS_EXECUTIVE:
3245		idx = 0; break;
3246	case I2O_CLASS_DDM:
3247		idx = 1; break;
3248	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3249		idx = 2; break;
3250	case I2O_CLASS_SEQUENTIAL_STORAGE:
3251		idx = 3; break;
3252	case I2O_CLASS_LAN:
3253		idx = 4; break;
3254	case I2O_CLASS_WAN:
3255		idx = 5; break;
3256	case I2O_CLASS_FIBRE_CHANNEL_PORT:
3257		idx = 6; break;
3258	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3259		idx = 7; break;
3260	case I2O_CLASS_SCSI_PERIPHERAL:
3261		idx = 8; break;
3262	case I2O_CLASS_ATE_PORT:
3263		idx = 9; break;
3264	case I2O_CLASS_ATE_PERIPHERAL:
3265		idx = 10; break;
3266	case I2O_CLASS_FLOPPY_CONTROLLER:
3267		idx = 11; break;
3268	case I2O_CLASS_FLOPPY_DEVICE:
3269		idx = 12; break;
3270	case I2O_CLASS_BUS_ADAPTER_PORT:
3271		idx = 13; break;
3272	case I2O_CLASS_PEER_TRANSPORT_AGENT:
3273		idx = 14; break;
3274	case I2O_CLASS_PEER_TRANSPORT:
3275		idx = 15; break;
3276	}
3277	return i2o_class_name[idx];
3278}
3279#endif
3280
3281
3282static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3283{
3284	u32 msg[6];
3285	int ret, size = sizeof(i2o_hrt);
3286
3287	do {
3288		if (pHba->hrt == NULL) {
3289			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3290					size, &pHba->hrt_pa, GFP_KERNEL);
3291			if (pHba->hrt == NULL) {
3292				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3293				return -ENOMEM;
3294			}
3295		}
3296
3297		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3298		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3299		msg[2]= 0;
3300		msg[3]= 0;
3301		msg[4]= (0xD0000000 | size);    /* Simple transaction */
3302		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */
3303
3304		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3305			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3306			return ret;
3307		}
3308
3309		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3310			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3311			dma_free_coherent(&pHba->pDev->dev, size,
3312				pHba->hrt, pHba->hrt_pa);
3313			size = newsize;
3314			pHba->hrt = NULL;
3315		}
3316	} while(pHba->hrt == NULL);
3317	return 0;
3318}
3319
3320/*
3321 *	 Query one scalar group value or a whole scalar group.
3322 */		    	
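/*
 * Example from this driver: adpt_i2o_query_scalar(pHba, tid, 0x8000, -1,
 * buf, 32) reads the whole DPT device-information group; passing
 * field == -1 selects every field in the group instead of a single one.
 */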
3323static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid, 
3324			int group, int field, void *buf, int buflen)
3325{
3326	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3327	u8 *opblk_va;
3328	dma_addr_t opblk_pa;
3329	u8 *resblk_va;
3330	dma_addr_t resblk_pa;
3331
3332	int size;
3333
3334	/* 8 bytes for header */
3335	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3336			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3337	if (resblk_va == NULL) {
3338		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3339		return -ENOMEM;
3340	}
3341
3342	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3343			sizeof(opblk), &opblk_pa, GFP_KERNEL);
3344	if (opblk_va == NULL) {
3345		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3346			resblk_va, resblk_pa);
3347		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3348			pHba->name);
3349		return -ENOMEM;
3350	}
3351	if (field == -1)  		/* whole group */
3352			opblk[4] = -1;
3353
3354	memcpy(opblk_va, opblk, sizeof(opblk));
3355	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid, 
3356		opblk_va, opblk_pa, sizeof(opblk),
3357		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3358	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3359	if (size == -ETIME) {
3360		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3361							resblk_va, resblk_pa);
3362		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3363		return -ETIME;
3364	} else if (size == -EINTR) {
3365		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3366							resblk_va, resblk_pa);
3367		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3368		return -EINTR;
3369	}
3370			
3371	memcpy(buf, resblk_va+8, buflen);  /* cut off header */
3372
3373	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3374						resblk_va, resblk_pa);
3375	if (size < 0)
3376		return size;	
3377
3378	return buflen;
3379}
3380
3381
3382/*	Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3383 *
3384 *	This function can be used for all UtilParamsGet/Set operations.
3385 *	The OperationBlock is given in opblk-buffer, 
3386 *	and results are returned in resblk-buffer.
3387 *	Note that the minimum sized resblk is 8 bytes and contains
3388 *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3389 */
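/*
 * As decoded below, res[1] packs three of those fields: ErrorInfoSize in
 * bits 31-24, BlockStatus in bits 23-16 and BlockSize (in 32-bit words)
 * in bits 15-0.
 */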
3390static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, 
3391		  void *opblk_va,  dma_addr_t opblk_pa, int oplen,
3392		void *resblk_va, dma_addr_t resblk_pa, int reslen)
3393{
3394	u32 msg[9]; 
3395	u32 *res = (u32 *)resblk_va;
3396	int wait_status;
3397
3398	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3399	msg[1] = cmd << 24 | HOST_TID << 12 | tid; 
3400	msg[2] = 0;
3401	msg[3] = 0;
3402	msg[4] = 0;
3403	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
3404	msg[6] = (u32)opblk_pa;
3405	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
3406	msg[8] = (u32)resblk_pa;
3407
3408	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3409		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3410   		return wait_status; 	/* -DetailedStatus */
3411	}
3412
3413	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
3414		printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
3415			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3416			pHba->name,
3417			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3418							 : "PARAMS_GET",   
3419			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3420		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3421	}
3422
3423	 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */ 
3424}
3425
3426
3427static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3428{
3429	u32 msg[4];
3430	int ret;
3431
3432	adpt_i2o_status_get(pHba);
3433
3434	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3435
3436	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3437   	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3438		return 0;
3439	}
3440
3441	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3442	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3443	msg[2] = 0;
3444	msg[3] = 0;
3445
3446	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3447		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3448				pHba->unit, -ret);
3449	} else {
3450		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3451	}
3452
3453	adpt_i2o_status_get(pHba);
3454	return ret;
3455}
3456
3457
3458/* 
3459 * Enable IOP. Allows the IOP to resume external operations.
3460 */
3461static int adpt_i2o_enable_hba(adpt_hba* pHba)
3462{
3463	u32 msg[4];
3464	int ret;
3465	
3466	adpt_i2o_status_get(pHba);
3467	if(!pHba->status_block){
3468		return -ENOMEM;
3469	}
3470	/* Enable only allowed on READY state */
3471	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3472		return 0;
3473
3474	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3475		return -EINVAL;
3476
3477	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3478	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3479	msg[2]= 0;
3480	msg[3]= 0;
3481
3482	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3483		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n", 
3484			pHba->name, ret);
3485	} else {
3486		PDEBUG("%s: Enabled.\n", pHba->name);
3487	}
3488
3489	adpt_i2o_status_get(pHba);
3490	return ret;
3491}
3492
3493
3494static int adpt_i2o_systab_send(adpt_hba* pHba)
3495{
3496	 u32 msg[12];
3497	 int ret;
3498
3499	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3500	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3501	msg[2] = 0;
3502	msg[3] = 0;
3503	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3504	msg[5] = 0;				   /* Segment 0 */
3505
3506	/* 
3507	 * Provide three SGL-elements:
3508	 * System table (SysTab), Private memory space declaration and 
3509	 * Private i/o space declaration  
3510	 */
3511	msg[6] = 0x54000000 | sys_tbl_len;
3512	msg[7] = (u32)sys_tbl_pa;
3513	msg[8] = 0x54000000 | 0;
3514	msg[9] = 0;
3515	msg[10] = 0xD4000000 | 0;
3516	msg[11] = 0;
3517
3518	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3519		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n", 
3520			pHba->name, ret);
3521	}
3522#ifdef DEBUG
3523	else {
3524		PINFO("%s: SysTab set.\n", pHba->name);
3525	}
3526#endif
3527
3528	return ret;	
3529}
3530
3531
3532/*============================================================================
3533 *
3534 *============================================================================
3535 */
3536
3537
3538#ifdef UARTDELAY 
3539
3540static void adpt_delay(int millisec)
3541{
3542	int i;
3543	for (i = 0; i < millisec; i++) {
3544		udelay(1000);	/* delay for one millisecond */
3545	}
3546}
3547
3548#endif
3549
3550static struct scsi_host_template driver_template = {
3551	.module			= THIS_MODULE,
3552	.name			= "dpt_i2o",
3553	.proc_name		= "dpt_i2o",
3554	.show_info		= adpt_show_info,
3555	.info			= adpt_info,
3556	.queuecommand		= adpt_queue,
3557	.eh_abort_handler	= adpt_abort,
3558	.eh_device_reset_handler = adpt_device_reset,
3559	.eh_bus_reset_handler	= adpt_bus_reset,
3560	.eh_host_reset_handler	= adpt_reset,
3561	.bios_param		= adpt_bios_param,
3562	.slave_configure	= adpt_slave_configure,
3563	.can_queue		= MAX_TO_IOP_MESSAGES,
3564	.this_id		= 7,
3565	.use_clustering		= ENABLE_CLUSTERING,
3566};
3567
3568static int __init adpt_init(void)
3569{
3570	int		error;
3571	adpt_hba	*pHba, *next;
3572
3573	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3574
3575	error = adpt_detect(&driver_template);
3576	if (error < 0)
3577		return error;
3578	if (hba_chain == NULL)
3579		return -ENODEV;
3580
3581	for (pHba = hba_chain; pHba; pHba = pHba->next) {
3582		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3583		if (error)
3584			goto fail;
3585		scsi_scan_host(pHba->host);
3586	}
3587	return 0;
3588fail:
3589	for (pHba = hba_chain; pHba; pHba = next) {
3590		next = pHba->next;
3591		scsi_remove_host(pHba->host);
3592	}
3593	return error;
3594}
3595
3596static void __exit adpt_exit(void)
3597{
3598	adpt_hba	*pHba, *next;
3599
3600	for (pHba = hba_chain; pHba; pHba = next) {
3601		next = pHba->next;
3602		adpt_release(pHba);
3603	}
3604}
3605
3606module_init(adpt_init);
3607module_exit(adpt_exit);
3608
3609MODULE_LICENSE("GPL");