/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};




/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};
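/* Pending post_wait requests are kept on a simple singly-linked list
 * (adpt_post_wait_queue below, guarded by adpt_post_wait_lock);
 * adpt_i2o_post_wait() links an entry in before posting the message and
 * unlinks it again once the reply, or a timeout, arrives. */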

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);


/*============================================================================
 * 				Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
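/* dma_high()/dma_low() above return the two 32-bit halves of a dma_addr_t
 * for building 64-bit SG elements; with a 32-bit dma_addr_t, dma_high()
 * is always 0. */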

static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);

static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

        /* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBAs in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBAs in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes cannot be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}


/*
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}


static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8  scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir  =0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
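	/* msg[0] packs the request length (in 32-bit words) into the upper
	 * 16 bits and the SGL offset (also in words) into the low nibble,
	 * the I2O message-header layout used throughout this driver. */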
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3]  = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
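	/* One simple SG element follows; 0xD0000000 flags it as the last
	 * element and the end of the buffer.  In 64-bit mode it is preceded
	 * by an attributes element (the 0x7C entry, enabling 64-bit
	 * addressing per the inline comment) and the bus address is split
	 * via dma_low()/dma_high(). */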
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}


static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
				host->can_queue - 1);
	} else {
		scsi_adjust_queue_depth(device, 0, 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/*
	 * TODO: I need to block here if I am processing ioctl cmds
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need a way to restart the scsi-core's queues or should I block
	 * calling scsi_done on the outstanding cmds instead
	 * for now we don't set the IOCTL state
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

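/* DEF_SCSI_QCMD() generates the adpt_queue() wrapper that takes the host
 * lock around adpt_queue_lck() above, as the SCSI midlayer expects of
 * legacy lock-holding queuecommand implementations. */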
static DEF_SCSI_QCMD(adpt_queue)

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First let's set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);
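	/* Note: sector_div() divides 'capacity' in place and returns the
	 * remainder, so 'cylinders' receives the remainder here while the
	 * quotient is left behind in 'capacity' - a long-standing quirk of
	 * this function. */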

	// Special case if CDROM
	if(sdev->type == 5) {  // CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
		  int length, int inout)
{
	struct adpt_device* d;
	int id;
	int chan;
	int len = 0;
	int begin = 0;
	int pos = 0;
	adpt_hba* pHba;
	int unit;

	*start = buffer;
	if (inout == TRUE) {
		/*
		 * The user has done a write and wants us to take the
		 * data in the buffer and do something with it.
		 * proc_scsiwrite calls us with inout = 1
		 *
		 * Read data from buffer (writing to us) - NOT SUPPORTED
		 */
		return -EINVAL;
	}

	/*
	 * inout = 0 means the user has done a read and wants information
	 * returned, so we write information about the cards into the buffer
	 * proc_scsiread() calls us with inout = 0
	 */

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	len  = sprintf(buffer    , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	len += sprintf(buffer+len, "%s\n", pHba->detail);
	len += sprintf(buffer+len, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	len += sprintf(buffer+len, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	pos = begin + len;

	/* CHECKPOINT */
	if(pos > offset + length) {
		goto stop_output;
	}
	if(pos <= offset) {
		/*
		 * If we haven't even written to where we last left
		 * off (the last time we were called), reset the
		 * beginning pointer.
		 */
		len = 0;
		begin = pos;
	}
	len +=  sprintf(buffer+len, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d){
				len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
				len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
				pos = begin + len;


				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				unit = d->pI2o_dev->lct_data.tid;
				len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				pos = begin + len;

				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				d = d->next_lun;
			}
		}
	}

	/*
	 * begin is where we last checked our position with regard to offset
	 * begin is always less than offset.  len is relative to begin.  It
	 * is the number of bytes written past begin
	 *
	 */
stop_output:
	/* stop the output and calculate the correct length */
	*(buffer + len) = '\0';

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if(len > length) {
		len = length;
	} else if(len < 0){
		len = 0;
		**start = '\0';
	}
	return len;
}

/*
 *	Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}

/*
 *	Go from a u32 'context' to a struct scsi_cmnd * .
 *	This could probably be made more efficient.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}

/*
 *	Turn a pointer to ioctl reply data into a u32 'context'
 */
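/*
 *	On 32-bit kernels the reply pointer itself fits into the 32-bit
 *	context; on 64-bit kernels it cannot, so a free slot in
 *	pHba->ioctl_reply_context[] is claimed and its index is used as
 *	the context instead.
 */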
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 *	Go from a u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	rcode =  adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	 printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue.  */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	 printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

        if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiesce
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
	   	iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	if( pHba->channel[chan].device == NULL){
		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}


static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
       // TODO we need a MORE unique way of getting ids
       // to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id =  adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
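	/* The initiator context (msg[2]) carries the 15-bit wait id with the
	 * top bit set; adpt_i2o_post_wait_complete() masks the id back out
	 * of the reply context to locate this entry and wake us. */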
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue.  */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}


static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);
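	/* Reading the post port returns either the offset of a free inbound
	 * message frame or EMPTY_QUEUE when none is available; the offset is
	 * then added to msg_addr_virt to address the frame itself. */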

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and the timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
        // If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"           %d\n",p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized  == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for a message frame to become available,
		// indicating that the reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;
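	/* table_size is in 32-bit words: skip the 3-word LCT header, then
	 * each entry occupies 9 words, giving the number of entries. */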

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			    	continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong, skip it
				printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt - but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong, skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = (buf[2]>>8 )&0xff;
				if(bus_no >= MAX_CHANNEL) {	// Something wrong, skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
	//	mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}


static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void *sg_list[pHba->sg_tablesize];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
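	/* As in the inquiry path, bits 4-7 of msg[0] give the offset (in
	 * 32-bit words) of the scatter-gather list within the message;
	 * zero means the command carries no SG list at all. */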
1843	msg[2] = 0x40000000; // IOCTL context
1844	msg[3] = adpt_ioctl_to_context(pHba, reply);
1845	if (msg[3] == (u32)-1)
1846		return -EBUSY;
 
 
1847
1848	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
 
 
 
 
1849	if(sg_offset) {
1850		// TODO add 64 bit API
1851		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
1852		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1853		if (sg_count > pHba->sg_tablesize){
1854			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1855			kfree (reply);
1856			return -EINVAL;
1857		}
1858
1859		for(i = 0; i < sg_count; i++) {
1860			int sg_size;
1861
1862			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1863				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
1864				rcode = -EINVAL;
1865				goto cleanup;
1866			}
1867			sg_size = sg[i].flag_count & 0xffffff;      
1868			/* Allocate memory for the transfer */
1869			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1870			if(!p) {
1871				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1872						pHba->name,sg_size,i,sg_count);
1873				rcode = -ENOMEM;
1874				goto cleanup;
1875			}
1876			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1877			/* Copy in the user's SG buffer if necessary */
1878			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1879				// sg_simple_element API is 32 bit
1880				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1881					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1882					rcode = -EFAULT;
1883					goto cleanup;
1884				}
1885			}
1886			/* sg_simple_element API is 32 bit, but addr < 4GB */
1887			sg[i].addr_bus = addr;
1888		}
1889	}
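	/*
	 * Decoding sketch for flag_count (the element value below is
	 * hypothetical): each simple SG element packs <8 flag bits><24-bit
	 * byte count>, which is what the loop above tests and masks.
	 *
	 *	u32 fc        = 0x14001000;		// example element
	 *	int is_simple = fc & 0x10000000;	// simple address element
	 *	int dir_out   = fc & 0x04000000;	// data flows host->device
	 *	u32 nbytes    = fc & 0x00ffffff;	// 0x1000-byte buffer
	 */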
1890
1891	do {
1892		if(pHba->host)
1893			spin_lock_irqsave(pHba->host->host_lock, flags);
1894		// This state stops any new commands from entering the
1895		// controller while processing the ioctl
1896//		pHba->state |= DPTI_STATE_IOCTL;
1897//		We can't set this now - The scsi subsystem sets host_blocked and
1898//		the queue empties and stops.  We need a way to restart the queue
1899		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1900		if (rcode != 0)
1901			printk("adpt_i2o_passthru: post wait failed %d %p\n",
1902					rcode, reply);
1903//		pHba->state &= ~DPTI_STATE_IOCTL;
1904		if(pHba->host)
1905			spin_unlock_irqrestore(pHba->host->host_lock, flags);
1906	} while(rcode == -ETIMEDOUT);
1907
1908	if(rcode){
1909		goto cleanup;
1910	}
1911
1912	if(sg_offset) {
1913	/* Copy back the Scatter Gather buffers back to user space */
1914		u32 j;
1915		// TODO add 64 bit API
1916		struct sg_simple_element* sg;
1917		int sg_size;
1918
1919		// re-acquire the original message to handle the sg copy operation correctly
1920		memset(&msg, 0, MAX_MESSAGE_SIZE*4); 
1921		// get user msg size in u32s 
1922		if(get_user(size, &user_msg[0])){
1923			rcode = -EFAULT; 
1924			goto cleanup; 
1925		}
1926		size = size>>16;
1927		if (size > MAX_MESSAGE_SIZE) {
1928			rcode = -EINVAL;
1929			goto cleanup;
1930		}
1931		size *= 4;
1932		/* Copy in the user's I2O command */
1933		if (copy_from_user (msg, user_msg, size)) {
1934			rcode = -EFAULT;
1935			goto cleanup;
1936		}
1937		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1938
1939		// TODO add 64 bit API
1940		sg 	 = (struct sg_simple_element*)(msg + sg_offset);
1941		for (j = 0; j < sg_count; j++) {
1942			/* Copy out the SG list to user's buffer if necessary */
1943			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1944				sg_size = sg[j].flag_count & 0xffffff; 
1945				// sg_simple_element API is 32 bit
1946				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1947					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1948					rcode = -EFAULT;
1949					goto cleanup;
1950				}
1951			}
1952		}
1953	} 
1954
1955	/* Copy back the reply to user space */
1956	if (reply_size) {
1957		// we wrote our own values for context - now restore the user supplied ones
1958		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1959			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1960			rcode = -EFAULT;
1961		}
1962		if(copy_to_user(user_reply, reply, reply_size)) {
1963			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1964			rcode = -EFAULT;
1965		}
1966	}
1967
1968
1969cleanup:
1970	if (rcode != -ETIME && rcode != -EINTR) {
1971		struct sg_simple_element *sg =
1972				(struct sg_simple_element*) (msg +sg_offset);
1973		kfree (reply);
1974		while(sg_index) {
1975			if(sg_list[--sg_index]) {
1976				dma_free_coherent(&pHba->pDev->dev,
1977					sg[sg_index].flag_count & 0xffffff,
1978					sg_list[sg_index],
1979					sg[sg_index].addr_bus);
1980			}
1981		}
1982	}
1983	return rcode;
1984}
1985
1986#if defined __ia64__ 
1987static void adpt_ia64_info(sysInfo_S* si)
1988{
1989	// This is all the info we need for now
1990	// We will add more info as our new
1991	// management utility requires it
1992	si->processorType = PROC_IA64;
1993}
1994#endif
1995
1996#if defined __sparc__ 
1997static void adpt_sparc_info(sysInfo_S* si)
1998{
1999	// This is all the info we need for now
2000	// We will add more info as our new
2001	// management utility requires it
2002	si->processorType = PROC_ULTRASPARC;
2003}
2004#endif
2005#if defined __alpha__ 
2006static void adpt_alpha_info(sysInfo_S* si)
2007{
2008	// This is all the info we need for now
2009	// We will add more info as our new
2010	// management utility requires it
2011	si->processorType = PROC_ALPHA;
2012}
2013#endif
2014
2015	#if defined __i386__
2016	static void adpt_i386_info(sysInfo_S* si)
2017{
2018	// This is all the info we need for now
2019	// We will add more info as our new
2020	// management utility requires it
2021	switch (boot_cpu_data.x86) {
2022	case CPU_386:
2023		si->processorType = PROC_386;
2024		break;
2025	case CPU_486:
2026		si->processorType = PROC_486;
2027		break;
2028	case CPU_586:
2029		si->processorType = PROC_PENTIUM;
2030		break;
2031	default:  // Just in case 
2032		si->processorType = PROC_PENTIUM;
2033		break;
2034	}
2035}
2036#endif
2037
2038/*
2039	 * This routine returns information about the system.  This does not affect
2040	 * any logic, and if the info is wrong it doesn't matter.
2041 */
2042
2043/* Get all the info we can not get from kernel services */
2044static int adpt_system_info(void __user *buffer)
2045{
2046	sysInfo_S si;
2047
2048	memset(&si, 0, sizeof(si));
2049
2050	si.osType = OS_LINUX;
2051	si.osMajorVersion = 0;
2052	si.osMinorVersion = 0;
2053	si.osRevision = 0;
2054	si.busType = SI_PCI_BUS;
2055	si.processorFamily = DPTI_sig.dsProcessorFamily;
2056
2057#if defined __i386__
2058	adpt_i386_info(&si);
2059#elif defined (__ia64__)
2060	adpt_ia64_info(&si);
2061#elif defined(__sparc__)
2062	adpt_sparc_info(&si);
2063#elif defined (__alpha__)
2064	adpt_alpha_info(&si);
2065#else
2066	si.processorType = 0xff ;
2067#endif
2068	if (copy_to_user(buffer, &si, sizeof(si))){
2069		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
2070		return -EFAULT;
2071	}
2072
2073	return 0;
2074}
2075
2076static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
2077{
2078	int minor;
2079	int error = 0;
2080	adpt_hba* pHba;
2081	ulong flags = 0;
2082	void __user *argp = (void __user *)arg;
2083
2084	minor = iminor(inode);
2085	if (minor >= DPTI_MAX_HBA){
2086		return -ENXIO;
2087	}
2088	mutex_lock(&adpt_configuration_lock);
2089	for (pHba = hba_chain; pHba; pHba = pHba->next) {
2090		if (pHba->unit == minor) {
2091			break;	/* found adapter */
2092		}
2093	}
2094	mutex_unlock(&adpt_configuration_lock);
2095	if(pHba == NULL){
2096		return -ENXIO;
2097	}
2098
2099	while((volatile u32) pHba->state & DPTI_STATE_RESET )
2100		schedule_timeout_uninterruptible(2);
2101
2102	switch (cmd) {
2103	// TODO: handle 3 cases
2104	case DPT_SIGNATURE:
2105		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2106			return -EFAULT;
2107		}
2108		break;
2109	case I2OUSRCMD:
2110		return adpt_i2o_passthru(pHba, argp);
2111
2112	case DPT_CTRLINFO:{
2113		drvrHBAinfo_S HbaInfo;
2114
2115#define FLG_OSD_PCI_VALID 0x0001
2116#define FLG_OSD_DMA	  0x0002
2117#define FLG_OSD_I2O	  0x0004
2118		memset(&HbaInfo, 0, sizeof(HbaInfo));
2119		HbaInfo.drvrHBAnum = pHba->unit;
2120		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2121		HbaInfo.blinkState = adpt_read_blink_led(pHba);
2122		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
2123		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn); 
2124		HbaInfo.Interrupt = pHba->pDev->irq; 
2125		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2126		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2127			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2128			return -EFAULT;
2129		}
2130		break;
2131		}
2132	case DPT_SYSINFO:
2133		return adpt_system_info(argp);
2134	case DPT_BLINKLED:{
2135		u32 value;
2136		value = (u32)adpt_read_blink_led(pHba);
2137		if (copy_to_user(argp, &value, sizeof(value))) {
2138			return -EFAULT;
2139		}
2140		break;
2141		}
2142	case I2ORESETCMD:
2143		if(pHba->host)
2144			spin_lock_irqsave(pHba->host->host_lock, flags);
2145		adpt_hba_reset(pHba);
2146		if(pHba->host)
2147			spin_unlock_irqrestore(pHba->host->host_lock, flags);
2148		break;
2149	case I2ORESCANCMD:
2150		adpt_rescan(pHba);
2151		break;
2152	default:
2153		return -EINVAL;
2154	}
2155
2156	return error;
2157}
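/*
 * Userspace usage sketch for the ioctl interface above.  The /dev/dpti0
 * node name is illustrative (its minor number selects adapter 0);
 * DPT_BLINKLED comes from this driver's dpt headers:
 *
 *	int fd = open("/dev/dpti0", O_RDWR);
 *	unsigned int led;
 *	if (fd >= 0 && ioctl(fd, DPT_BLINKLED, &led) == 0)
 *		printf("blink LED code: 0x%x\n", led);
 */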
2158
2159static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2160{
2161	struct inode *inode;
2162	long ret;
2163 
2164	inode = file->f_dentry->d_inode;
2165 
2166	mutex_lock(&adpt_mutex);
2167	ret = adpt_ioctl(inode, file, cmd, arg);
2168	mutex_unlock(&adpt_mutex);
2169
2170	return ret;
2171}
2172
2173#ifdef CONFIG_COMPAT
2174static long compat_adpt_ioctl(struct file *file,
2175				unsigned int cmd, unsigned long arg)
2176{
2177	struct inode *inode;
2178	long ret;
2179 
2180	inode = file->f_dentry->d_inode;
2181 
2182	mutex_lock(&adpt_mutex);
2183 
2184	switch(cmd) {
2185		case DPT_SIGNATURE:
2186		case I2OUSRCMD:
2187		case DPT_CTRLINFO:
2188		case DPT_SYSINFO:
2189		case DPT_BLINKLED:
2190		case I2ORESETCMD:
2191		case I2ORESCANCMD:
2192		case (DPT_TARGET_BUSY & 0xFFFF):
2193		case DPT_TARGET_BUSY:
2194			ret = adpt_ioctl(inode, file, cmd, arg);
2195			break;
2196		default:
2197			ret =  -ENOIOCTLCMD;
2198	}
2199 
2200	mutex_unlock(&adpt_mutex);
2201 
2202	return ret;
2203}
2204#endif
2205
2206static irqreturn_t adpt_isr(int irq, void *dev_id)
2207{
2208	struct scsi_cmnd* cmd;
2209	adpt_hba* pHba = dev_id;
2210	u32 m;
2211	void __iomem *reply;
2212	u32 status=0;
2213	u32 context;
2214	ulong flags = 0;
2215	int handled = 0;
2216
2217	if (pHba == NULL){
2218		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2219		return IRQ_NONE;
2220	}
2221	if(pHba->host)
2222		spin_lock_irqsave(pHba->host->host_lock, flags);
2223
2224	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2225		m = readl(pHba->reply_port);
2226		if(m == EMPTY_QUEUE){
2227			// Try twice then give up
2228			rmb();
2229			m = readl(pHba->reply_port);
2230			if(m == EMPTY_QUEUE){ 
2231				// This really should not happen
2232				printk(KERN_ERR"dpti: Could not get reply frame\n");
2233				goto out;
2234			}
2235		}
2236		if (pHba->reply_pool_pa <= m &&
2237		    m < pHba->reply_pool_pa +
2238			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2239			reply = (u8 *)pHba->reply_pool +
2240						(m - pHba->reply_pool_pa);
2241		} else {
2242			/* Ick, we should *never* be here */
2243			printk(KERN_ERR "dpti: reply frame not from pool\n");
2244			reply = (u8 *)bus_to_virt(m);
2245		}
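		/*
		 * Worked example of the bounds check above (addresses are
		 * hypothetical): with reply_pool_pa = 0x1000000 and a pool of
		 * reply_fifo_size frames of REPLY_FRAME_SIZE u32s each, an MFA
		 * of m = 0x1000080 lands inside the pool and maps to:
		 *
		 *	reply = (u8 *)pHba->reply_pool + 0x80; // m - reply_pool_pa
		 *
		 * Anything outside that window falls back to bus_to_virt().
		 */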
2246
2247		if (readl(reply) & MSG_FAIL) {
2248			u32 old_m = readl(reply+28); 
2249			void __iomem *msg;
2250			u32 old_context;
2251			PDEBUG("%s: Failed message\n",pHba->name);
2252			if(old_m >= 0x100000){
2253				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2254				writel(m,pHba->reply_port);
2255				continue;
2256			}
2257			// Transaction context is 0 in failed reply frame
2258			msg = pHba->msg_addr_virt + old_m;
2259			old_context = readl(msg+12);
2260			writel(old_context, reply+12);
2261			adpt_send_nop(pHba, old_m);
2262		} 
2263		context = readl(reply+8);
2264		if(context & 0x40000000){ // IOCTL
2265			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2266			if( p != NULL) {
2267				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2268			}
2269			// All IOCTLs will also be post wait
2270		}
2271		if(context & 0x80000000){ // Post wait message
2272			status = readl(reply+16);
2273			if(status  >> 24){
2274				status &=  0xffff; /* Get detail status */
2275			} else {
2276				status = I2O_POST_WAIT_OK;
2277			}
2278			if(!(context & 0x40000000)) {
2279				cmd = adpt_cmd_from_context(pHba,
2280						readl(reply+12));
2281				if(cmd != NULL) {
2282					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2283				}
2284			}
2285			adpt_i2o_post_wait_complete(context, status);
2286		} else { // SCSI message
2287			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2288			if(cmd != NULL){
2289				scsi_dma_unmap(cmd);
2290				if(cmd->serial_number != 0) { // If not timedout
2291					adpt_i2o_to_scsi(reply, cmd);
2292				}
2293			}
2294		}
2295		writel(m, pHba->reply_port);
2296		wmb();
2297		rmb();
2298	}
2299	handled = 1;
2300out:	if(pHba->host)
2301		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2302	return IRQ_RETVAL(handled);
2303}
2304
2305static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2306{
2307	int i;
2308	u32 msg[MAX_MESSAGE_SIZE];
2309	u32* mptr;
2310	u32* lptr;
2311	u32 *lenptr;
2312	int direction;
2313	int scsidir;
2314	int nseg;
2315	u32 len;
2316	u32 reqlen;
2317	s32 rcode;
2318	dma_addr_t addr;
2319
2320	memset(msg, 0 , sizeof(msg));
2321	len = scsi_bufflen(cmd);
2322	direction = 0x00000000;	
2323	
2324	scsidir = 0x00000000;			// DATA NO XFER
2325	if(len) {
2326		/*
2327		 * Set SCBFlags to indicate if data is being transferred
2328		 * in or out, or no data transfer
2329		 * Note:  Do not have to verify index is less than 0 since
2330		 * cmd->cmnd[0] is an unsigned char
2331		 */
2332		switch(cmd->sc_data_direction){
2333		case DMA_FROM_DEVICE:
2334			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2335			break;
2336		case DMA_TO_DEVICE:
2337			direction=0x04000000;	// SGL OUT
2338			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
2339			break;
2340		case DMA_NONE:
2341			break;
2342		case DMA_BIDIRECTIONAL:
2343			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2344			// Assume In - and continue;
2345			break;
2346		default:
2347			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2348			     pHba->name, cmd->cmnd[0]);
2349			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2350			cmd->scsi_done(cmd);
2351			return 	0;
2352		}
2353	}
2354	// msg[0] is set later
2355	// I2O_CMD_SCSI_EXEC
2356	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2357	msg[2] = 0;
2358	msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
2359	// Our cards use the transaction context as the tag for queueing
2360	// Adaptec/DPT Private stuff 
2361	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2362	msg[5] = d->tid;
2363	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
2364	// I2O_SCB_FLAG_ENABLE_DISCONNECT | 
2365	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | 
2366	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2367	msg[6] = scsidir|0x20a00000|cmd->cmd_len;
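	/*
	 * Layout sketch for msg[6]: the upper half carries the 16-bit
	 * SCBFlags value - 0x20a0 is the OR of the three flags named in the
	 * comment above, and scsidir contributes the data-direction flag -
	 * while the low byte holds the CDB length.  For a 10-byte CDB with
	 * data-in:
	 *
	 *	msg[6] = 0x40000000 | 0x20a00000 | 10;	// == 0x60a0000a
	 */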
2368
2369	mptr=msg+7;
2370
2371	// Write SCSI command into the message - always 16 byte block 
2372	memset(mptr, 0,  16);
2373	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2374	mptr+=4;
2375	lenptr=mptr++;		/* Remember me - fill in when we know */
2376	if (dpt_dma64(pHba)) {
2377		reqlen = 16;		// SINGLE SGE
2378		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2379		*mptr++ = 1 << PAGE_SHIFT;
2380	} else {
2381		reqlen = 14;		// SINGLE SGE
2382	}
2383	/* Now fill in the SGList and command */
2384
2385	nseg = scsi_dma_map(cmd);
2386	BUG_ON(nseg < 0);
2387	if (nseg) {
2388		struct scatterlist *sg;
2389
2390		len = 0;
2391		scsi_for_each_sg(cmd, sg, nseg, i) {
2392			lptr = mptr;
2393			*mptr++ = direction|0x10000000|sg_dma_len(sg);
2394			len+=sg_dma_len(sg);
2395			addr = sg_dma_address(sg);
2396			*mptr++ = dma_low(addr);
2397			if (dpt_dma64(pHba))
2398				*mptr++ = dma_high(addr);
2399			/* Make this an end of list */
2400			if (i == nseg - 1)
2401				*lptr = direction|0xD0000000|sg_dma_len(sg);
2402		}
2403		reqlen = mptr - msg;
2404		*lenptr = len;
2405		
2406		if(cmd->underflow && len != cmd->underflow){
2407			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2408				len, cmd->underflow);
2409		}
2410	} else {
2411		*lenptr = len = 0;
2412		reqlen = 12;
2413	}
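	/*
	 * 0xD0000000 on the final element above is the OR of three SGL
	 * flags: 0x80000000 (last element), 0x40000000 (end of buffer) and
	 * 0x10000000 (simple address element).  A two-segment list thus
	 * looks like:
	 *
	 *	elem0: direction | 0x10000000 | len0	// simple
	 *	elem1: direction | 0xD0000000 | len1	// simple+EOB+last
	 */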
2414	
2415	/* Stick the headers on */
2416	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2417	
2418	// Send it on its way
2419	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2420	if (rcode == 0) {
2421		return 0;
2422	}
2423	return rcode;
2424}
2425
2426
2427static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2428{
2429	struct Scsi_Host *host;
2430
2431	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2432	if (host == NULL) {
2433		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2434		return -1;
2435	}
2436	host->hostdata[0] = (unsigned long)pHba;
2437	pHba->host = host;
2438
2439	host->irq = pHba->pDev->irq;
2440	/* no IO ports, so don't have to set host->io_port and
2441	 * host->n_io_port
2442	 */
2443	host->io_port = 0;
2444	host->n_io_port = 0;
2445				/* see comments in scsi_host.h */
2446	host->max_id = 16;
2447	host->max_lun = 256;
2448	host->max_channel = pHba->top_scsi_channel + 1;
2449	host->cmd_per_lun = 1;
2450	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2451	host->sg_tablesize = pHba->sg_tablesize;
2452	host->can_queue = pHba->post_fifo_size;
2453
2454	return 0;
2455}
2456
2457
2458static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2459{
2460	adpt_hba* pHba;
2461	u32 hba_status;
2462	u32 dev_status;
2463	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits 
2464	// I know this would look cleaner if I just read bytes
2465	// but the model I have been using for all the rest of the
2466	// io is in 4 byte words - so I keep that model
2467	u16 detailed_status = readl(reply+16) &0xffff;
2468	dev_status = (detailed_status & 0xff);
2469	hba_status = detailed_status >> 8;
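	/*
	 * Example split (status value hypothetical): detailed_status =
	 * 0x0B02 yields
	 *
	 *	dev_status = 0x0B02 & 0xff;	// 0x02, SAM CHECK CONDITION
	 *	hba_status = 0x0B02 >> 8;	// 0x0B
	 *
	 * which drive the switch below and the sense-data copy respectively.
	 */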
2470
2471	// calculate resid for sg 
2472	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2473
2474	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2475
2476	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
2477
2478	if(!(reply_flags & MSG_FAIL)) {
2479		switch(detailed_status & I2O_SCSI_DSC_MASK) {
2480		case I2O_SCSI_DSC_SUCCESS:
2481			cmd->result = (DID_OK << 16);
2482			// handle underflow
2483			if (readl(reply+20) < cmd->underflow) {
2484				cmd->result = (DID_ERROR <<16);
2485				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2486			}
2487			break;
2488		case I2O_SCSI_DSC_REQUEST_ABORTED:
2489			cmd->result = (DID_ABORT << 16);
2490			break;
2491		case I2O_SCSI_DSC_PATH_INVALID:
2492		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2493		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2494		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2495		case I2O_SCSI_DSC_NO_ADAPTER:
2496		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2497			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2498				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2499			cmd->result = (DID_TIME_OUT << 16);
2500			break;
2501		case I2O_SCSI_DSC_ADAPTER_BUSY:
2502		case I2O_SCSI_DSC_BUS_BUSY:
2503			cmd->result = (DID_BUS_BUSY << 16);
2504			break;
2505		case I2O_SCSI_DSC_SCSI_BUS_RESET:
2506		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2507			cmd->result = (DID_RESET << 16);
2508			break;
2509		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2510			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2511			cmd->result = (DID_PARITY << 16);
2512			break;
2513		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2514		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2515		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2516		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2517		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2518		case I2O_SCSI_DSC_DATA_OVERRUN:
2519		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2520		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2521		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2522		case I2O_SCSI_DSC_PROVIDE_FAILURE:
2523		case I2O_SCSI_DSC_REQUEST_TERMINATED:
2524		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2525		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2526		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2527		case I2O_SCSI_DSC_INVALID_CDB:
2528		case I2O_SCSI_DSC_LUN_INVALID:
2529		case I2O_SCSI_DSC_SCSI_TID_INVALID:
2530		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2531		case I2O_SCSI_DSC_NO_NEXUS:
2532		case I2O_SCSI_DSC_CDB_RECEIVED:
2533		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2534		case I2O_SCSI_DSC_QUEUE_FROZEN:
2535		case I2O_SCSI_DSC_REQUEST_INVALID:
2536		default:
2537			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2538				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2539			       hba_status, dev_status, cmd->cmnd[0]);
2540			cmd->result = (DID_ERROR << 16);
2541			break;
2542		}
2543
2544		// copy over the request sense data if it was a check
2545		// condition status
2546		if (dev_status == SAM_STAT_CHECK_CONDITION) {
2547			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2548			// Copy over the sense data
2549			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2550			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ && 
2551			   cmd->sense_buffer[2] == DATA_PROTECT ){
2552				/* This is to handle a failed array */
2553				cmd->result = (DID_TIME_OUT << 16);
2554				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2555					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, 
2556					hba_status, dev_status, cmd->cmnd[0]);
2557
2558			}
2559		}
2560	} else {
2561		/* In this condition we could not talk to the tid -
2562		 * the card rejected it.  We should signal a retry
2563		 * for a limited number of retries.
2564		 */
2565		cmd->result = (DID_TIME_OUT << 16);
2566		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2567			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2568			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2569	}
2570
2571	cmd->result |= (dev_status);
2572
2573	if(cmd->scsi_done != NULL){
2574		cmd->scsi_done(cmd);
2575	} 
2576	return cmd->result;
2577}
2578
2579
2580static s32 adpt_rescan(adpt_hba* pHba)
2581{
2582	s32 rcode;
2583	ulong flags = 0;
2584
2585	if(pHba->host)
2586		spin_lock_irqsave(pHba->host->host_lock, flags);
2587	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2588		goto out;
2589	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2590		goto out;
2591	rcode = 0;
2592out:	if(pHba->host)
2593		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2594	return rcode;
2595}
2596
2597
2598static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2599{
2600	int i;
2601	int max;
2602	int tid;
2603	struct i2o_device *d;
2604	i2o_lct *lct = pHba->lct;
2605	u8 bus_no = 0;
2606	s16 scsi_id;
2607	s16 scsi_lun;
2608	u32 buf[10]; // at least 8 u32's
2609	struct adpt_device* pDev = NULL;
2610	struct i2o_device* pI2o_dev = NULL;
2611	
2612	if (lct == NULL) {
2613		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2614		return -1;
2615	}
2616	
2617	max = lct->table_size;	
2618	max -= 3;
2619	max /= 9;
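	/*
	 * The arithmetic above reflects the LCT layout: a 3-u32 header
	 * followed by 9-u32 entries.  E.g. a (hypothetical) table_size of
	 * 48 u32s gives (48 - 3) / 9 = 5 entries.
	 */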
2620
2621	// Mark each drive as unscanned
2622	for (d = pHba->devices; d; d = d->next) {
2623		pDev =(struct adpt_device*) d->owner;
2624		if(!pDev){
2625			continue;
2626		}
2627		pDev->state |= DPTI_DEV_UNSCANNED;
2628	}
2629
2630	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2631	
2632	for(i=0;i<max;i++) {
2633		if( lct->lct_entry[i].user_tid != 0xfff){
2634			continue;
2635		}
2636
2637		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2638		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2639		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2640			tid = lct->lct_entry[i].tid;
2641			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2642				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2643				continue;
2644			}
2645			bus_no = buf[0]>>16;
2646			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
2647				printk(KERN_WARNING
2648					"%s: Channel number %d out of range\n",
2649					pHba->name, bus_no);
2650				continue;
2651			}
2652
2653			scsi_id = buf[1];
2654			scsi_lun = (buf[2]>>8 )&0xff;
2655			pDev = pHba->channel[bus_no].device[scsi_id];
2656			/* walk the LUN chain for a match */
2657			while(pDev) {
2658				if(pDev->scsi_lun == scsi_lun) {
2659					break;
2660				}
2661				pDev = pDev->next_lun;
2662			}
2663			if(!pDev ) { // Something new - add it
2664				d = kmalloc(sizeof(struct i2o_device),
2665					    GFP_ATOMIC);
2666				if(d==NULL)
2667				{
2668					printk(KERN_CRIT "Out of memory for I2O device data.\n");
2669					return -ENOMEM;
2670				}
2671				
2672				d->controller = pHba;
2673				d->next = NULL;
2674
2675				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2676
2677				d->flags = 0;
2678				adpt_i2o_report_hba_unit(pHba, d);
2679				adpt_i2o_install_device(pHba, d);
2680	
2681				pDev = pHba->channel[bus_no].device[scsi_id];	
2682				if( pDev == NULL){
2683					pDev =
2684					  kzalloc(sizeof(struct adpt_device),
2685						  GFP_ATOMIC);
2686					if(pDev == NULL) {
2687						return -ENOMEM;
2688					}
2689					pHba->channel[bus_no].device[scsi_id] = pDev;
2690				} else {
2691					while (pDev->next_lun) {
2692						pDev = pDev->next_lun;
2693					}
2694					pDev = pDev->next_lun =
2695					  kzalloc(sizeof(struct adpt_device),
2696						  GFP_ATOMIC);
2697					if(pDev == NULL) {
2698						return -ENOMEM;
2699					}
2700				}
2701				pDev->tid = d->lct_data.tid;
2702				pDev->scsi_channel = bus_no;
2703				pDev->scsi_id = scsi_id;
2704				pDev->scsi_lun = scsi_lun;
2705				pDev->pI2o_dev = d;
2706				d->owner = pDev;
2707				pDev->type = (buf[0])&0xff;
2708				pDev->flags = (buf[0]>>8)&0xff;
2709				// Too late, SCSI system has made up its mind, but what the hey ...
2710				if(scsi_id > pHba->top_scsi_id){
2711					pHba->top_scsi_id = scsi_id;
2712				}
2713				if(scsi_lun > pHba->top_scsi_lun){
2714					pHba->top_scsi_lun = scsi_lun;
2715				}
2716				continue;
2717			} // end of new i2o device
2718
2719			// We found an old device - check it
2720			while(pDev) {
2721				if(pDev->scsi_lun == scsi_lun) {
2722					if(!scsi_device_online(pDev->pScsi_dev)) {
2723						printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2724								pHba->name,bus_no,scsi_id,scsi_lun);
2725						if (pDev->pScsi_dev) {
2726							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2727						}
2728					}
2729					d = pDev->pI2o_dev;
2730					if(d->lct_data.tid != tid) { // something changed
2731						pDev->tid = tid;
2732						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2733						if (pDev->pScsi_dev) {
2734							pDev->pScsi_dev->changed = TRUE;
2735							pDev->pScsi_dev->removable = TRUE;
2736						}
2737					}
2738					// Found it - mark it scanned
2739					pDev->state = DPTI_DEV_ONLINE;
2740					break;
2741				}
2742				pDev = pDev->next_lun;
2743			}
2744		}
2745	}
2746	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2747		pDev =(struct adpt_device*) pI2o_dev->owner;
2748		if(!pDev){
2749			continue;
2750		}
2751		// Set offline any drives that previously existed but could not be found
2752		// in the LCT table
2753		if (pDev->state & DPTI_DEV_UNSCANNED){
2754			pDev->state = DPTI_DEV_OFFLINE;
2755			printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2756			if (pDev->pScsi_dev) {
2757				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2758			}
2759		}
2760	}
2761	return 0;
2762}
2763
2764static void adpt_fail_posted_scbs(adpt_hba* pHba)
2765{
2766	struct scsi_cmnd* 	cmd = NULL;
2767	struct scsi_device* 	d = NULL;
2768
2769	shost_for_each_device(d, pHba->host) {
2770		unsigned long flags;
2771		spin_lock_irqsave(&d->list_lock, flags);
2772		list_for_each_entry(cmd, &d->cmd_list, list) {
2773			if(cmd->serial_number == 0){
2774				continue;
2775			}
2776			cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2777			cmd->scsi_done(cmd);
2778		}
2779		spin_unlock_irqrestore(&d->list_lock, flags);
2780	}
2781}
2782
2783
2784/*============================================================================
2785 *  Routines from i2o subsystem
2786 *============================================================================
2787 */
2788
2789
2790
2791/*
2792 *	Bring an I2O controller into HOLD state. See the spec.
2793 */
2794static int adpt_i2o_activate_hba(adpt_hba* pHba)
2795{
2796	int rcode;
2797
2798	if(pHba->initialized ) {
2799		if (adpt_i2o_status_get(pHba) < 0) {
2800			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2801				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2802				return rcode;
2803			}
2804			if (adpt_i2o_status_get(pHba) < 0) {
2805				printk(KERN_INFO "HBA not responding.\n");
2806				return -1;
2807			}
2808		}
2809
2810		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2811			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2812			return -1;
2813		}
2814
2815		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2816		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2817		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2818		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2819			adpt_i2o_reset_hba(pHba);			
2820			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2821				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2822				return -1;
2823			}
2824		}
2825	} else {
2826		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2827			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2828			return rcode;
2829		}
2830
2831	}
2832
2833	if (adpt_i2o_init_outbound_q(pHba) < 0) {
2834		return -1;
2835	}
2836
2837	/* In HOLD state */
2838	
2839	if (adpt_i2o_hrt_get(pHba) < 0) {
2840		return -1;
2841	}
2842
2843	return 0;
2844}
2845
2846/*
2847 *	Bring a controller online into OPERATIONAL state. 
2848 */
2849 
2850static int adpt_i2o_online_hba(adpt_hba* pHba)
2851{
2852	if (adpt_i2o_systab_send(pHba) < 0) {
2853		adpt_i2o_delete_hba(pHba);
2854		return -1;
2855	}
2856	/* In READY state */
2857
2858	if (adpt_i2o_enable_hba(pHba) < 0) {
2859		adpt_i2o_delete_hba(pHba);
2860		return -1;
2861	}
2862
2863	/* In OPERATIONAL state  */
2864	return 0;
2865}
2866
2867static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2868{
2869	u32 __iomem *msg;
2870	ulong timeout = jiffies + 5*HZ;
2871
2872	while(m == EMPTY_QUEUE){
2873		rmb();
2874		m = readl(pHba->post_port);
2875		if(m != EMPTY_QUEUE){
2876			break;
2877		}
2878		if(time_after(jiffies,timeout)){
2879			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2880			return 2;
2881		}
2882		schedule_timeout_uninterruptible(1);
2883	}
2884	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2885	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2886	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2887	writel( 0,&msg[2]);
2888	wmb();
2889
2890	writel(m, pHba->post_port);
2891	wmb();
2892	return 0;
2893}
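/*
 * Every inbound frame starts with a word like the one written above: the
 * frame size in u32s in the upper 16 bits, and the SGL offset plus I2O
 * version in the lower 16.  THREE_WORD_MSG_SIZE | SGL_OFFSET_0 therefore
 * describes a 3-word NOP with no scatter-gather list:
 *
 *	writel((3 << 16) | SGL_OFFSET_0, &msg[0]);	// equivalent form
 */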
2894
2895static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2896{
2897	u8 *status;
2898	dma_addr_t addr;
2899	u32 __iomem *msg = NULL;
2900	int i;
2901	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2902	u32 m;
2903
2904	do {
2905		rmb();
2906		m = readl(pHba->post_port);
2907		if (m != EMPTY_QUEUE) {
2908			break;
2909		}
2910
2911		if(time_after(jiffies,timeout)){
2912			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2913			return -ETIMEDOUT;
2914		}
2915		schedule_timeout_uninterruptible(1);
2916	} while(m == EMPTY_QUEUE);
2917
2918	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2919
2920	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2921	if (!status) {
2922		adpt_send_nop(pHba, m);
2923		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2924			pHba->name);
2925		return -ENOMEM;
2926	}
2927	memset(status, 0, 4);
2928
2929	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2930	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2931	writel(0, &msg[2]);
2932	writel(0x0106, &msg[3]);	/* Transaction context */
2933	writel(4096, &msg[4]);		/* Host page frame size */
2934	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
2935	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
2936	writel((u32)addr, &msg[7]);
2937
2938	writel(m, pHba->post_port);
2939	wmb();
2940
2941	// Wait for the reply status to come back
2942	do {
2943		if (*status) {
2944			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2945				break;
2946			}
2947		}
2948		rmb();
2949		if(time_after(jiffies,timeout)){
2950			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2951			/* We lose 4 bytes of "status" here, but we
2952			   cannot free these because the controller may
2953			   wake and corrupt those bytes at any time */
2954			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2955			return -ETIMEDOUT;
2956		}
2957		schedule_timeout_uninterruptible(1);
2958	} while (1);
2959
2960	// If the command was successful, fill the fifo with our reply
2961	// message packets
2962	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2963		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2964		return -2;
2965	}
2966	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2967
2968	if(pHba->reply_pool != NULL) {
2969		dma_free_coherent(&pHba->pDev->dev,
2970			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2971			pHba->reply_pool, pHba->reply_pool_pa);
2972	}
2973
2974	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2975				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2976				&pHba->reply_pool_pa, GFP_KERNEL);
2977	if (!pHba->reply_pool) {
2978		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2979		return -ENOMEM;
2980	}
2981	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2982
2983	for(i = 0; i < pHba->reply_fifo_size; i++) {
2984		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2985			pHba->reply_port);
2986		wmb();
2987	}
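	/*
	 * Priming sketch: the loop above hands the controller one MFA per
	 * reply frame.  With a (hypothetical) reply_pool_pa of 0x2000000 the
	 * FIFO receives 0x2000000, 0x2000000 + REPLY_FRAME_SIZE*4,
	 * 0x2000000 + 2*REPLY_FRAME_SIZE*4, and so on.
	 */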
2988	adpt_i2o_status_get(pHba);
2989	return 0;
2990}
2991
2992
2993/*
2994 * I2O System Table.  Contains information about
2995 * all the IOPs in the system.  Used to inform IOPs
2996 * about each other's existence.
2997 *
2998 * sys_tbl_ver is the CurrentChangeIndicator that is
2999 * used by IOPs to track changes.
3000 */
3001
3002
3003
3004static s32 adpt_i2o_status_get(adpt_hba* pHba)
3005{
3006	ulong timeout;
3007	u32 m;
3008	u32 __iomem *msg;
3009	u8 *status_block=NULL;
3010
3011	if(pHba->status_block == NULL) {
3012		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
3013					sizeof(i2o_status_block),
3014					&pHba->status_block_pa, GFP_KERNEL);
3015		if(pHba->status_block == NULL) {
3016			printk(KERN_ERR
3017			"dpti%d: Get Status Block failed; Out of memory.\n",
3018			pHba->unit);
3019			return -ENOMEM;
3020		}
3021	}
3022	memset(pHba->status_block, 0, sizeof(i2o_status_block));
3023	status_block = (u8*)(pHba->status_block);
3024	timeout = jiffies+TMOUT_GETSTATUS*HZ;
3025	do {
3026		rmb();
3027		m = readl(pHba->post_port);
3028		if (m != EMPTY_QUEUE) {
3029			break;
3030		}
3031		if(time_after(jiffies,timeout)){
3032			printk(KERN_ERR "%s: Timeout waiting for message !\n",
3033					pHba->name);
3034			return -ETIMEDOUT;
3035		}
3036		schedule_timeout_uninterruptible(1);
3037	} while(m==EMPTY_QUEUE);
3038
3039	
3040	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
3041
3042	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
3043	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
3044	writel(1, &msg[2]);
3045	writel(0, &msg[3]);
3046	writel(0, &msg[4]);
3047	writel(0, &msg[5]);
3048	writel( dma_low(pHba->status_block_pa), &msg[6]);
3049	writel( dma_high(pHba->status_block_pa), &msg[7]);
3050	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
3051
3052	//post message
3053	writel(m, pHba->post_port);
3054	wmb();
3055
3056	while(status_block[87]!=0xff){
3057		if(time_after(jiffies,timeout)){
3058			printk(KERN_ERR"dpti%d: Get status timeout.\n",
3059				pHba->unit);
3060			return -ETIMEDOUT;
3061		}
3062		rmb();
3063		schedule_timeout_uninterruptible(1);
3064	}
3065
3066	// Set up our number of outbound and inbound messages
3067	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
3068	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
3069		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
3070	}
3071
3072	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
3073	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
3074		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
3075	}
3076
3077	// Calculate the Scatter Gather list size
3078	if (dpt_dma64(pHba)) {
3079		pHba->sg_tablesize
3080		  = ((pHba->status_block->inbound_frame_size * 4
3081		  - 14 * sizeof(u32))
3082		  / (sizeof(struct sg_simple_element) + sizeof(u32)));
3083	} else {
3084		pHba->sg_tablesize
3085		  = ((pHba->status_block->inbound_frame_size * 4
3086		  - 12 * sizeof(u32))
3087		  / sizeof(struct sg_simple_element));
3088	}
3089	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3090		pHba->sg_tablesize = SG_LIST_ELEMENTS;
3091	}
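	/*
	 * Worked example (frame size hypothetical): with inbound_frame_size
	 * = 32 u32s (128 bytes), the 32-bit branch above gives
	 * (128 - 12*4) / sizeof(struct sg_simple_element) = 80 / 8 = 10 SG
	 * entries per message, before the SG_LIST_ELEMENTS clamp.
	 */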
3092
3093
3094#ifdef DEBUG
3095	printk("dpti%d: State = ",pHba->unit);
3096	switch(pHba->status_block->iop_state) {
3097		case 0x01:
3098			printk("INIT\n");
3099			break;
3100		case 0x02:
3101			printk("RESET\n");
3102			break;
3103		case 0x04:
3104			printk("HOLD\n");
3105			break;
3106		case 0x05:
3107			printk("READY\n");
3108			break;
3109		case 0x08:
3110			printk("OPERATIONAL\n");
3111			break;
3112		case 0x10:
3113			printk("FAILED\n");
3114			break;
3115		case 0x11:
3116			printk("FAULTED\n");
3117			break;
3118		default:
3119			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3120	}
3121#endif
3122	return 0;
3123}
3124
3125/*
3126 * Get the IOP's Logical Configuration Table
3127 */
3128static int adpt_i2o_lct_get(adpt_hba* pHba)
3129{
3130	u32 msg[8];
3131	int ret;
3132	u32 buf[16];
3133
3134	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3135		pHba->lct_size = pHba->status_block->expected_lct_size;
3136	}
3137	do {
3138		if (pHba->lct == NULL) {
3139			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3140					pHba->lct_size, &pHba->lct_pa,
3141					GFP_ATOMIC);
3142			if(pHba->lct == NULL) {
3143				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3144					pHba->name);
3145				return -ENOMEM;
3146			}
3147		}
3148		memset(pHba->lct, 0, pHba->lct_size);
3149
3150		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3151		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3152		msg[2] = 0;
3153		msg[3] = 0;
3154		msg[4] = 0xFFFFFFFF;	/* All devices */
3155		msg[5] = 0x00000000;	/* Report now */
3156		msg[6] = 0xD0000000|pHba->lct_size;
3157		msg[7] = (u32)pHba->lct_pa;
3158
3159		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3160			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3161				pHba->name, ret);	
3162			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3163			return ret;
3164		}
3165
3166		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3167			pHba->lct_size = pHba->lct->table_size << 2;
3168			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3169					pHba->lct, pHba->lct_pa);
3170			pHba->lct = NULL;
3171		}
3172	} while (pHba->lct == NULL);
3173
3174	PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3175
3176
3177	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3178	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3179		pHba->FwDebugBufferSize = buf[1];
3180		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3181						pHba->FwDebugBufferSize);
3182		if (pHba->FwDebugBuffer_P) {
3183			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
3184							FW_DEBUG_FLAGS_OFFSET;
3185			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3186							FW_DEBUG_BLED_OFFSET;
3187			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
3188			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3189						FW_DEBUG_STR_LENGTH_OFFSET;
3190			pHba->FwDebugBuffer_P += buf[2]; 
3191			pHba->FwDebugFlags = 0;
3192		}
3193	}
3194
3195	return 0;
3196}
3197
3198static int adpt_i2o_build_sys_table(void)
3199{
3200	adpt_hba* pHba = hba_chain;
3201	int count = 0;
3202
3203	if (sys_tbl)
3204		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3205					sys_tbl, sys_tbl_pa);
3206
3207	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
3208				(hba_count) * sizeof(struct i2o_sys_tbl_entry);
3209
3210	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3211				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3212	if (!sys_tbl) {
3213		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");	
3214		return -ENOMEM;
3215	}
3216	memset(sys_tbl, 0, sys_tbl_len);
3217
3218	sys_tbl->num_entries = hba_count;
3219	sys_tbl->version = I2OVERSION;
3220	sys_tbl->change_ind = sys_tbl_ind++;
3221
3222	for(pHba = hba_chain; pHba; pHba = pHba->next) {
3223		u64 addr;
3224		// Get updated Status Block so we have the latest information
3225		if (adpt_i2o_status_get(pHba)) {
3226			sys_tbl->num_entries--;
3227			continue; // try next one	
3228		}
3229
3230		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3231		sys_tbl->iops[count].iop_id = pHba->unit + 2;
3232		sys_tbl->iops[count].seg_num = 0;
3233		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3234		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3235		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3236		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3237		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3238		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3239		addr = pHba->base_addr_phys + 0x40;
3240		sys_tbl->iops[count].inbound_low = dma_low(addr);
3241		sys_tbl->iops[count].inbound_high = dma_high(addr);
3242
3243		count++;
3244	}
3245
3246#ifdef DEBUG
3247{
3248	u32 *table = (u32*)sys_tbl;
3249	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3250	for(count = 0; count < (sys_tbl_len >>2); count++) {
3251		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", 
3252			count, table[count]);
3253	}
3254}
3255#endif
3256
3257	return 0;
3258}
3259
3260
3261/*
3262 *	 Dump the information block associated with a given unit (TID)
3263 */
3264 
3265static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3266{
3267	char buf[64];
3268	int unit = d->lct_data.tid;
3269
3270	printk(KERN_INFO "TID %3.3d ", unit);
3271
3272	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3273	{
3274		buf[16]=0;
3275		printk(" Vendor: %-12.12s", buf);
3276	}
3277	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3278	{
3279		buf[16]=0;
3280		printk(" Device: %-12.12s", buf);
3281	}
3282	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3283	{
3284		buf[8]=0;
3285		printk(" Rev: %-12.12s\n", buf);
3286	}
3287#ifdef DEBUG
3288	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3289	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3290	 printk(KERN_INFO "\tFlags: ");
3291
3292	 if(d->lct_data.device_flags&(1<<0))
3293		  printk("C");	     // ConfigDialog requested
3294	 if(d->lct_data.device_flags&(1<<1))
3295		  printk("U");	     // Multi-user capable
3296	 if(!(d->lct_data.device_flags&(1<<4)))
3297		  printk("P");	     // Peer service enabled!
3298	 if(!(d->lct_data.device_flags&(1<<5)))
3299		  printk("M");	     // Mgmt service enabled!
3300	 printk("\n");
3301#endif
3302}
3303
3304#ifdef DEBUG
3305/*
3306 *	Do i2o class name lookup
3307 */
3308static const char *adpt_i2o_get_class_name(int class)
3309{
3310	int idx = 16;
3311	static char *i2o_class_name[] = {
3312		"Executive",
3313		"Device Driver Module",
3314		"Block Device",
3315		"Tape Device",
3316		"LAN Interface",
3317		"WAN Interface",
3318		"Fibre Channel Port",
3319		"Fibre Channel Device",
3320		"SCSI Device",
3321		"ATE Port",
3322		"ATE Device",
3323		"Floppy Controller",
3324		"Floppy Device",
3325		"Secondary Bus Port",
3326		"Peer Transport Agent",
3327		"Peer Transport",
3328		"Unknown"
3329	};
3330	
3331	switch(class&0xFFF) {
3332	case I2O_CLASS_EXECUTIVE:
3333		idx = 0; break;
3334	case I2O_CLASS_DDM:
3335		idx = 1; break;
3336	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3337		idx = 2; break;
3338	case I2O_CLASS_SEQUENTIAL_STORAGE:
3339		idx = 3; break;
3340	case I2O_CLASS_LAN:
3341		idx = 4; break;
3342	case I2O_CLASS_WAN:
3343		idx = 5; break;
3344	case I2O_CLASS_FIBRE_CHANNEL_PORT:
3345		idx = 6; break;
3346	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3347		idx = 7; break;
3348	case I2O_CLASS_SCSI_PERIPHERAL:
3349		idx = 8; break;
3350	case I2O_CLASS_ATE_PORT:
3351		idx = 9; break;
3352	case I2O_CLASS_ATE_PERIPHERAL:
3353		idx = 10; break;
3354	case I2O_CLASS_FLOPPY_CONTROLLER:
3355		idx = 11; break;
3356	case I2O_CLASS_FLOPPY_DEVICE:
3357		idx = 12; break;
3358	case I2O_CLASS_BUS_ADAPTER_PORT:
3359		idx = 13; break;
3360	case I2O_CLASS_PEER_TRANSPORT_AGENT:
3361		idx = 14; break;
3362	case I2O_CLASS_PEER_TRANSPORT:
3363		idx = 15; break;
3364	}
3365	return i2o_class_name[idx];
3366}
3367#endif
3368
3369
3370static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3371{
3372	u32 msg[6];
3373	int ret, size = sizeof(i2o_hrt);
3374
3375	do {
3376		if (pHba->hrt == NULL) {
3377			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3378					size, &pHba->hrt_pa, GFP_KERNEL);
3379			if (pHba->hrt == NULL) {
3380				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3381				return -ENOMEM;
3382			}
3383		}
3384
3385		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3386		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3387		msg[2]= 0;
3388		msg[3]= 0;
3389		msg[4]= (0xD0000000 | size);    /* Simple transaction */
3390		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */
3391
3392		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3393			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3394			return ret;
3395		}
3396
3397		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3398			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3399			dma_free_coherent(&pHba->pDev->dev, size,
3400				pHba->hrt, pHba->hrt_pa);
3401			size = newsize;
3402			pHba->hrt = NULL;
3403		}
3404	} while(pHba->hrt == NULL);
3405	return 0;
3406}
3407
3408/*
3409 *	 Query one scalar group value or a whole scalar group.
3410 */		    	
3411static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid, 
3412			int group, int field, void *buf, int buflen)
3413{
3414	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3415	u8 *opblk_va;
3416	dma_addr_t opblk_pa;
3417	u8 *resblk_va;
3418	dma_addr_t resblk_pa;
3419
3420	int size;
3421
3422	/* 8 bytes for header */
3423	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3424			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3425	if (resblk_va == NULL) {
3426		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3427		return -ENOMEM;
3428	}
3429
3430	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3431			sizeof(opblk), &opblk_pa, GFP_KERNEL);
3432	if (opblk_va == NULL) {
3433		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3434			resblk_va, resblk_pa);
3435		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3436			pHba->name);
3437		return -ENOMEM;
3438	}
3439	if (field == -1)  		/* whole group */
3440			opblk[4] = -1;
3441
3442	memcpy(opblk_va, opblk, sizeof(opblk));
3443	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid, 
3444		opblk_va, opblk_pa, sizeof(opblk),
3445		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3446	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3447	if (size == -ETIME) {
3448		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3449							resblk_va, resblk_pa);
3450		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3451		return -ETIME;
3452	} else if (size == -EINTR) {
3453		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3454							resblk_va, resblk_pa);
3455		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3456		return -EINTR;
3457	}
3458			
3459	memcpy(buf, resblk_va+8, buflen);  /* cut off header */
3460
3461	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3462						resblk_va, resblk_pa);
3463	if (size < 0)
3464		return size;	
3465
3466	return buflen;
3467}
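/*
 * The operation block built above is six u16s, per the I2O params-get
 * layout: operation count (1), reserved (0), operation code
 * (I2O_PARAMS_FIELD_GET), group number, field count (1, rewritten to -1
 * when the whole group is wanted) and the field index.
 */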
3468
3469
3470/*	Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3471 *
3472 *	This function can be used for all UtilParamsGet/Set operations.
3473 *	The OperationBlock is given in opblk-buffer, 
3474 *	and results are returned in resblk-buffer.
3475 *	Note that the minimum sized resblk is 8 bytes and contains
3476 *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3477 */
3478static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, 
3479		  void *opblk_va,  dma_addr_t opblk_pa, int oplen,
3480		void *resblk_va, dma_addr_t resblk_pa, int reslen)
3481{
3482	u32 msg[9]; 
3483	u32 *res = (u32 *)resblk_va;
3484	int wait_status;
3485
3486	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3487	msg[1] = cmd << 24 | HOST_TID << 12 | tid; 
3488	msg[2] = 0;
3489	msg[3] = 0;
3490	msg[4] = 0;
3491	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
3492	msg[6] = (u32)opblk_pa;
3493	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
3494	msg[8] = (u32)resblk_pa;
3495
3496	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3497		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3498   		return wait_status; 	/* -DetailedStatus */
3499	}
3500
3501	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
3502		printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
3503			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3504			pHba->name,
3505			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3506							 : "PARAMS_GET",   
3507			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3508		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3509	}
3510
3511	 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */ 
3512}
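/*
 * resblk header decode used above: res[1] carries ErrorInfoSize in its
 * top byte, BlockStatus below it and BlockSize (in u32s) in the low 16
 * bits.  E.g. a (hypothetical) res[1] of 0x00000005 is a successful
 * 5-u32 result, so the function returns 4 + (5 << 2) = 24 bytes.
 */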
3513
3514
3515static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3516{
3517	u32 msg[4];
3518	int ret;
3519
3520	adpt_i2o_status_get(pHba);
3521
3522	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3523
3524	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3525   	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3526		return 0;
3527	}
3528
3529	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3530	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3531	msg[2] = 0;
3532	msg[3] = 0;
3533
3534	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3535		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3536				pHba->unit, -ret);
3537	} else {
3538		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3539	}
3540
3541	adpt_i2o_status_get(pHba);
3542	return ret;
3543}
3544
3545
3546/* 
3547 * Enable IOP. Allows the IOP to resume external operations.
3548 */
3549static int adpt_i2o_enable_hba(adpt_hba* pHba)
3550{
3551	u32 msg[4];
3552	int ret;
3553	
3554	adpt_i2o_status_get(pHba);
3555	if(!pHba->status_block){
3556		return -ENOMEM;
3557	}
3558	/* Enable only allowed on READY state */
3559	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3560		return 0;
3561
3562	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3563		return -EINVAL;
3564
3565	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3566	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3567	msg[2]= 0;
3568	msg[3]= 0;
3569
3570	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3571		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n", 
3572			pHba->name, ret);
3573	} else {
3574		PDEBUG("%s: Enabled.\n", pHba->name);
3575	}
3576
3577	adpt_i2o_status_get(pHba);
3578	return ret;
3579}
3580
3581
3582static int adpt_i2o_systab_send(adpt_hba* pHba)
3583{
3584	 u32 msg[12];
3585	 int ret;
3586
3587	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3588	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3589	msg[2] = 0;
3590	msg[3] = 0;
3591	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3592	msg[5] = 0;				   /* Segment 0 */
3593
3594	/* 
3595	 * Provide three SGL-elements:
3596	 * System table (SysTab), Private memory space declaration and 
3597	 * Private i/o space declaration  
3598	 */
3599	msg[6] = 0x54000000 | sys_tbl_len;
3600	msg[7] = (u32)sys_tbl_pa;
3601	msg[8] = 0x54000000 | 0;
3602	msg[9] = 0;
3603	msg[10] = 0xD4000000 | 0;
3604	msg[11] = 0;
3605
3606	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3607		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n", 
3608			pHba->name, ret);
3609	}
3610#ifdef DEBUG
3611	else {
3612		PINFO("%s: SysTab set.\n", pHba->name);
3613	}
3614#endif
3615
3616	return ret;	
3617 }
3618
3619
3620/*============================================================================
3621 *
3622 *============================================================================
3623 */
3624
3625
3626#ifdef UARTDELAY 
3627
3628	static void adpt_delay(int millisec)
3629{
3630	int i;
3631	for (i = 0; i < millisec; i++) {
3632		udelay(1000);	/* delay for one millisecond */
3633	}
3634}
3635
3636#endif
3637
3638static struct scsi_host_template driver_template = {
3639	.module			= THIS_MODULE,
3640	.name			= "dpt_i2o",
3641	.proc_name		= "dpt_i2o",
3642	.proc_info		= adpt_proc_info,
3643	.info			= adpt_info,
3644	.queuecommand		= adpt_queue,
3645	.eh_abort_handler	= adpt_abort,
3646	.eh_device_reset_handler = adpt_device_reset,
3647	.eh_bus_reset_handler	= adpt_bus_reset,
3648	.eh_host_reset_handler	= adpt_reset,
3649	.bios_param		= adpt_bios_param,
3650	.slave_configure	= adpt_slave_configure,
3651	.can_queue		= MAX_TO_IOP_MESSAGES,
3652	.this_id		= 7,
3653	.cmd_per_lun		= 1,
3654	.use_clustering		= ENABLE_CLUSTERING,
3655};
3656
3657static int __init adpt_init(void)
3658{
3659	int		error;
3660	adpt_hba	*pHba, *next;
3661
3662	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3663
3664	error = adpt_detect(&driver_template);
3665	if (error < 0)
3666		return error;
3667	if (hba_chain == NULL)
3668		return -ENODEV;
3669
3670	for (pHba = hba_chain; pHba; pHba = pHba->next) {
3671		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3672		if (error)
3673			goto fail;
3674		scsi_scan_host(pHba->host);
3675	}
3676	return 0;
3677fail:
3678	for (pHba = hba_chain; pHba; pHba = next) {
3679		next = pHba->next;
3680		scsi_remove_host(pHba->host);
3681	}
3682	return error;
3683}
3684
3685static void __exit adpt_exit(void)
3686{
3687	adpt_hba	*pHba, *next;
3688
3689	for (pHba = hba_chain; pHba; pHba = pHba->next)
3690		scsi_remove_host(pHba->host);
3691	for (pHba = hba_chain; pHba; pHba = next) {
3692		next = pHba->next;
3693		adpt_release(pHba->host);
3694	}
3695}
3696
3697module_init(adpt_init);
3698module_exit(adpt_exit);
3699
3700MODULE_LICENSE("GPL");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/***************************************************************************
   3                          dpti.c  -  description
   4                             -------------------
   5    begin                : Thu Sep 7 2000
   6    copyright            : (C) 2000 by Adaptec
   7
   8			   July 30, 2001 First version being submitted
   9			   for inclusion in the kernel.  V2.4
  10
  11    See Documentation/scsi/dpti.rst for history, notes, license info
  12    and credits
  13 ***************************************************************************/
  14
  15/***************************************************************************
  16 *                                                                         *
 
 
 
 
  17 *                                                                         *
  18 ***************************************************************************/
  19/***************************************************************************
  20 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
  21 - Support 2.6 kernel and DMA-mapping
  22 - ioctl fix for raid tools
  23 - use schedule_timeout in long long loop
  24 **************************************************************************/
  25
  26/*#define DEBUG 1 */
  27/*#define UARTDELAY 1 */
  28
  29#include <linux/module.h>
  30#include <linux/pgtable.h>
  31
  32MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
  33MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
  34
  35////////////////////////////////////////////////////////////////
  36
  37#include <linux/ioctl.h>	/* For SCSI-Passthrough */
  38#include <linux/uaccess.h>
  39
  40#include <linux/stat.h>
  41#include <linux/slab.h>		/* for kmalloc() */
  42#include <linux/pci.h>		/* for PCI support */
  43#include <linux/proc_fs.h>
  44#include <linux/blkdev.h>
  45#include <linux/delay.h>	/* for udelay */
  46#include <linux/interrupt.h>
  47#include <linux/kernel.h>	/* for printk */
  48#include <linux/sched.h>
  49#include <linux/reboot.h>
  50#include <linux/spinlock.h>
  51#include <linux/dma-mapping.h>
  52
  53#include <linux/timer.h>
  54#include <linux/string.h>
  55#include <linux/ioport.h>
  56#include <linux/mutex.h>
  57
  58#include <asm/processor.h>	/* for boot_cpu_data */
  59#include <asm/io.h>		/* for virt_to_bus, etc. */
  60
  61#include <scsi/scsi.h>
  62#include <scsi/scsi_cmnd.h>
  63#include <scsi/scsi_device.h>
  64#include <scsi/scsi_host.h>
  65#include <scsi/scsi_tcq.h>
  66
  67#include "dpt/dptsig.h"
  68#include "dpti.h"
  69
  70/*============================================================================
  71 * Create a binary signature - this is read by dptsig
  72 * Needed for our management apps
  73 *============================================================================
  74 */
  75static DEFINE_MUTEX(adpt_mutex);
  76static dpt_sig_S DPTI_sig = {
  77	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
  78#ifdef __i386__
  79	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
  80#elif defined(__ia64__)
  81	PROC_INTEL, PROC_IA64,
  82#elif defined(__sparc__)
  83	PROC_ULTRASPARC, PROC_ULTRASPARC,
  84#elif defined(__alpha__)
  85	PROC_ALPHA, PROC_ALPHA,
  86#else
  87	(-1),(-1),
  88#endif
  89	 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
  90	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
  91	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
  92};
  93
  94
  95
  96
  97/*============================================================================
  98 * Globals
  99 *============================================================================
 100 */
 101
 102static DEFINE_MUTEX(adpt_configuration_lock);
 103
 104static struct i2o_sys_tbl *sys_tbl;
 105static dma_addr_t sys_tbl_pa;
 106static int sys_tbl_ind;
 107static int sys_tbl_len;
 108
 109static adpt_hba* hba_chain = NULL;
 110static int hba_count = 0;
 111
 112static struct class *adpt_sysfs_class;
 113
 114static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
 115#ifdef CONFIG_COMPAT
 116static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
 117#endif
 118
 119static const struct file_operations adpt_fops = {
 120	.unlocked_ioctl	= adpt_unlocked_ioctl,
 121	.open		= adpt_open,
 122	.release	= adpt_close,
 123#ifdef CONFIG_COMPAT
 124	.compat_ioctl	= compat_adpt_ioctl,
 125#endif
 126	.llseek		= noop_llseek,
 127};
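
/*
 * The control interface registers DPTI_I2O_MAJOR in adpt_detect() but,
 * as noted there, the /dev nodes are not created by the driver itself.
 * Illustrative example only (minors follow the "dpti%d" unit numbers):
 *
 *	mknod /dev/dpti0 c <major> 0	(major = DPTI_I2O_MAJOR)
 */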
 128
 129/* Structures and definitions for synchronous message posting.
 130 * See adpt_i2o_post_wait() for description
 131 * */
 132struct adpt_i2o_post_wait_data
 133{
 134	int status;
 135	u32 id;
 136	adpt_wait_queue_head_t *wq;
 137	struct adpt_i2o_post_wait_data *next;
 138};
 139
 140static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
 141static u32 adpt_post_wait_id = 0;
 142static DEFINE_SPINLOCK(adpt_post_wait_lock);
 143
 144
 145/*============================================================================
 146 * 				Functions
 147 *============================================================================
 148 */
 149
 150static inline int dpt_dma64(adpt_hba *pHba)
 151{
 152	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
 153}
 154
 155static inline u32 dma_high(dma_addr_t addr)
 156{
 157	return upper_32_bits(addr);
 158}
 159
 160static inline u32 dma_low(dma_addr_t addr)
 161{
 162	return (u32)addr;
 163}
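
/*
 * Usage sketch for the helpers above: a 64-bit SGE carries the bus
 * address as two 32-bit words, low word first, e.g. in adpt_inquiry():
 *
 *	*mptr++ = dma_low(addr);
 *	*mptr++ = dma_high(addr);
 *
 * On 32-bit builds dpt_dma64() is compile-time false, so the 64-bit
 * branches drop out.
 */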
 164
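/* Read the firmware "blink LED" fault code. The 0xbc byte appears to
 * act as a value-valid magic (an assumption drawn from the check
 * below); a return of 0 means no fault code is posted. */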
 165static u8 adpt_read_blink_led(adpt_hba* host)
 166{
 167	if (host->FwDebugBLEDflag_P) {
 168		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
 169			return readb(host->FwDebugBLEDvalue_P);
 170		}
 171	}
 172	return 0;
 173}
 174
 175/*============================================================================
 176 * Scsi host template interface functions
 177 *============================================================================
 178 */
 179
 180#ifdef MODULE
 181static struct pci_device_id dptids[] = {
 182	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 183	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 184	{ 0, }
 185};
 186#endif
 187
 188MODULE_DEVICE_TABLE(pci,dptids);
 189
 190static int adpt_detect(struct scsi_host_template* sht)
 191{
 192	struct pci_dev *pDev = NULL;
 193	adpt_hba *pHba;
 194	adpt_hba *next;
 195
 196	PINFO("Detecting Adaptec I2O RAID controllers...\n");
 197
 198	/* search for all Adaptec I2O RAID cards */
 199	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
 200		if(pDev->device == PCI_DPT_DEVICE_ID ||
 201		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
 202			if(adpt_install_hba(sht, pDev) ){
 203				PERROR("Could not Init an I2O RAID device\n");
 204				PERROR("Will not try to detect others.\n");
 205				return hba_count-1;
 206			}
 207			pci_dev_get(pDev);
 208		}
 209	}
 210
 211	/* In INIT state, Activate IOPs */
 212	for (pHba = hba_chain; pHba; pHba = next) {
 213		next = pHba->next;
 214		// Activate performs get-status, init-outbound, and get-HRT
 215		if (adpt_i2o_activate_hba(pHba) < 0) {
 216			adpt_i2o_delete_hba(pHba);
 217		}
 218	}
 219
 220
 221	/* Active IOPs in HOLD state */
 222
 223rebuild_sys_tab:
 224	if (hba_chain == NULL) 
 225		return 0;
 226
 227	/*
 228	 * If build_sys_table fails, we kill everything and bail
 229	 * as we can't init the IOPs w/o a system table
 230	 */	
 231	if (adpt_i2o_build_sys_table() < 0) {
 232		adpt_i2o_sys_shutdown();
 233		return 0;
 234	}
 235
 236	PDEBUG("HBA's in HOLD state\n");
 237
 238	/* If an IOP doesn't come online, we need to rebuild the system table */
 239	for (pHba = hba_chain; pHba; pHba = pHba->next) {
 240		if (adpt_i2o_online_hba(pHba) < 0) {
 241			adpt_i2o_delete_hba(pHba);	
 242			goto rebuild_sys_tab;
 243		}
 244	}
 245
 246	/* Active IOPs now in OPERATIONAL state */
 247	PDEBUG("HBA's in OPERATIONAL state\n");
 248
 249	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
 250	for (pHba = hba_chain; pHba; pHba = next) {
 251		next = pHba->next;
 252		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
 253		if (adpt_i2o_lct_get(pHba) < 0){
 254			adpt_i2o_delete_hba(pHba);
 255			continue;
 256		}
 257
 258		if (adpt_i2o_parse_lct(pHba) < 0){
 259			adpt_i2o_delete_hba(pHba);
 260			continue;
 261		}
 262		adpt_inquiry(pHba);
 263	}
 264
 265	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
 266	if (IS_ERR(adpt_sysfs_class)) {
 267		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
 268		adpt_sysfs_class = NULL;
 269	}
 270
 271	for (pHba = hba_chain; pHba; pHba = next) {
 272		next = pHba->next;
 273		if (adpt_scsi_host_alloc(pHba, sht) < 0){
 274			adpt_i2o_delete_hba(pHba);
 275			continue;
 276		}
 277		pHba->initialized = TRUE;
 278		pHba->state &= ~DPTI_STATE_RESET;
 279		if (adpt_sysfs_class) {
 280			struct device *dev = device_create(adpt_sysfs_class,
 281				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
 282				"dpti%d", pHba->unit);
 283			if (IS_ERR(dev)) {
 284				printk(KERN_WARNING"dpti%d: unable to "
 285					"create device in dpt_i2o class\n",
 286					pHba->unit);
 287			}
 288		}
 289	}
 290
 291	// Register our control device node
 292	// nodes will need to be created in /dev to access this
 293	// the nodes can not be created from within the driver
 294	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
 295		adpt_i2o_sys_shutdown();
 296		return 0;
 297	}
 298	return hba_count;
 299}
 300
 301
 302static void adpt_release(adpt_hba *pHba)
 303{
 304	struct Scsi_Host *shost = pHba->host;
 305
 306	scsi_remove_host(shost);
 307//	adpt_i2o_quiesce_hba(pHba);
 308	adpt_i2o_delete_hba(pHba);
 309	scsi_host_put(shost);
 310}
 311
 312
 313static void adpt_inquiry(adpt_hba* pHba)
 314{
 315	u32 msg[17]; 
 316	u32 *mptr;
 317	u32 *lenptr;
 318	int direction;
 319	int scsidir;
 320	u32 len;
 321	u32 reqlen;
 322	u8* buf;
 323	dma_addr_t addr;
 324	u8  scb[16];
 325	s32 rcode;
 326
 327	memset(msg, 0, sizeof(msg));
 328	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
 329	if(!buf){
 330		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
 331		return;
 332	}
 333	memset((void*)buf, 0, 36);
 334	
 335	len = 36;
 336	direction = 0x00000000;	
 337	scsidir  =0x40000000;	// DATA IN  (iop<--dev)
 338
 339	if (dpt_dma64(pHba))
 340		reqlen = 17;		// SINGLE SGE, 64 bit
 341	else
 342		reqlen = 14;		// SINGLE SGE, 32 bit
 343	/* Stick the headers on */
 344	msg[0] = reqlen<<16 | SGL_OFFSET_12;
 345	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
 346	msg[2] = 0;
 347	msg[3]  = 0;
 348	// Adaptec/DPT Private stuff 
 349	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
 350	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
 351	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
 352	// I2O_SCB_FLAG_ENABLE_DISCONNECT | 
 353	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | 
 354	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
 355	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
 356
 357	mptr=msg+7;
 358
 359	memset(scb, 0, sizeof(scb));
 360	// Write SCSI command into the message - always 16 byte block 
 361	scb[0] = INQUIRY;
 362	scb[1] = 0;
 363	scb[2] = 0;
 364	scb[3] = 0;
 365	scb[4] = 36;
 366	scb[5] = 0;
 367	// Don't care about the rest of scb
 368
 369	memcpy(mptr, scb, sizeof(scb));
 370	mptr+=4;
 371	lenptr=mptr++;		/* Remember me - fill in when we know */
 372
 373	/* Now fill in the SGList and command */
 374	*lenptr = len;
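	/* The 0xD0000000 below is assumed to combine the I2O SGL flags:
	 * simple element (0x10000000, the flag also tested in
	 * adpt_i2o_passthru()), last element and end-of-buffer. */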
 375	if (dpt_dma64(pHba)) {
 376		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
 377		*mptr++ = 1 << PAGE_SHIFT;
 378		*mptr++ = 0xD0000000|direction|len;
 379		*mptr++ = dma_low(addr);
 380		*mptr++ = dma_high(addr);
 381	} else {
 382		*mptr++ = 0xD0000000|direction|len;
 383		*mptr++ = addr;
 384	}
 385
 386	// Send it on its way
 387	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
 388	if (rcode != 0) {
 389		sprintf(pHba->detail, "Adaptec I2O RAID");
 390		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
 391		if (rcode != -ETIME && rcode != -EINTR)
 392			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
 393	} else {
 394		memset(pHba->detail, 0, sizeof(pHba->detail));
 395		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
 396		memcpy(&(pHba->detail[16]), " Model: ", 8);
 397		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
 398		memcpy(&(pHba->detail[40]), " FW: ", 4);
 399		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
 400		pHba->detail[48] = '\0';	/* precautionary */
 401		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
 402	}
 403	adpt_i2o_status_get(pHba);
 404	return ;
 405}
 406
 407
 408static int adpt_slave_configure(struct scsi_device * device)
 409{
 410	struct Scsi_Host *host = device->host;
 411
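	/* One inbound-queue slot seems to be held back for internal use,
	 * hence can_queue - 1 below; that is an inference from the
	 * arithmetic, not a documented constraint. */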
 412	if (host->can_queue && device->tagged_supported) {
 413		scsi_change_queue_depth(device,
 414				host->can_queue - 1);
 415	}
 416	return 0;
 417}
 418
 419static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
 420{
 421	adpt_hba* pHba = NULL;
 422	struct adpt_device* pDev = NULL;	/* dpt per device information */
 423
 424	cmd->scsi_done = done;
 425	/*
 426	 * SCSI REQUEST_SENSE commands will be executed automatically by the 
 427	 * Host Adapter for any errors, so they should not be executed 
 428	 * explicitly unless the Sense Data is zero indicating that no error 
 429	 * occurred.
 430	 */
 431
 432	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
 433		cmd->result = (DID_OK << 16);
 434		cmd->scsi_done(cmd);
 435		return 0;
 436	}
 437
 438	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
 439	if (!pHba) {
 440		return FAILED;
 441	}
 442
 443	rmb();
 444	if ((pHba->state) & DPTI_STATE_RESET)
 445		return SCSI_MLQUEUE_HOST_BUSY;
 446
 447	// TODO if the cmd->device if offline then I may need to issue a bus rescan
 448	// followed by a get_lct to see if the device is there anymore
 449	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
 450		/*
 451		 * First command request for this device.  Set up a pointer
 452		 * to the device structure.  This should be a TEST_UNIT_READY
 453		 * command from scan_scsis_single.
 454		 */
 455		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
 456			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response 
 457			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
 458			cmd->result = (DID_NO_CONNECT << 16);
 459			cmd->scsi_done(cmd);
 460			return 0;
 461		}
 462		cmd->device->hostdata = pDev;
 463	}
 464	pDev->pScsi_dev = cmd->device;
 465
 466	/*
 467	 * If we are being called from when the device is being reset, 
 468	 * delay processing of the command until later.
 469	 */
 470	if (pDev->state & DPTI_DEV_RESET ) {
 471		return FAILED;
 472	}
 473	return adpt_scsi_to_i2o(pHba, cmd, pDev);
 474}
 475
 476static DEF_SCSI_QCMD(adpt_queue)
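
/* DEF_SCSI_QCMD() generates adpt_queue(), a wrapper that takes the
 * host lock and calls adpt_queue_lck() above with the done callback. */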
 477
 478static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
 479		sector_t capacity, int geom[])
 480{
 481	int heads=-1;
 482	int sectors=-1;
 483	int cylinders=-1;
 484
 485	// *** First lets set the default geometry ****
 486	
 487	// If the capacity is less than 0x2000
 488	if (capacity < 0x2000 ) {	// floppy
 489		heads = 18;
 490		sectors = 2;
 491	} 
 492	// else if between 0x2000 and 0x20000
 493	else if (capacity < 0x20000) {
 494		heads = 64;
 495		sectors = 32;
 496	}
 497	// else if between 0x20000 and 0x40000
 498	else if (capacity < 0x40000) {
 499		heads = 65;
 500		sectors = 63;
 501	}
 502	// else if between 0x40000 and 0x80000
 503	else if (capacity < 0x80000) {
 504		heads = 128;
 505		sectors = 63;
 506	}
 507	// else if greater than 0x80000
 508	else {
 509		heads = 255;
 510		sectors = 63;
 511	}
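	/*
	 * Bucket summary, capacity in 512-byte sectors: < 0x2000 is about
	 * 4 MiB, < 0x20000 ~64 MiB, < 0x40000 ~128 MiB, < 0x80000 ~256 MiB.
	 * Note that sector_div() divides 'capacity' in place and returns
	 * the remainder, so the quotient is left in capacity rather than
	 * in cylinders here.
	 */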
 512	cylinders = sector_div(capacity, heads * sectors);
 513
 514	// Special case if CDROM
 515	if(sdev->type == 5) {  // CDROM
 516		heads = 252;
 517		sectors = 63;
 518		cylinders = 1111;
 519	}
 520
 521	geom[0] = heads;
 522	geom[1] = sectors;
 523	geom[2] = cylinders;
 524	
 525	PDEBUG("adpt_bios_param: exit\n");
 526	return 0;
 527}
 528
 529
 530static const char *adpt_info(struct Scsi_Host *host)
 531{
 532	adpt_hba* pHba;
 533
 534	pHba = (adpt_hba *) host->hostdata[0];
 535	return (char *) (pHba->detail);
 536}
 537
 538static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
 539{
 540	struct adpt_device* d;
 541	int id;
 542	int chan;
 543	adpt_hba* pHba;
 544	int unit;
 545
 546	// Find HBA (host bus adapter) we are looking for
 547	mutex_lock(&adpt_configuration_lock);
 548	for (pHba = hba_chain; pHba; pHba = pHba->next) {
 549		if (pHba->host == host) {
 550			break;	/* found adapter */
 551		}
 552	}
 553	mutex_unlock(&adpt_configuration_lock);
 554	if (pHba == NULL) {
 555		return 0;
 556	}
 557	host = pHba->host;
 558
 559	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
 560	seq_printf(m, "%s\n", pHba->detail);
 561	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n", 
 562			pHba->host->host_no, pHba->name, host->irq);
 563	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
 564			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
 565
 566	seq_puts(m, "Devices:\n");
 567	for(chan = 0; chan < MAX_CHANNEL; chan++) {
 568		for(id = 0; id < MAX_ID; id++) {
 569			d = pHba->channel[chan].device[id];
 570			while(d) {
 571				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
 572				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
 573
 574				unit = d->pI2o_dev->lct_data.tid;
 575				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
 576					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
 577					       scsi_device_online(d->pScsi_dev)? "online":"offline"); 
 578				d = d->next_lun;
 579			}
 580		}
 581	}
 582	return 0;
 583}
 584
 585/*
 586 *	Turn a pointer to ioctl reply data into an u32 'context'
 587 */
 588static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
 589{
 590#if BITS_PER_LONG == 32
 591	return (u32)(unsigned long)reply;
 592#else
 593	ulong flags = 0;
 594	u32 nr, i;
 595
 596	spin_lock_irqsave(pHba->host->host_lock, flags);
 597	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
 598	for (i = 0; i < nr; i++) {
 599		if (pHba->ioctl_reply_context[i] == NULL) {
 600			pHba->ioctl_reply_context[i] = reply;
 601			break;
 602		}
 603	}
 604	spin_unlock_irqrestore(pHba->host->host_lock, flags);
 605	if (i >= nr) {
 606		printk(KERN_WARNING"%s: Too many outstanding "
 607				"ioctl commands\n", pHba->name);
 608		return (u32)-1;
 609	}
 610
 611	return i;
 612#endif
 613}
 614
 615/*
 616 *	Go from an u32 'context' to a pointer to ioctl reply data.
 617 */
 618static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
 619{
 620#if BITS_PER_LONG == 32
 621	return (void *)(unsigned long)context;
 622#else
 623	void *p = pHba->ioctl_reply_context[context];
 624	pHba->ioctl_reply_context[context] = NULL;
 625
 626	return p;
 627#endif
 628}
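
/*
 * Round trip of the two helpers above: on 64-bit builds a reply
 * pointer cannot ride in the 32-bit message context, so
 * adpt_ioctl_to_context() parks it in pHba->ioctl_reply_context[] and
 * hands back the slot index; the reply path later feeds that index to
 * adpt_ioctl_from_context() to recover the pointer and free the slot.
 * On 32-bit builds the pointer itself is the context.
 */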
 629
 630/*===========================================================================
 631 * Error Handling routines
 632 *===========================================================================
 633 */
 634
 635static int adpt_abort(struct scsi_cmnd * cmd)
 636{
 637	adpt_hba* pHba = NULL;	/* host bus adapter structure */
 638	struct adpt_device* dptdevice;	/* dpt per device information */
 639	u32 msg[5];
 640	int rcode;
 641
 642	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
 643	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
 644	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
 645		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
 646		return FAILED;
 647	}
 648
 649	memset(msg, 0, sizeof(msg));
 650	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
 651	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
 652	msg[2] = 0;
 653	msg[3]= 0;
 654	/* Add 1 to avoid firmware treating it as invalid command */
 655	msg[4] = cmd->request->tag + 1;
 656	if (pHba->host)
 657		spin_lock_irq(pHba->host->host_lock);
 658	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
 659	if (pHba->host)
 660		spin_unlock_irq(pHba->host->host_lock);
 661	if (rcode != 0) {
 662		if(rcode == -EOPNOTSUPP ){
 663			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
 664			return FAILED;
 665		}
 666		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
 667		return FAILED;
 668	} 
 669	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
 670	return SUCCESS;
 671}
 672
 673
 674#define I2O_DEVICE_RESET 0x27
 675// This is the same for BLK and SCSI devices
 676// NOTE this is wrong in the i2o.h definitions
 677// This is not currently supported by our adapter but we issue it anyway
 678static int adpt_device_reset(struct scsi_cmnd* cmd)
 679{
 680	adpt_hba* pHba;
 681	u32 msg[4];
 682	u32 rcode;
 683	int old_state;
 684	struct adpt_device* d = cmd->device->hostdata;
 685
 686	pHba = (void*) cmd->device->host->hostdata[0];
 687	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
 688	if (!d) {
 689		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
 690		return FAILED;
 691	}
 692	memset(msg, 0, sizeof(msg));
 693	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
 694	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
 695	msg[2] = 0;
 696	msg[3] = 0;
 697
 698	if (pHba->host)
 699		spin_lock_irq(pHba->host->host_lock);
 700	old_state = d->state;
 701	d->state |= DPTI_DEV_RESET;
 702	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
 703	d->state = old_state;
 704	if (pHba->host)
 705		spin_unlock_irq(pHba->host->host_lock);
 706	if (rcode != 0) {
 707		if(rcode == -EOPNOTSUPP ){
 708			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
 709			return FAILED;
 710		}
 711		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
 712		return FAILED;
 713	} else {
 714		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
 715		return SUCCESS;
 716	}
 717}
 718
 719
 720#define I2O_HBA_BUS_RESET 0x87
 721// This version of bus reset is called by the eh_error handler
 722static int adpt_bus_reset(struct scsi_cmnd* cmd)
 723{
 724	adpt_hba* pHba;
 725	u32 msg[4];
 726	u32 rcode;
 727
 728	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
 729	memset(msg, 0, sizeof(msg));
 730	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
 731	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
 732	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
 733	msg[2] = 0;
 734	msg[3] = 0;
 735	if (pHba->host)
 736		spin_lock_irq(pHba->host->host_lock);
 737	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
 738	if (pHba->host)
 739		spin_unlock_irq(pHba->host->host_lock);
 740	if (rcode != 0) {
 741		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
 742		return FAILED;
 743	} else {
 744		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
 745		return SUCCESS;
 746	}
 747}
 748
 749// This version of reset is called by the eh_error_handler
 750static int __adpt_reset(struct scsi_cmnd* cmd)
 751{
 752	adpt_hba* pHba;
 753	int rcode;
 754	char name[32];
 755
 756	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
 757	strncpy(name, pHba->name, sizeof(name));
 758	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
 759	rcode =  adpt_hba_reset(pHba);
 760	if(rcode == 0){
 761		printk(KERN_WARNING"%s: HBA reset complete\n", name);
 762		return SUCCESS;
 763	} else {
 764		printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
 765		return FAILED;
 766	}
 767}
 768
 769static int adpt_reset(struct scsi_cmnd* cmd)
 770{
 771	int rc;
 772
 773	spin_lock_irq(cmd->device->host->host_lock);
 774	rc = __adpt_reset(cmd);
 775	spin_unlock_irq(cmd->device->host->host_lock);
 776
 777	return rc;
 778}
 779
 780// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
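// The sequence below mirrors the I2O IOP state machine: activate
// (RESET -> HOLD via get-status, init-outbound and get-HRT), rebuild
// the system table, bring the IOP online (HOLD -> OPERATIONAL), then
// re-read and re-parse the LCT so the device tables match firmware.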
 781static int adpt_hba_reset(adpt_hba* pHba)
 782{
 783	int rcode;
 784
 785	pHba->state |= DPTI_STATE_RESET;
 786
 787	// Activate performs get-status, init-outbound, and get-HRT
 788	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
 789		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
 790		adpt_i2o_delete_hba(pHba);
 791		return rcode;
 792	}
 793
 794	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
 795		adpt_i2o_delete_hba(pHba);
 796		return rcode;
 797	}
 798	PDEBUG("%s: in HOLD state\n",pHba->name);
 799
 800	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
 801		adpt_i2o_delete_hba(pHba);	
 802		return rcode;
 803	}
 804	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
 805
 806	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
 807		adpt_i2o_delete_hba(pHba);
 808		return rcode;
 809	}
 810
 811	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
 812		adpt_i2o_delete_hba(pHba);
 813		return rcode;
 814	}
 815	pHba->state &= ~DPTI_STATE_RESET;
 816
 817	scsi_host_complete_all_commands(pHba->host, DID_RESET);
 818	return 0;	/* return success */
 819}
 820
 821/*===========================================================================
 822 * 
 823 *===========================================================================
 824 */
 825
 826
 827static void adpt_i2o_sys_shutdown(void)
 828{
 829	adpt_hba *pHba, *pNext;
 830	struct adpt_i2o_post_wait_data *p1, *old;
 831
 832	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
 833	printk(KERN_INFO "   This could take a few minutes if there are many devices attached\n");
 834	/* Delete all IOPs from the controller chain */
 835	/* They should have already been released by the
 836	 * scsi-core
 837	 */
 838	for (pHba = hba_chain; pHba; pHba = pNext) {
 839		pNext = pHba->next;
 840		adpt_i2o_delete_hba(pHba);
 841	}
 842
 843	/* Remove any timed-out entries from the wait queue. */
 844//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
 845	/* Nothing should be outstanding at this point so just
 846	 * free them 
 847	 */
 848	for(p1 = adpt_post_wait_queue; p1;) {
 849		old = p1;
 850		p1 = p1->next;
 851		kfree(old);
 852	}
 853//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
 854	adpt_post_wait_queue = NULL;
 855
 856	printk(KERN_INFO "Adaptec I2O controllers down.\n");
 857}
 858
 859static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
 860{
 861
 862	adpt_hba* pHba = NULL;
 863	adpt_hba* p = NULL;
 864	ulong base_addr0_phys = 0;
 865	ulong base_addr1_phys = 0;
 866	u32 hba_map0_area_size = 0;
 867	u32 hba_map1_area_size = 0;
 868	void __iomem *base_addr_virt = NULL;
 869	void __iomem *msg_addr_virt = NULL;
 870	int dma64 = 0;
 871
 872	int raptorFlag = FALSE;
 873
 874	if(pci_enable_device(pDev)) {
 875		return -EINVAL;
 876	}
 877
 878	if (pci_request_regions(pDev, "dpt_i2o")) {
 879		PERROR("dpti: adpt_config_hba: pci request region failed\n");
 880		return -EINVAL;
 881	}
 882
 883	pci_set_master(pDev);
 884
 885	/*
 886	 *	See if we should enable dma64 mode.
 887	 */
 888	if (sizeof(dma_addr_t) > 4 &&
 889	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
 890	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
 891		dma64 = 1;
 892
 893	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
 894		return -EINVAL;
 895
 896	/* adapter only supports message blocks below 4GB */
 897	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));
 898
 899	base_addr0_phys = pci_resource_start(pDev,0);
 900	hba_map0_area_size = pci_resource_len(pDev,0);
 901
 902	// Check if standard PCI card or single BAR Raptor
 903	if(pDev->device == PCI_DPT_DEVICE_ID){
 904		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
 905			// Raptor card with this device id needs 4M
 906			hba_map0_area_size = 0x400000;
 907		} else { // Not Raptor - it is a PCI card
 908			if(hba_map0_area_size > 0x100000 ){ 
 909				hba_map0_area_size = 0x100000;
 910			}
 911		}
 912	} else {// Raptor split BAR config
 913		// Use BAR1 in this configuration
 914		base_addr1_phys = pci_resource_start(pDev,1);
 915		hba_map1_area_size = pci_resource_len(pDev,1);
 916		raptorFlag = TRUE;
 917	}
 918
 919#if BITS_PER_LONG == 64
 920	/*
 921	 *	The original Adaptec 64 bit driver has this comment here:
 922	 *	"x86_64 machines need more optimal mappings"
 923	 *
 924	 *	I assume some HBAs report ridiculously large mappings
 925	 *	and we need to limit them on platforms with IOMMUs.
 926	 */
 927	if (raptorFlag == TRUE) {
 928		if (hba_map0_area_size > 128)
 929			hba_map0_area_size = 128;
 930		if (hba_map1_area_size > 524288)
 931			hba_map1_area_size = 524288;
 932	} else {
 933		if (hba_map0_area_size > 524288)
 934			hba_map0_area_size = 524288;
 935	}
 936#endif
 937
 938	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
 939	if (!base_addr_virt) {
 940		pci_release_regions(pDev);
 941		PERROR("dpti: adpt_config_hba: io remap failed\n");
 942		return -EINVAL;
 943	}
 944
 945	if(raptorFlag == TRUE) {
 946		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
 947		if (!msg_addr_virt) {
 948			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
 949			iounmap(base_addr_virt);
 950			pci_release_regions(pDev);
 951			return -EINVAL;
 952		}
 953	} else {
 954		msg_addr_virt = base_addr_virt;
 955	}
 956	
 957	// Allocate and zero the data structure
 958	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
 959	if (!pHba) {
 960		if (msg_addr_virt != base_addr_virt)
 961			iounmap(msg_addr_virt);
 962		iounmap(base_addr_virt);
 963		pci_release_regions(pDev);
 964		return -ENOMEM;
 965	}
 966
 967	mutex_lock(&adpt_configuration_lock);
 968
 969	if(hba_chain != NULL){
 970		for(p = hba_chain; p->next; p = p->next);
 971		p->next = pHba;
 972	} else {
 973		hba_chain = pHba;
 974	}
 975	pHba->next = NULL;
 976	pHba->unit = hba_count;
 977	sprintf(pHba->name, "dpti%d", hba_count);
 978	hba_count++;
 979	
 980	mutex_unlock(&adpt_configuration_lock);
 981
 982	pHba->pDev = pDev;
 983	pHba->base_addr_phys = base_addr0_phys;
 984
 985	// Set up the Virtual Base Address of the I2O Device
 986	pHba->base_addr_virt = base_addr_virt;
 987	pHba->msg_addr_virt = msg_addr_virt;
 988	pHba->irq_mask = base_addr_virt+0x30;
 989	pHba->post_port = base_addr_virt+0x40;
 990	pHba->reply_port = base_addr_virt+0x44;
 991
 992	pHba->hrt = NULL;
 993	pHba->lct = NULL;
 994	pHba->lct_size = 0;
 995	pHba->status_block = NULL;
 996	pHba->post_count = 0;
 997	pHba->state = DPTI_STATE_RESET;
 998	pHba->pDev = pDev;
 999	pHba->devices = NULL;
1000	pHba->dma64 = dma64;
1001
1002	// Initializing the spinlocks
1003	spin_lock_init(&pHba->state_lock);
1004	spin_lock_init(&adpt_post_wait_lock);
1005
1006	if(raptorFlag == 0){
1007		printk(KERN_INFO "Adaptec I2O RAID controller"
1008				 " %d at %p size=%x irq=%d%s\n", 
1009			hba_count-1, base_addr_virt,
1010			hba_map0_area_size, pDev->irq,
1011			dma64 ? " (64-bit DMA)" : "");
1012	} else {
1013		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1014			hba_count-1, pDev->irq,
1015			dma64 ? " (64-bit DMA)" : "");
1016		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1017		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1018	}
1019
1020	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
1021		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1022		adpt_i2o_delete_hba(pHba);
1023		return -EINVAL;
1024	}
1025
1026	return 0;
1027}
1028
1029
1030static void adpt_i2o_delete_hba(adpt_hba* pHba)
1031{
1032	adpt_hba* p1;
1033	adpt_hba* p2;
1034	struct i2o_device* d;
1035	struct i2o_device* next;
1036	int i;
1037	int j;
1038	struct adpt_device* pDev;
1039	struct adpt_device* pNext;
1040
1041
1042	mutex_lock(&adpt_configuration_lock);
1043	if(pHba->host){
1044		free_irq(pHba->host->irq, pHba);
1045	}
1046	p2 = NULL;
1047	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1048		if(p1 == pHba) {
1049			if(p2) {
1050				p2->next = p1->next;
1051			} else {
1052				hba_chain = p1->next;
1053			}
1054			break;
1055		}
1056	}
1057
1058	hba_count--;
1059	mutex_unlock(&adpt_configuration_lock);
1060
1061	iounmap(pHba->base_addr_virt);
1062	pci_release_regions(pHba->pDev);
1063	if(pHba->msg_addr_virt != pHba->base_addr_virt){
1064		iounmap(pHba->msg_addr_virt);
1065	}
1066	if(pHba->FwDebugBuffer_P)
1067	   	iounmap(pHba->FwDebugBuffer_P);
1068	if(pHba->hrt) {
1069		dma_free_coherent(&pHba->pDev->dev,
1070			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
1071			pHba->hrt, pHba->hrt_pa);
1072	}
1073	if(pHba->lct) {
1074		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
1075			pHba->lct, pHba->lct_pa);
1076	}
1077	if(pHba->status_block) {
1078		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
1079			pHba->status_block, pHba->status_block_pa);
1080	}
1081	if(pHba->reply_pool) {
1082		dma_free_coherent(&pHba->pDev->dev,
1083			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
1084			pHba->reply_pool, pHba->reply_pool_pa);
1085	}
1086
1087	for(d = pHba->devices; d ; d = next){
1088		next = d->next;
1089		kfree(d);
1090	}
1091	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1092		for(j = 0; j < MAX_ID; j++){
1093			if(pHba->channel[i].device[j] != NULL){
1094				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1095					pNext = pDev->next_lun;
1096					kfree(pDev);
1097				}
1098			}
1099		}
1100	}
1101	pci_dev_put(pHba->pDev);
1102	if (adpt_sysfs_class)
1103		device_destroy(adpt_sysfs_class,
1104				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
1105	kfree(pHba);
1106
1107	if(hba_count <= 0){
1108		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);   
1109		if (adpt_sysfs_class) {
1110			class_destroy(adpt_sysfs_class);
1111			adpt_sysfs_class = NULL;
1112		}
1113	}
1114}
1115
1116static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
1117{
1118	struct adpt_device* d;
1119
1120	if (chan >= MAX_CHANNEL)
1121		return NULL;
1122	
1123	d = pHba->channel[chan].device[id];
1124	if(!d || d->tid == 0) {
1125		return NULL;
1126	}
1127
1128	/* If it is the only lun at that address then this should match*/
1129	if(d->scsi_lun == lun){
1130		return d;
1131	}
1132
1133	/* else we need to look through all the luns */
1134	for(d=d->next_lun ; d ; d = d->next_lun){
1135		if(d->scsi_lun == lun){
1136			return d;
1137		}
1138	}
1139	return NULL;
1140}
1141
1142
1143static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1144{
1145	// I used my own version of the WAIT_QUEUE_HEAD
1146	// to handle some version differences
1147	// When embedded in the kernel this could go back to the vanilla one
1148	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1149	int status = 0;
1150	ulong flags = 0;
1151	struct adpt_i2o_post_wait_data *p1, *p2;
1152	struct adpt_i2o_post_wait_data *wait_data =
1153		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
1154	DECLARE_WAITQUEUE(wait, current);
1155
1156	if (!wait_data)
1157		return -ENOMEM;
1158
1159	/*
1160	 * The spin locking is needed to keep anyone from playing
1161	 * with the queue pointers and id while we do the same
1162	 */
1163	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1164	// TODO we need a MORE unique way of getting ids
1165	// to support async LCT get
1166	wait_data->next = adpt_post_wait_queue;
1167	adpt_post_wait_queue = wait_data;
1168	adpt_post_wait_id++;
1169	adpt_post_wait_id &= 0x7fff;
1170	wait_data->id =  adpt_post_wait_id;
1171	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1172
1173	wait_data->wq = &adpt_wq_i2o_post;
1174	wait_data->status = -ETIMEDOUT;
1175
1176	add_wait_queue(&adpt_wq_i2o_post, &wait);
1177
1178	msg[2] |= 0x80000000 | ((u32)wait_data->id);
1179	timeout *= HZ;
1180	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1181		set_current_state(TASK_INTERRUPTIBLE);
1182		if(pHba->host)
1183			spin_unlock_irq(pHba->host->host_lock);
1184		if (!timeout)
1185			schedule();
1186		else{
1187			timeout = schedule_timeout(timeout);
1188			if (timeout == 0) {
1189				// I/O issued, but cannot get result in
1190			// specified time. Freeing resources is
1191				// dangerous.
1192				status = -ETIME;
1193			}
1194		}
1195		if(pHba->host)
1196			spin_lock_irq(pHba->host->host_lock);
1197	}
1198	remove_wait_queue(&adpt_wq_i2o_post, &wait);
1199
1200	if(status == -ETIMEDOUT){
1201		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1202		// We will have to free the wait_data memory during shutdown
1203		return status;
1204	}
1205
1206	/* Remove the entry from the queue.  */
1207	p2 = NULL;
1208	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1209	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1210		if(p1 == wait_data) {
1211			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1212				status = -EOPNOTSUPP;
1213			}
1214			if(p2) {
1215				p2->next = p1->next;
1216			} else {
1217				adpt_post_wait_queue = p1->next;
1218			}
1219			break;
1220		}
1221	}
1222	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1223
1224	kfree(wait_data);
1225
1226	return status;
1227}
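
/*
 * Typical caller pattern (see adpt_inquiry() for a real instance):
 *
 *	rcode = adpt_i2o_post_wait(pHba, msg, reqlen << 2, 120);
 *
 * msg[2] is stamped with 0x80000000 | id above so the reply handler
 * can match the completion in adpt_i2o_post_wait_complete().
 */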
1228
1229
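/* Claim an inbound message frame by reading the post port (EMPTY_QUEUE
 * means none is free yet), copy the request into the frame, then write
 * the frame offset back to the post port to hand it to the IOP. */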
1230static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1231{
1232
1233	u32 m = EMPTY_QUEUE;
1234	u32 __iomem *msg;
1235	ulong timeout = jiffies + 30*HZ;
1236	do {
1237		rmb();
1238		m = readl(pHba->post_port);
1239		if (m != EMPTY_QUEUE) {
1240			break;
1241		}
1242		if(time_after(jiffies,timeout)){
1243			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1244			return -ETIMEDOUT;
1245		}
1246		schedule_timeout_uninterruptible(1);
1247	} while(m == EMPTY_QUEUE);
1248		
1249	msg = pHba->msg_addr_virt + m;
1250	memcpy_toio(msg, data, len);
1251	wmb();
1252
1253	//post message
1254	writel(m, pHba->post_port);
1255	wmb();
1256
1257	return 0;
1258}
1259
1260
1261static void adpt_i2o_post_wait_complete(u32 context, int status)
1262{
1263	struct adpt_i2o_post_wait_data *p1 = NULL;
1264	/*
1265	 * We need to search through the adpt_post_wait
1266	 * queue to see if the given message is still
1267	 * outstanding.  If not, it means that the IOP
1268	 * took longer to respond to the message than we
1269	 * had allowed and timer has already expired.
1270	 * Not much we can do about that except log
1271	 * it for debug purposes, increase timeout, and recompile
1272	 *
1273	 * Lock needed to keep anyone from moving queue pointers
1274	 * around while we're looking through them.
1275	 */
1276
1277	context &= 0x7fff;
1278
1279	spin_lock(&adpt_post_wait_lock);
1280	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1281		if(p1->id == context) {
1282			p1->status = status;
1283			spin_unlock(&adpt_post_wait_lock);
1284			wake_up_interruptible(p1->wq);
1285			return;
1286		}
1287	}
1288	spin_unlock(&adpt_post_wait_lock);
1289	// If this happens we lose commands that probably really completed
1290	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1291	printk(KERN_DEBUG"      Tasks in wait queue:\n");
1292	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1293		printk(KERN_DEBUG"           %d\n",p1->id);
1294	}
1295	return;
1296}
1297
1298static s32 adpt_i2o_reset_hba(adpt_hba* pHba)			
1299{
1300	u32 msg[8];
1301	u8* status;
1302	dma_addr_t addr;
1303	u32 m = EMPTY_QUEUE ;
1304	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1305
1306	if(pHba->initialized  == FALSE) {	// First time reset should be quick
1307		timeout = jiffies + (25*HZ);
1308	} else {
1309		adpt_i2o_quiesce_hba(pHba);
1310	}
1311
1312	do {
1313		rmb();
1314		m = readl(pHba->post_port);
1315		if (m != EMPTY_QUEUE) {
1316			break;
1317		}
1318		if(time_after(jiffies,timeout)){
1319			printk(KERN_WARNING"Timeout waiting for message!\n");
1320			return -ETIMEDOUT;
1321		}
1322		schedule_timeout_uninterruptible(1);
1323	} while (m == EMPTY_QUEUE);
1324
1325	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
1326	if(status == NULL) {
1327		adpt_send_nop(pHba, m);
1328		printk(KERN_ERR"IOP reset failed - no free memory.\n");
1329		return -ENOMEM;
1330	}
1331
1332	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1333	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1334	msg[2]=0;
1335	msg[3]=0;
1336	msg[4]=0;
1337	msg[5]=0;
1338	msg[6]=dma_low(addr);
1339	msg[7]=dma_high(addr);
1340
1341	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1342	wmb();
1343	writel(m, pHba->post_port);
1344	wmb();
1345
1346	while(*status == 0){
1347		if(time_after(jiffies,timeout)){
1348			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1349			/* We lose 4 bytes of "status" here, but we cannot
1350			   free these because controller may awake and corrupt
1351			   those bytes at any time */
1352			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1353			return -ETIMEDOUT;
1354		}
1355		rmb();
1356		schedule_timeout_uninterruptible(1);
1357	}
1358
1359	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1360		PDEBUG("%s: Reset in progress...\n", pHba->name);
1361		// Here we wait for message frame to become available
1362		// indicated that reset has finished
1363		do {
1364			rmb();
1365			m = readl(pHba->post_port);
1366			if (m != EMPTY_QUEUE) {
1367				break;
1368			}
1369			if(time_after(jiffies,timeout)){
1370				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1371				/* We lose 4 bytes of "status" here, but we
1372				   cannot free these because controller may
1373				   awake and corrupt those bytes at any time */
1374				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1375				return -ETIMEDOUT;
1376			}
1377			schedule_timeout_uninterruptible(1);
1378		} while (m == EMPTY_QUEUE);
1379		// Flush the offset
1380		adpt_send_nop(pHba, m);
1381	}
1382	adpt_i2o_status_get(pHba);
1383	if(*status == 0x02 ||
1384			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1385		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1386				pHba->name);
1387	} else {
1388		PDEBUG("%s: Reset completed.\n", pHba->name);
1389	}
1390
1391	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
1392#ifdef UARTDELAY
1393	// This delay is to allow someone attached to the card through the debug UART to 
1394	// set up the dump levels that they want before the rest of the initialization sequence
1395	adpt_delay(20000);
1396#endif
1397	return 0;
1398}
1399
1400
1401static int adpt_i2o_parse_lct(adpt_hba* pHba)
1402{
1403	int i;
1404	int max;
1405	int tid;
1406	struct i2o_device *d;
1407	i2o_lct *lct = pHba->lct;
1408	u8 bus_no = 0;
1409	s16 scsi_id;
1410	u64 scsi_lun;
1411	u32 buf[10]; // big enough for the 32-byte scalar queries below
1412	struct adpt_device* pDev; 
1413	
1414	if (lct == NULL) {
1415		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1416		return -1;
1417	}
1418	
1419	max = lct->table_size;	
1420	max -= 3;
1421	max /= 9;
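	/* The LCT header is 3 dwords and each entry is 9 dwords, so the
	   arithmetic above yields the entry count (table_size is given
	   in dwords). */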
1422
1423	for(i=0;i<max;i++) {
1424		if( lct->lct_entry[i].user_tid != 0xfff){
1425			/*
1426			 * If we have hidden devices, we need to inform the upper layers about
1427			 * the possible maximum id reference to handle device access when
1428			 * an array is disassembled. This code has no other purpose but to
1429			 * allow us future access to devices that are currently hidden
1430			 * behind arrays, hotspares or have not been configured (JBOD mode).
1431			 */
1432			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1433			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1434			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1435			    	continue;
1436			}
1437			tid = lct->lct_entry[i].tid;
1438			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1439			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1440				continue;
1441			}
1442			bus_no = buf[0]>>16;
1443			scsi_id = buf[1];
1444			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1445			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1446				printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
1447				continue;
1448			}
1449			if (scsi_id >= MAX_ID){
1450			printk(KERN_WARNING"%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
1451				continue;
1452			}
1453			if(bus_no > pHba->top_scsi_channel){
1454				pHba->top_scsi_channel = bus_no;
1455			}
1456			if(scsi_id > pHba->top_scsi_id){
1457				pHba->top_scsi_id = scsi_id;
1458			}
1459			if(scsi_lun > pHba->top_scsi_lun){
1460				pHba->top_scsi_lun = scsi_lun;
1461			}
1462			continue;
1463		}
1464		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1465		if(d==NULL)
1466		{
1467			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1468			return -ENOMEM;
1469		}
1470		
1471		d->controller = pHba;
1472		d->next = NULL;
1473
1474		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1475
1476		d->flags = 0;
1477		tid = d->lct_data.tid;
1478		adpt_i2o_report_hba_unit(pHba, d);
1479		adpt_i2o_install_device(pHba, d);
1480	}
1481	bus_no = 0;
1482	for(d = pHba->devices; d ; d = d->next) {
1483		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
1484		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
1485			tid = d->lct_data.tid;
1486			// TODO get the bus_no from hrt-but for now they are in order
1487			//bus_no = 
1488			if(bus_no > pHba->top_scsi_channel){
1489				pHba->top_scsi_channel = bus_no;
1490			}
1491			pHba->channel[bus_no].type = d->lct_data.class_id;
1492			pHba->channel[bus_no].tid = tid;
1493			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1494			{
1495				pHba->channel[bus_no].scsi_id = buf[1];
1496				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1497			}
1498			// TODO remove - this is just until we get from hrt
1499			bus_no++;
1500			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1501				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1502				break;
1503			}
1504		}
1505	}
1506
1507	// Setup adpt_device table
1508	for(d = pHba->devices; d ; d = d->next) {
1509		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1510		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
1511		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1512
1513			tid = d->lct_data.tid;
1514			scsi_id = -1;
1515			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1516			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1517				bus_no = buf[0]>>16;
1518				scsi_id = buf[1];
1519				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1520				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1521					continue;
1522				}
1523				if (scsi_id >= MAX_ID) {
1524					continue;
1525				}
1526				if( pHba->channel[bus_no].device[scsi_id] == NULL){
1527					pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1528					if(pDev == NULL) {
1529						return -ENOMEM;
1530					}
1531					pHba->channel[bus_no].device[scsi_id] = pDev;
1532				} else {
1533					for( pDev = pHba->channel[bus_no].device[scsi_id];	
1534							pDev->next_lun; pDev = pDev->next_lun){
1535					}
1536					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1537					if(pDev->next_lun == NULL) {
1538						return -ENOMEM;
1539					}
1540					pDev = pDev->next_lun;
1541				}
1542				pDev->tid = tid;
1543				pDev->scsi_channel = bus_no;
1544				pDev->scsi_id = scsi_id;
1545				pDev->scsi_lun = scsi_lun;
1546				pDev->pI2o_dev = d;
1547				d->owner = pDev;
1548				pDev->type = (buf[0])&0xff;
1549				pDev->flags = (buf[0]>>8)&0xff;
1550				if(scsi_id > pHba->top_scsi_id){
1551					pHba->top_scsi_id = scsi_id;
1552				}
1553				if(scsi_lun > pHba->top_scsi_lun){
1554					pHba->top_scsi_lun = scsi_lun;
1555				}
1556			}
1557			if(scsi_id == -1){
1558				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1559						d->lct_data.identity_tag);
1560			}
1561		}
1562	}
1563	return 0;
1564}
1565
1566
1567/*
1568 *	Each I2O controller has a chain of devices on it - these match
1569 *	the useful parts of the LCT of the board.
1570 */
1571 
1572static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1573{
1574	mutex_lock(&adpt_configuration_lock);
1575	d->controller=pHba;
1576	d->owner=NULL;
1577	d->next=pHba->devices;
1578	d->prev=NULL;
1579	if (pHba->devices != NULL){
1580		pHba->devices->prev=d;
1581	}
1582	pHba->devices=d;
1583	*d->dev_name = 0;
1584
1585	mutex_unlock(&adpt_configuration_lock);
1586	return 0;
1587}
1588
1589static int adpt_open(struct inode *inode, struct file *file)
1590{
1591	int minor;
1592	adpt_hba* pHba;
1593
1594	mutex_lock(&adpt_mutex);
1595	//TODO check for root access
1596	//
1597	minor = iminor(inode);
1598	if (minor >= hba_count) {
1599		mutex_unlock(&adpt_mutex);
1600		return -ENXIO;
1601	}
1602	mutex_lock(&adpt_configuration_lock);
1603	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1604		if (pHba->unit == minor) {
1605			break;	/* found adapter */
1606		}
1607	}
1608	if (pHba == NULL) {
1609		mutex_unlock(&adpt_configuration_lock);
1610		mutex_unlock(&adpt_mutex);
1611		return -ENXIO;
1612	}
1613
1614//	if(pHba->in_use){
1615	//	mutex_unlock(&adpt_configuration_lock);
1616//		return -EBUSY;
1617//	}
1618
1619	pHba->in_use = 1;
1620	mutex_unlock(&adpt_configuration_lock);
1621	mutex_unlock(&adpt_mutex);
1622
1623	return 0;
1624}
1625
1626static int adpt_close(struct inode *inode, struct file *file)
1627{
1628	int minor;
1629	adpt_hba* pHba;
1630
1631	minor = iminor(inode);
1632	if (minor >= hba_count) {
1633		return -ENXIO;
1634	}
1635	mutex_lock(&adpt_configuration_lock);
1636	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1637		if (pHba->unit == minor) {
1638			break;	/* found adapter */
1639		}
1640	}
1641	mutex_unlock(&adpt_configuration_lock);
1642	if (pHba == NULL) {
1643		return -ENXIO;
1644	}
1645
1646	pHba->in_use = 0;
1647
1648	return 0;
1649}
1650
1651
1652static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1653{
1654	u32 msg[MAX_MESSAGE_SIZE];
1655	u32* reply = NULL;
1656	u32 size = 0;
1657	u32 reply_size = 0;
1658	u32 __user *user_msg = arg;
1659	u32 __user * user_reply = NULL;
1660	void **sg_list = NULL;
1661	u32 sg_offset = 0;
1662	u32 sg_count = 0;
1663	int sg_index = 0;
1664	u32 i = 0;
1665	u32 rcode = 0;
1666	void *p = NULL;
1667	dma_addr_t addr;
1668	ulong flags = 0;
1669
1670	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1671	// get user msg size in u32s 
1672	if(get_user(size, &user_msg[0])){
1673		return -EFAULT;
1674	}
1675	size = size>>16;
1676
1677	user_reply = &user_msg[size];
1678	if(size > MAX_MESSAGE_SIZE){
1679		return -EFAULT;
1680	}
1681	size *= 4; // Convert to bytes
1682
1683	/* Copy in the user's I2O command */
1684	if(copy_from_user(msg, user_msg, size)) {
1685		return -EFAULT;
1686	}
1687	get_user(reply_size, &user_reply[0]);
1688	reply_size = reply_size>>16;
1689	if(reply_size > REPLY_FRAME_SIZE){
1690		reply_size = REPLY_FRAME_SIZE;
1691	}
1692	reply_size *= 4;
1693	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1694	if(reply == NULL) {
1695		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1696		return -ENOMEM;
1697	}
1698	sg_offset = (msg[0]>>4)&0xf;
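	// msg[0] packs the frame size (in dwords) into the top 16 bits and
	// the SG list offset, also in dwords, into bits 4..7; both fields
	// are decoded above.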
1699	msg[2] = 0x40000000; // IOCTL context
1700	msg[3] = adpt_ioctl_to_context(pHba, reply);
1701	if (msg[3] == (u32)-1) {
1702		rcode = -EBUSY;
1703		goto free;
1704	}
1705
1706	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
1707	if (!sg_list) {
1708		rcode = -ENOMEM;
1709		goto free;
1710	}
1711	if(sg_offset) {
1712		// TODO add 64 bit API
1713		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
1714		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1715		if (sg_count > pHba->sg_tablesize){
1716			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1717			rcode = -EINVAL;
1718			goto free;
1719		}
1720
1721		for(i = 0; i < sg_count; i++) {
1722			int sg_size;
1723
1724			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1725				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
1726				rcode = -EINVAL;
1727				goto cleanup;
1728			}
1729			sg_size = sg[i].flag_count & 0xffffff;      
1730			/* Allocate memory for the transfer */
1731			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1732			if(!p) {
1733				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1734						pHba->name,sg_size,i,sg_count);
1735				rcode = -ENOMEM;
1736				goto cleanup;
1737			}
1738			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1739			/* Copy in the user's SG buffer if necessary */
1740			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1741				// sg_simple_element API is 32 bit
1742				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1743					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1744					rcode = -EFAULT;
1745					goto cleanup;
1746				}
1747			}
1748			/* sg_simple_element API is 32 bit, but addr < 4GB */
1749			sg[i].addr_bus = addr;
1750		}
1751	}
1752
1753	do {
1754		/*
1755		 * Stop any new commands from entering the
1756		 * controller while processing the ioctl
1757		 */
1758		if (pHba->host) {
1759			scsi_block_requests(pHba->host);
1760			spin_lock_irqsave(pHba->host->host_lock, flags);
1761		}
1762		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1763		if (rcode != 0)
1764			printk(KERN_WARNING "adpt_i2o_passthru: post wait failed %d %p\n",
1765					rcode, reply);
1766		if (pHba->host) {
1767			spin_unlock_irqrestore(pHba->host->host_lock, flags);
1768			scsi_unblock_requests(pHba->host);
1769		}
1770	} while (rcode == -ETIMEDOUT);
1771
1772	if(rcode){
1773		goto cleanup;
1774	}
1775
1776	if(sg_offset) {
1777	/* Copy back the Scatter Gather buffers back to user space */
1778		u32 j;
1779		// TODO add 64 bit API
1780		struct sg_simple_element* sg;
1781		int sg_size;
1782
1783		// re-acquire the original message to handle correctly the sg copy operation
1784		memset(&msg, 0, MAX_MESSAGE_SIZE*4); 
1785		// get user msg size in u32s 
1786		if(get_user(size, &user_msg[0])){
1787			rcode = -EFAULT; 
1788			goto cleanup; 
1789		}
1790		size = size>>16;
1791		size *= 4;
1792		if (size > MAX_MESSAGE_SIZE) {
1793			rcode = -EINVAL;
1794			goto cleanup;
1795		}
1796		/* Copy in the user's I2O command */
1797		if (copy_from_user (msg, user_msg, size)) {
1798			rcode = -EFAULT;
1799			goto cleanup;
1800		}
1801		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1802
1803		// TODO add 64 bit API
1804		sg 	 = (struct sg_simple_element*)(msg + sg_offset);
1805		for (j = 0; j < sg_count; j++) {
1806			/* Copy out the SG list to user's buffer if necessary */
1807			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1808				sg_size = sg[j].flag_count & 0xffffff; 
1809				// sg_simple_element API is 32 bit
1810				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1811					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1812					rcode = -EFAULT;
1813					goto cleanup;
1814				}
1815			}
1816		}
1817	} 
1818
1819	/* Copy back the reply to user space */
1820	if (reply_size) {
1821		// we wrote our own values for context - now restore the user supplied ones
1822		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1823			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1824			rcode = -EFAULT;
1825		}
1826		if(copy_to_user(user_reply, reply, reply_size)) {
1827			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1828			rcode = -EFAULT;
1829		}
1830	}
1831
1832
1833cleanup:
1834	if (rcode != -ETIME && rcode != -EINTR) {
1835		struct sg_simple_element *sg =
1836				(struct sg_simple_element*) (msg +sg_offset);
1837		while(sg_index) {
1838			if(sg_list[--sg_index]) {
1839				dma_free_coherent(&pHba->pDev->dev,
1840					sg[sg_index].flag_count & 0xffffff,
1841					sg_list[sg_index],
1842					sg[sg_index].addr_bus);
1843			}
1844		}
1845	}
1846
1847free:
1848	kfree(sg_list);
1849	kfree(reply);
1850	return rcode;
1851}
1852
1853#if defined __ia64__ 
1854static void adpt_ia64_info(sysInfo_S* si)
1855{
1856	// This is all the info we need for now
1857	// We will add more info as our new
1858	// management utility requires it
1859	si->processorType = PROC_IA64;
1860}
1861#endif
1862
1863#if defined __sparc__ 
1864static void adpt_sparc_info(sysInfo_S* si)
1865{
1866	// This is all the info we need for now
1867	// We will add more info as our new
1868	// management utility requires it
1869	si->processorType = PROC_ULTRASPARC;
1870}
1871#endif
1872#if defined __alpha__ 
1873static void adpt_alpha_info(sysInfo_S* si)
1874{
1875	// This is all the info we need for now
1876	// We will add more info as our new
1877	// management utility requires it
1878	si->processorType = PROC_ALPHA;
1879}
1880#endif
1881
1882#if defined __i386__
1883
1884#include <uapi/asm/vm86.h>
1885
1886static void adpt_i386_info(sysInfo_S* si)
1887{
1888	// This is all the info we need for now
1889	// We will add more info as our new
1890	// management utility requires it
1891	switch (boot_cpu_data.x86) {
1892	case CPU_386:
1893		si->processorType = PROC_386;
1894		break;
1895	case CPU_486:
1896		si->processorType = PROC_486;
1897		break;
1898	case CPU_586:
1899		si->processorType = PROC_PENTIUM;
1900		break;
1901	default:  // Just in case 
1902		si->processorType = PROC_PENTIUM;
1903		break;
1904	}
1905}
1906#endif
1907
1908/*
1909 * This routine returns information about the system.  This does not affect
1910 * any logic and if the info is wrong - it doesn't matter.
1911 */
1912
1913/* Get all the info we can not get from kernel services */
1914static int adpt_system_info(void __user *buffer)
1915{
1916	sysInfo_S si;
1917
1918	memset(&si, 0, sizeof(si));
1919
1920	si.osType = OS_LINUX;
1921	si.osMajorVersion = 0;
1922	si.osMinorVersion = 0;
1923	si.osRevision = 0;
1924	si.busType = SI_PCI_BUS;
1925	si.processorFamily = DPTI_sig.dsProcessorFamily;
1926
1927#if defined __i386__
1928	adpt_i386_info(&si);
1929#elif defined (__ia64__)
1930	adpt_ia64_info(&si);
1931#elif defined(__sparc__)
1932	adpt_sparc_info(&si);
1933#elif defined (__alpha__)
1934	adpt_alpha_info(&si);
1935#else
1936	si.processorType = 0xff ;
1937#endif
1938	if (copy_to_user(buffer, &si, sizeof(si))){
1939		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1940		return -EFAULT;
1941	}
1942
1943	return 0;
1944}
1945
1946static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
1947{
1948	int minor;
1949	int error = 0;
1950	adpt_hba* pHba;
1951	ulong flags = 0;
1952	void __user *argp = (void __user *)arg;
1953
1954	minor = iminor(inode);
1955	if (minor >= DPTI_MAX_HBA){
1956		return -ENXIO;
1957	}
1958	mutex_lock(&adpt_configuration_lock);
1959	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1960		if (pHba->unit == minor) {
1961			break;	/* found adapter */
1962		}
1963	}
1964	mutex_unlock(&adpt_configuration_lock);
1965	if(pHba == NULL){
1966		return -ENXIO;
1967	}
1968
1969	while((volatile u32) pHba->state & DPTI_STATE_RESET )
1970		schedule_timeout_uninterruptible(2);
1971
1972	switch (cmd) {
1974	case DPT_SIGNATURE:
1975		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
1976			return -EFAULT;
1977		}
1978		break;
1979	case I2OUSRCMD:
1980		return adpt_i2o_passthru(pHba, argp);
1981
1982	case DPT_CTRLINFO:{
1983		drvrHBAinfo_S HbaInfo;
1984
1985#define FLG_OSD_PCI_VALID 0x0001
1986#define FLG_OSD_DMA	  0x0002
1987#define FLG_OSD_I2O	  0x0004
1988		memset(&HbaInfo, 0, sizeof(HbaInfo));
1989		HbaInfo.drvrHBAnum = pHba->unit;
1990		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
1991		HbaInfo.blinkState = adpt_read_blink_led(pHba);
1992		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
1993		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn); 
1994		HbaInfo.Interrupt = pHba->pDev->irq; 
1995		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
1996		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
1997			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
1998			return -EFAULT;
1999		}
2000		break;
2001		}
2002	case DPT_SYSINFO:
2003		return adpt_system_info(argp);
2004	case DPT_BLINKLED:{
2005		u32 value;
2006		value = (u32)adpt_read_blink_led(pHba);
2007		if (copy_to_user(argp, &value, sizeof(value))) {
2008			return -EFAULT;
2009		}
2010		break;
2011		}
2012	case I2ORESETCMD: {
2013		struct Scsi_Host *shost = pHba->host;
2014
2015		if (shost)
2016			spin_lock_irqsave(shost->host_lock, flags);
2017		adpt_hba_reset(pHba);
2018		if (shost)
2019			spin_unlock_irqrestore(shost->host_lock, flags);
2020		break;
2021	}
2022	case I2ORESCANCMD:
2023		adpt_rescan(pHba);
2024		break;
2025	default:
2026		return -EINVAL;
2027	}
2028
2029	return error;
2030}
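
/*
 * Illustrative only (not part of the driver): a management app would
 * exercise the cases above through the per-unit character node.  The
 * node name and the visibility of the dptsig.h/dpti_ioctl.h
 * definitions in the app are assumptions here:
 *
 *	int fd = open("/dev/dpti0", O_RDWR);	// hypothetical node name
 *	dpt_sig_S sig;
 *	u32 blink;
 *	if (fd >= 0 && ioctl(fd, DPT_SIGNATURE, &sig) == 0)
 *		;	// sig holds the signature copied out above
 *	if (fd >= 0 && ioctl(fd, DPT_BLINKLED, &blink) == 0)
 *		;	// blink holds the current blink-LED code
 */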
2031
2032static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2033{
2034	struct inode *inode;
2035	long ret;
2036 
2037	inode = file_inode(file);
2038 
2039	mutex_lock(&adpt_mutex);
2040	ret = adpt_ioctl(inode, file, cmd, arg);
2041	mutex_unlock(&adpt_mutex);
2042
2043	return ret;
2044}
2045
2046#ifdef CONFIG_COMPAT
2047static long compat_adpt_ioctl(struct file *file,
2048				unsigned int cmd, unsigned long arg)
2049{
2050	struct inode *inode;
2051	long ret;
2052 
2053	inode = file_inode(file);
2054 
2055	mutex_lock(&adpt_mutex);
2056 
2057	switch(cmd) {
2058		case DPT_SIGNATURE:
2059		case I2OUSRCMD:
2060		case DPT_CTRLINFO:
2061		case DPT_SYSINFO:
2062		case DPT_BLINKLED:
2063		case I2ORESETCMD:
2064		case I2ORESCANCMD:
2065		case (DPT_TARGET_BUSY & 0xFFFF):
2066		case DPT_TARGET_BUSY:
2067			ret = adpt_ioctl(inode, file, cmd, arg);
2068			break;
2069		default:
2070			ret =  -ENOIOCTLCMD;
2071	}
2072 
2073	mutex_unlock(&adpt_mutex);
2074 
2075	return ret;
2076}
2077#endif
2078
2079static irqreturn_t adpt_isr(int irq, void *dev_id)
2080{
2081	struct scsi_cmnd* cmd;
2082	adpt_hba* pHba = dev_id;
2083	u32 m;
2084	void __iomem *reply;
2085	u32 status=0;
2086	u32 context;
2087	ulong flags = 0;
2088	int handled = 0;
2089
2090	if (pHba == NULL){
2091		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2092		return IRQ_NONE;
2093	}
2094	if(pHba->host)
2095		spin_lock_irqsave(pHba->host->host_lock, flags);
2096
2097	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2098		m = readl(pHba->reply_port);
2099		if(m == EMPTY_QUEUE){
2100			// Try twice then give up
2101			rmb();
2102			m = readl(pHba->reply_port);
2103			if(m == EMPTY_QUEUE){ 
2104				// This really should not happen
2105				printk(KERN_ERR"dpti: Could not get reply frame\n");
2106				goto out;
2107			}
2108		}
2109		if (pHba->reply_pool_pa <= m &&
2110		    m < pHba->reply_pool_pa +
2111			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2112			reply = (u8 *)pHba->reply_pool +
2113						(m - pHba->reply_pool_pa);
2114		} else {
2115			/* Ick, we should *never* be here */
2116			printk(KERN_ERR "dpti: reply frame not from pool\n");
2117			reply = (u8 *)bus_to_virt(m);
2118		}
2119
2120		if (readl(reply) & MSG_FAIL) {
2121			u32 old_m = readl(reply+28); 
2122			void __iomem *msg;
2123			u32 old_context;
2124			PDEBUG("%s: Failed message\n",pHba->name);
2125			if(old_m >= 0x100000){
2126				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2127				writel(m,pHba->reply_port);
2128				continue;
2129			}
2130			// Transaction context is 0 in failed reply frame
2131			msg = pHba->msg_addr_virt + old_m;
2132			old_context = readl(msg+12);
2133			writel(old_context, reply+12);
2134			adpt_send_nop(pHba, old_m);
2135		} 
2136		context = readl(reply+8);
2137		if(context & 0x40000000){ // IOCTL
2138			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2139			if( p != NULL) {
2140				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2141			}
2142			// All IOCTLs will also be post wait
2143		}
2144		if(context & 0x80000000){ // Post wait message
2145			status = readl(reply+16);
2146			if(status  >> 24){
2147				status &=  0xffff; /* Get detail status */
2148			} else {
2149				status = I2O_POST_WAIT_OK;
2150			}
2151			if(!(context & 0x40000000)) {
2152				/*
2153				 * The request tag is one less than the command tag
2154				 * as the firmware might treat a 0 tag as invalid
2155				 */
2156				cmd = scsi_host_find_tag(pHba->host,
2157							 readl(reply + 12) - 1);
2158				if(cmd != NULL) {
2159					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2160				}
2161			}
2162			adpt_i2o_post_wait_complete(context, status);
2163		} else { // SCSI message
2164			/*
2165			 * The request tag is one less than the command tag
2166			 * as the firmware might treat a 0 tag as invalid
2167			 */
2168			cmd = scsi_host_find_tag(pHba->host,
2169						 readl(reply + 12) - 1);
2170			if(cmd != NULL){
2171				scsi_dma_unmap(cmd);
2172				adpt_i2o_scsi_complete(reply, cmd);
2173			}
2174		}
2175		writel(m, pHba->reply_port);
2176		wmb();
2177		rmb();
2178	}
2179	handled = 1;
2180out:	if(pHba->host)
2181		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2182	return IRQ_RETVAL(handled);
2183}
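
/*
 * For reference, the reply-frame offsets the ISR above (and
 * adpt_i2o_scsi_complete() below) depend on: +0 message flags
 * (MSG_FAIL), +8 initiator context (bit 30 = IOCTL, bit 31 = post
 * wait), +12 transaction context (SCSI tag or ioctl cookie; a failed
 * frame instead preserves the old MFA at +28), +16 detailed status,
 * +20 bytes transferred, +28 sense data on a check condition.
 */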
2184
2185static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2186{
2187	int i;
2188	u32 msg[MAX_MESSAGE_SIZE];
2189	u32* mptr;
2190	u32* lptr;
2191	u32 *lenptr;
2192	int direction;
2193	int scsidir;
2194	int nseg;
2195	u32 len;
2196	u32 reqlen;
2197	s32 rcode;
2198	dma_addr_t addr;
2199
2200	memset(msg, 0 , sizeof(msg));
2201	len = scsi_bufflen(cmd);
2202	direction = 0x00000000;	
2203	
2204	scsidir = 0x00000000;			// DATA NO XFER
2205	if(len) {
2206		/*
2207		 * Set SCBFlags to indicate if data is being transferred
2208		 * in or out, or no data transfer
2209		 * Note:  Do not have to verify index is less than 0 since
2210		 * cmd->cmnd[0] is an unsigned char
2211		 */
2212		switch(cmd->sc_data_direction){
2213		case DMA_FROM_DEVICE:
2214			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2215			break;
2216		case DMA_TO_DEVICE:
2217			direction=0x04000000;	// SGL OUT
2218			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
2219			break;
2220		case DMA_NONE:
2221			break;
2222		case DMA_BIDIRECTIONAL:
2223			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2224			// Assume In - and continue;
2225			break;
2226		default:
2227			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2228			     pHba->name, cmd->cmnd[0]);
2229			cmd->result = (DID_ERROR <<16);
2230			cmd->scsi_done(cmd);
2231			return 	0;
2232		}
2233	}
2234	// msg[0] is set later
2235	// I2O_CMD_SCSI_EXEC
2236	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2237	msg[2] = 0;
2238	/* Add 1 to avoid firmware treating it as invalid command */
2239	msg[3] = cmd->request->tag + 1;
2240	// Our cards use the transaction context as the tag for queueing
2241	// Adaptec/DPT Private stuff 
2242	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2243	msg[5] = d->tid;
2244	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
2245	// I2O_SCB_FLAG_ENABLE_DISCONNECT | 
2246	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | 
2247	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2248	msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2249
2250	mptr=msg+7;
2251
2252	// Write SCSI command into the message - always 16 byte block 
2253	memset(mptr, 0,  16);
2254	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2255	mptr+=4;
2256	lenptr=mptr++;		/* Remember me - fill in when we know */
2257	if (dpt_dma64(pHba)) {
2258		reqlen = 16;		// SINGLE SGE
2259		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2260		*mptr++ = 1 << PAGE_SHIFT;
2261	} else {
2262		reqlen = 14;		// SINGLE SGE
2263	}
2264	/* Now fill in the SGList and command */
2265
2266	nseg = scsi_dma_map(cmd);
2267	BUG_ON(nseg < 0);
2268	if (nseg) {
2269		struct scatterlist *sg;
2270
2271		len = 0;
2272		scsi_for_each_sg(cmd, sg, nseg, i) {
2273			lptr = mptr;
2274			*mptr++ = direction|0x10000000|sg_dma_len(sg);
2275			len+=sg_dma_len(sg);
2276			addr = sg_dma_address(sg);
2277			*mptr++ = dma_low(addr);
2278			if (dpt_dma64(pHba))
2279				*mptr++ = dma_high(addr);
2280			/* Make this an end of list */
2281			if (i == nseg - 1)
2282				*lptr = direction|0xD0000000|sg_dma_len(sg);
2283		}
2284		reqlen = mptr - msg;
2285		*lenptr = len;
2286		
2287		if(cmd->underflow && len != cmd->underflow){
2288			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2289				len, cmd->underflow);
2290		}
2291	} else {
2292		*lenptr = len = 0;
2293		reqlen = 12;
2294	}
2295	
2296	/* Stick the headers on */
2297	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2298	
2299	// Send it on its way
2300	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2301	if (rcode == 0) {
2302		return 0;
2303	}
2304	return rcode;
2305}
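
/*
 * Summary sketch of the inbound frame built by adpt_scsi_to_i2o()
 * above, for the 32-bit SG case (u32 indices into msg[]):
 *
 *	msg[0]   reqlen<<16 | SGL_OFFSET_12 (SGL_OFFSET_0 if no data)
 *	msg[1]   0xff<<24 | HOST_TID<<12 | tid
 *	msg[3]   cmd->request->tag + 1	(a 0 tag would read as invalid)
 *	msg[4]   I2O_CMD_SCSI_EXEC | DPT_ORGANIZATION_ID<<16
 *	msg[5]   tid
 *	msg[6]   scsidir | 0x20a00000 | CDB length
 *	msg[7..10]   CDB, zero padded to 16 bytes
 *	msg[11]  total data length
 *	msg[12..]    simple SG elements; the last is flagged 0xD0000000
 */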
2306
2307
2308static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2309{
2310	struct Scsi_Host *host;
2311
2312	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2313	if (host == NULL) {
2314		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2315		return -1;
2316	}
2317	host->hostdata[0] = (unsigned long)pHba;
2318	pHba->host = host;
2319
2320	host->irq = pHba->pDev->irq;
2321	/* no IO ports, so don't have to set host->io_port and
2322	 * host->n_io_port
2323	 */
2324	host->io_port = 0;
2325	host->n_io_port = 0;
2326				/* see comments in scsi_host.h */
2327	host->max_id = 16;
2328	host->max_lun = 256;
2329	host->max_channel = pHba->top_scsi_channel + 1;
2330	host->cmd_per_lun = 1;
2331	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2332	host->sg_tablesize = pHba->sg_tablesize;
2333	host->can_queue = pHba->post_fifo_size;
2334
2335	return 0;
2336}
2337
2338
2339static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
2340{
2341	adpt_hba* pHba;
2342	u32 hba_status;
2343	u32 dev_status;
2344	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits 
2345	// I know this would look cleaner if I just read bytes
2346	// but the model I have been using for all the rest of the
2347	// io is in 4 byte words - so I keep that model
2348	u16 detailed_status = readl(reply+16) &0xffff;
2349	dev_status = (detailed_status & 0xff);
2350	hba_status = detailed_status >> 8;
2351
2352	// calculate resid for sg 
2353	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2354
2355	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2356
2357	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
2358
2359	if(!(reply_flags & MSG_FAIL)) {
2360		switch(detailed_status & I2O_SCSI_DSC_MASK) {
2361		case I2O_SCSI_DSC_SUCCESS:
2362			cmd->result = (DID_OK << 16);
2363			// handle underflow
2364			if (readl(reply+20) < cmd->underflow) {
2365				cmd->result = (DID_ERROR <<16);
2366				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2367			}
2368			break;
2369		case I2O_SCSI_DSC_REQUEST_ABORTED:
2370			cmd->result = (DID_ABORT << 16);
2371			break;
2372		case I2O_SCSI_DSC_PATH_INVALID:
2373		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2374		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2375		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2376		case I2O_SCSI_DSC_NO_ADAPTER:
2377		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2378			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2379				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2380			cmd->result = (DID_TIME_OUT << 16);
2381			break;
2382		case I2O_SCSI_DSC_ADAPTER_BUSY:
2383		case I2O_SCSI_DSC_BUS_BUSY:
2384			cmd->result = (DID_BUS_BUSY << 16);
2385			break;
2386		case I2O_SCSI_DSC_SCSI_BUS_RESET:
2387		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2388			cmd->result = (DID_RESET << 16);
2389			break;
2390		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2391			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2392			cmd->result = (DID_PARITY << 16);
2393			break;
2394		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2395		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2396		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2397		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2398		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2399		case I2O_SCSI_DSC_DATA_OVERRUN:
2400		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2401		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2402		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2403		case I2O_SCSI_DSC_PROVIDE_FAILURE:
2404		case I2O_SCSI_DSC_REQUEST_TERMINATED:
2405		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2406		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2407		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2408		case I2O_SCSI_DSC_INVALID_CDB:
2409		case I2O_SCSI_DSC_LUN_INVALID:
2410		case I2O_SCSI_DSC_SCSI_TID_INVALID:
2411		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2412		case I2O_SCSI_DSC_NO_NEXUS:
2413		case I2O_SCSI_DSC_CDB_RECEIVED:
2414		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2415		case I2O_SCSI_DSC_QUEUE_FROZEN:
2416		case I2O_SCSI_DSC_REQUEST_INVALID:
2417		default:
2418			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2419				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2420			       hba_status, dev_status, cmd->cmnd[0]);
2421			cmd->result = (DID_ERROR << 16);
2422			break;
2423		}
2424
2425		// copy over the request sense data if it was a check
2426		// condition status
2427		if (dev_status == SAM_STAT_CHECK_CONDITION) {
2428			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2429			// Copy over the sense data
2430			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2431			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ && 
2432			   cmd->sense_buffer[2] == DATA_PROTECT ){
2433				/* This is to handle an array failed */
2434				cmd->result = (DID_TIME_OUT << 16);
2435				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2436					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2437					hba_status, dev_status, cmd->cmnd[0]);
2438
2439			}
2440		}
2441	} else {
2442		/* In this condition we could not talk to the tid;
2443		 * the card rejected it.  We should signal a retry
2444		 * for a limited number of retries.
2445		 */
2446		cmd->result = (DID_TIME_OUT << 16);
2447		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2448			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2449			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2450	}
2451
2452	cmd->result |= (dev_status);
2453
2454	if(cmd->scsi_done != NULL){
2455		cmd->scsi_done(cmd);
2456	} 
2457}
2458
2459
2460static s32 adpt_rescan(adpt_hba* pHba)
2461{
2462	s32 rcode;
2463	ulong flags = 0;
2464
2465	if(pHba->host)
2466		spin_lock_irqsave(pHba->host->host_lock, flags);
2467	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2468		goto out;
2469	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2470		goto out;
2471	rcode = 0;
2472out:	if(pHba->host)
2473		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2474	return rcode;
2475}
2476
2477
2478static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2479{
2480	int i;
2481	int max;
2482	int tid;
2483	struct i2o_device *d;
2484	i2o_lct *lct = pHba->lct;
2485	u8 bus_no = 0;
2486	s16 scsi_id;
2487	u64 scsi_lun;
2488	u32 buf[10]; // at least 8 u32's
2489	struct adpt_device* pDev = NULL;
2490	struct i2o_device* pI2o_dev = NULL;
2491	
2492	if (lct == NULL) {
2493		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2494		return -1;
2495	}
2496	
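	/* table_size is in 32-bit words: the code below assumes a
	 * 3-word header followed by 9-word entries, giving
	 * (table_size - 3) / 9 usable LCT entries. */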
2497	max = lct->table_size;	
2498	max -= 3;
2499	max /= 9;
2500
2501	// Mark each drive as unscanned
2502	for (d = pHba->devices; d; d = d->next) {
2503		pDev =(struct adpt_device*) d->owner;
2504		if(!pDev){
2505			continue;
2506		}
2507		pDev->state |= DPTI_DEV_UNSCANNED;
2508	}
2509
2510	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2511	
2512	for(i=0;i<max;i++) {
2513		if( lct->lct_entry[i].user_tid != 0xfff){
2514			continue;
2515		}
2516
2517		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2518		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2519		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2520			tid = lct->lct_entry[i].tid;
2521			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2522				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2523				continue;
2524			}
2525			bus_no = buf[0]>>16;
2526			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
2527				printk(KERN_WARNING
2528					"%s: Channel number %d out of range\n",
2529					pHba->name, bus_no);
2530				continue;
2531			}
2532
2533			scsi_id = buf[1];
2534			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
2535			pDev = pHba->channel[bus_no].device[scsi_id];
2536			/* walk the LUN chain for this target */
2537			while(pDev) {
2538				if(pDev->scsi_lun == scsi_lun) {
2539					break;
2540				}
2541				pDev = pDev->next_lun;
2542			}
2543			if(!pDev ) { // Something new add it
2544				d = kmalloc(sizeof(struct i2o_device),
2545					    GFP_ATOMIC);
2546				if(d==NULL)
2547				{
2548					printk(KERN_CRIT "Out of memory for I2O device data.\n");
2549					return -ENOMEM;
2550				}
2551				
2552				d->controller = pHba;
2553				d->next = NULL;
2554
2555				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2556
2557				d->flags = 0;
2558				adpt_i2o_report_hba_unit(pHba, d);
2559				adpt_i2o_install_device(pHba, d);
2560	
2561				pDev = pHba->channel[bus_no].device[scsi_id];	
2562				if( pDev == NULL){
2563					pDev =
2564					  kzalloc(sizeof(struct adpt_device),
2565						  GFP_ATOMIC);
2566					if(pDev == NULL) {
2567						return -ENOMEM;
2568					}
2569					pHba->channel[bus_no].device[scsi_id] = pDev;
2570				} else {
2571					while (pDev->next_lun) {
2572						pDev = pDev->next_lun;
2573					}
2574					pDev = pDev->next_lun =
2575					  kzalloc(sizeof(struct adpt_device),
2576						  GFP_ATOMIC);
2577					if(pDev == NULL) {
2578						return -ENOMEM;
2579					}
2580				}
2581				pDev->tid = d->lct_data.tid;
2582				pDev->scsi_channel = bus_no;
2583				pDev->scsi_id = scsi_id;
2584				pDev->scsi_lun = scsi_lun;
2585				pDev->pI2o_dev = d;
2586				d->owner = pDev;
2587				pDev->type = (buf[0])&0xff;
2588				pDev->flags = (buf[0]>>8)&0xff;
2589				// Too late, SCSI system has made up its mind, but what the hey ...
2590				if(scsi_id > pHba->top_scsi_id){
2591					pHba->top_scsi_id = scsi_id;
2592				}
2593				if(scsi_lun > pHba->top_scsi_lun){
2594					pHba->top_scsi_lun = scsi_lun;
2595				}
2596				continue;
2597			} // end of new i2o device
2598
2599			// We found an old device - check it
2600			while(pDev) {
2601				if(pDev->scsi_lun == scsi_lun) {
2602					if(!scsi_device_online(pDev->pScsi_dev)) {
2603						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
2604								pHba->name,bus_no,scsi_id,scsi_lun);
2605						if (pDev->pScsi_dev) {
2606							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2607						}
2608					}
2609					d = pDev->pI2o_dev;
2610					if(d->lct_data.tid != tid) { // something changed
2611						pDev->tid = tid;
2612						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2613						if (pDev->pScsi_dev) {
2614							pDev->pScsi_dev->changed = TRUE;
2615							pDev->pScsi_dev->removable = TRUE;
2616						}
2617					}
2618					// Found it - mark it scanned
2619					pDev->state = DPTI_DEV_ONLINE;
2620					break;
2621				}
2622				pDev = pDev->next_lun;
2623			}
2624		}
2625	}
2626	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2627		pDev =(struct adpt_device*) pI2o_dev->owner;
2628		if(!pDev){
2629			continue;
2630		}
2631		// Mark offline any drives that previously existed but could
2632		// not be found in the LCT table
2633		if (pDev->state & DPTI_DEV_UNSCANNED){
2634			pDev->state = DPTI_DEV_OFFLINE;
2635			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2636			if (pDev->pScsi_dev) {
2637				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2638			}
2639		}
2640	}
2641	return 0;
2642}
2643
2644/*============================================================================
2645 *  Routines from i2o subsystem
2646 *============================================================================
2647 */
2648
2649
2650
2651/*
2652 *	Bring an I2O controller into HOLD state. See the spec.
2653 */
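/*
 * For orientation, the IOP states the code below walks through (values
 * as decoded in the DEBUG dump in adpt_i2o_status_get): INIT(0x01) ->
 * RESET(0x02) -> HOLD(0x04) -> READY(0x05) -> OPERATIONAL(0x08), plus
 * the error states FAILED(0x10) and FAULTED(0x11).  Anything not
 * already sitting in RESET gets reset first, then the outbound queue
 * is initialized and the HRT fetched, which lands the IOP in HOLD.
 */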
2654static int adpt_i2o_activate_hba(adpt_hba* pHba)
2655{
2656	int rcode;
2657
2658	if(pHba->initialized ) {
2659		if (adpt_i2o_status_get(pHba) < 0) {
2660			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2661				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2662				return rcode;
2663			}
2664			if (adpt_i2o_status_get(pHba) < 0) {
2665				printk(KERN_INFO "HBA not responding.\n");
2666				return -1;
2667			}
2668		}
2669
2670		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2671			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2672			return -1;
2673		}
2674
2675		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2676		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2677		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2678		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2679			adpt_i2o_reset_hba(pHba);			
2680			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2681				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2682				return -1;
2683			}
2684		}
2685	} else {
2686		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2687			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2688			return rcode;
2689		}
2690
2691	}
2692
2693	if (adpt_i2o_init_outbound_q(pHba) < 0) {
2694		return -1;
2695	}
2696
2697	/* In HOLD state */
2698	
2699	if (adpt_i2o_hrt_get(pHba) < 0) {
2700		return -1;
2701	}
2702
2703	return 0;
2704}
2705
2706/*
2707 *	Bring a controller online into OPERATIONAL state. 
2708 */
2709 
2710static int adpt_i2o_online_hba(adpt_hba* pHba)
2711{
2712	if (adpt_i2o_systab_send(pHba) < 0)
2713		return -1;
2714	/* In READY state */
2715
2716	if (adpt_i2o_enable_hba(pHba) < 0)
2717		return -1;
2718
2719	/* In OPERATIONAL state  */
2720	return 0;
2721}
2722
2723static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2724{
2725	u32 __iomem *msg;
2726	ulong timeout = jiffies + 5*HZ;
2727
2728	while(m == EMPTY_QUEUE){
2729		rmb();
2730		m = readl(pHba->post_port);
2731		if(m != EMPTY_QUEUE){
2732			break;
2733		}
2734		if(time_after(jiffies,timeout)){
2735			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2736			return 2;
2737		}
2738		schedule_timeout_uninterruptible(1);
2739	}
2740	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2741	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2742	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2743	writel( 0,&msg[2]);
2744	wmb();
2745
2746	writel(m, pHba->post_port);
2747	wmb();
2748	return 0;
2749}
2750
2751static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2752{
2753	u8 *status;
2754	dma_addr_t addr;
2755	u32 __iomem *msg = NULL;
2756	int i;
2757	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2758	u32 m;
2759
2760	do {
2761		rmb();
2762		m = readl(pHba->post_port);
2763		if (m != EMPTY_QUEUE) {
2764			break;
2765		}
2766
2767		if(time_after(jiffies,timeout)){
2768			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2769			return -ETIMEDOUT;
2770		}
2771		schedule_timeout_uninterruptible(1);
2772	} while(m == EMPTY_QUEUE);
2773
2774	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2775
2776	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2777	if (!status) {
2778		adpt_send_nop(pHba, m);
2779		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2780			pHba->name);
2781		return -ENOMEM;
2782	}
2783
2784	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2785	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2786	writel(0, &msg[2]);
2787	writel(0x0106, &msg[3]);	/* Transaction context */
2788	writel(4096, &msg[4]);		/* Host page frame size */
2789	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
2790	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
2791	writel((u32)addr, &msg[7]);
2792
2793	writel(m, pHba->post_port);
2794	wmb();
2795
2796	// Wait for the reply status to come back
2797	do {
2798		if (*status) {
2799			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2800				break;
2801			}
2802		}
2803		rmb();
2804		if(time_after(jiffies,timeout)){
2805			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2806			/* We lose 4 bytes of "status" here, but we
2807			   cannot free it because the controller may
2808			   wake up and corrupt those bytes at any time */
2809			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2810			return -ETIMEDOUT;
2811		}
2812		schedule_timeout_uninterruptible(1);
2813	} while (1);
2814
2815	// If the command was successful, fill the fifo with our reply
2816	// message packets
2817	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2818		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2819		return -2;
2820	}
2821	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2822
2823	if(pHba->reply_pool != NULL) {
2824		dma_free_coherent(&pHba->pDev->dev,
2825			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2826			pHba->reply_pool, pHba->reply_pool_pa);
2827	}
2828
2829	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2830				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2831				&pHba->reply_pool_pa, GFP_KERNEL);
2832	if (!pHba->reply_pool) {
2833		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2834		return -ENOMEM;
2835	}
2836
2837	for(i = 0; i < pHba->reply_fifo_size; i++) {
2838		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2839			pHba->reply_port);
2840		wmb();
2841	}
2842	adpt_i2o_status_get(pHba);
2843	return 0;
2844}
2845
2846
2847/*
2848 * I2O System Table.  Contains information about
2849 * all the IOPs in the system.  Used to inform IOPs
2850 * about each other's existence.
2851 *
2852 * sys_tbl_ver is the CurrentChangeIndicator that is
2853 * used by IOPs to track changes.
2854 */
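/*
 * As a sketch, adpt_i2o_build_sys_table() below fills one entry per
 * registered HBA roughly like this (fields from struct
 * i2o_sys_tbl_entry):
 *
 *	sys_tbl->iops[n].org_id      = status_block->org_id;
 *	sys_tbl->iops[n].iop_id      = pHba->unit + 2;
 *	sys_tbl->iops[n].iop_state   = status_block->iop_state;
 *	sys_tbl->iops[n].frame_size  = status_block->inbound_frame_size;
 *	sys_tbl->iops[n].inbound_low/high = pHba->base_addr_phys + 0x40;
 */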
2855
2856
2857
2858static s32 adpt_i2o_status_get(adpt_hba* pHba)
2859{
2860	ulong timeout;
2861	u32 m;
2862	u32 __iomem *msg;
2863	u8 *status_block=NULL;
2864
2865	if(pHba->status_block == NULL) {
2866		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2867					sizeof(i2o_status_block),
2868					&pHba->status_block_pa, GFP_KERNEL);
2869		if(pHba->status_block == NULL) {
2870			printk(KERN_ERR
2871			"dpti%d: Get Status Block failed; Out of memory. \n", 
2872			pHba->unit);
2873			return -ENOMEM;
2874		}
2875	}
2876	memset(pHba->status_block, 0, sizeof(i2o_status_block));
2877	status_block = (u8*)(pHba->status_block);
2878	timeout = jiffies+TMOUT_GETSTATUS*HZ;
2879	do {
2880		rmb();
2881		m = readl(pHba->post_port);
2882		if (m != EMPTY_QUEUE) {
2883			break;
2884		}
2885		if(time_after(jiffies,timeout)){
2886			printk(KERN_ERR "%s: Timeout waiting for message !\n",
2887					pHba->name);
2888			return -ETIMEDOUT;
2889		}
2890		schedule_timeout_uninterruptible(1);
2891	} while(m==EMPTY_QUEUE);
2892
2893	
2894	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2895
2896	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2897	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2898	writel(1, &msg[2]);
2899	writel(0, &msg[3]);
2900	writel(0, &msg[4]);
2901	writel(0, &msg[5]);
2902	writel( dma_low(pHba->status_block_pa), &msg[6]);
2903	writel( dma_high(pHba->status_block_pa), &msg[7]);
2904	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2905
2906	//post message
2907	writel(m, pHba->post_port);
2908	wmb();
2909
2910	while(status_block[87]!=0xff){
2911		if(time_after(jiffies,timeout)){
2912			printk(KERN_ERR"dpti%d: Get status timeout.\n",
2913				pHba->unit);
2914			return -ETIMEDOUT;
2915		}
2916		rmb();
2917		schedule_timeout_uninterruptible(1);
2918	}
2919
2920	// Set up our number of outbound and inbound messages
2921	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2922	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2923		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2924	}
2925
2926	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2927	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2928		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2929	}
2930
2931	// Calculate the Scatter Gather list size
2932	if (dpt_dma64(pHba)) {
2933		pHba->sg_tablesize
2934		  = ((pHba->status_block->inbound_frame_size * 4
2935		  - 14 * sizeof(u32))
2936		  / (sizeof(struct sg_simple_element) + sizeof(u32)));
2937	} else {
2938		pHba->sg_tablesize
2939		  = ((pHba->status_block->inbound_frame_size * 4
2940		  - 12 * sizeof(u32))
2941		  / sizeof(struct sg_simple_element));
2942	}
2943	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2944		pHba->sg_tablesize = SG_LIST_ELEMENTS;
2945	}
2946
2947
2948#ifdef DEBUG
2949	printk("dpti%d: State = ",pHba->unit);
2950	switch(pHba->status_block->iop_state) {
2951		case 0x01:
2952			printk("INIT\n");
2953			break;
2954		case 0x02:
2955			printk("RESET\n");
2956			break;
2957		case 0x04:
2958			printk("HOLD\n");
2959			break;
2960		case 0x05:
2961			printk("READY\n");
2962			break;
2963		case 0x08:
2964			printk("OPERATIONAL\n");
2965			break;
2966		case 0x10:
2967			printk("FAILED\n");
2968			break;
2969		case 0x11:
2970			printk("FAULTED\n");
2971			break;
2972		default:
2973			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
2974	}
2975#endif
2976	return 0;
2977}
2978
2979/*
2980 * Get the IOP's Logical Configuration Table
2981 */
2982static int adpt_i2o_lct_get(adpt_hba* pHba)
2983{
2984	u32 msg[8];
2985	int ret;
2986	u32 buf[16];
2987
2988	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2989		pHba->lct_size = pHba->status_block->expected_lct_size;
2990	}
2991	do {
2992		if (pHba->lct == NULL) {
2993			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
2994					pHba->lct_size, &pHba->lct_pa,
2995					GFP_ATOMIC);
2996			if(pHba->lct == NULL) {
2997				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2998					pHba->name);
2999				return -ENOMEM;
3000			}
3001		}
3002		memset(pHba->lct, 0, pHba->lct_size);
3003
3004		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3005		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3006		msg[2] = 0;
3007		msg[3] = 0;
3008		msg[4] = 0xFFFFFFFF;	/* All devices */
3009		msg[5] = 0x00000000;	/* Report now */
3010		msg[6] = 0xD0000000|pHba->lct_size;
3011		msg[7] = (u32)pHba->lct_pa;
3012
3013		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3014			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3015				pHba->name, ret);	
3016			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3017			return ret;
3018		}
3019
3020		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3021			pHba->lct_size = pHba->lct->table_size << 2;
3022			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3023					pHba->lct, pHba->lct_pa);
3024			pHba->lct = NULL;
3025		}
3026	} while (pHba->lct == NULL);
3027
3028	PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3029
3030
3031	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3032	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3033		pHba->FwDebugBufferSize = buf[1];
3034		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3035						pHba->FwDebugBufferSize);
3036		if (pHba->FwDebugBuffer_P) {
3037			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
3038							FW_DEBUG_FLAGS_OFFSET;
3039			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3040							FW_DEBUG_BLED_OFFSET;
3041			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
3042			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3043						FW_DEBUG_STR_LENGTH_OFFSET;
3044			pHba->FwDebugBuffer_P += buf[2]; 
3045			pHba->FwDebugFlags = 0;
3046		}
3047	}
3048
3049	return 0;
3050}
3051
3052static int adpt_i2o_build_sys_table(void)
3053{
3054	adpt_hba* pHba = hba_chain;
3055	int count = 0;
3056
3057	if (sys_tbl)
3058		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3059					sys_tbl, sys_tbl_pa);
3060
3061	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
3062				(hba_count) * sizeof(struct i2o_sys_tbl_entry);
3063
3064	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3065				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3066	if (!sys_tbl) {
3067		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");	
3068		return -ENOMEM;
3069	}
3070
3071	sys_tbl->num_entries = hba_count;
3072	sys_tbl->version = I2OVERSION;
3073	sys_tbl->change_ind = sys_tbl_ind++;
3074
3075	for(pHba = hba_chain; pHba; pHba = pHba->next) {
3076		u64 addr;
3077		// Get updated Status Block so we have the latest information
3078		if (adpt_i2o_status_get(pHba)) {
3079			sys_tbl->num_entries--;
3080			continue; // try next one	
3081		}
3082
3083		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3084		sys_tbl->iops[count].iop_id = pHba->unit + 2;
3085		sys_tbl->iops[count].seg_num = 0;
3086		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3087		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3088		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3089		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3090		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3091		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3092		addr = pHba->base_addr_phys + 0x40;
3093		sys_tbl->iops[count].inbound_low = dma_low(addr);
3094		sys_tbl->iops[count].inbound_high = dma_high(addr);
3095
3096		count++;
3097	}
3098
3099#ifdef DEBUG
3100{
3101	u32 *table = (u32*)sys_tbl;
3102	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3103	for(count = 0; count < (sys_tbl_len >>2); count++) {
3104		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", 
3105			count, table[count]);
3106	}
3107}
3108#endif
3109
3110	return 0;
3111}
3112
3113
3114/*
3115 *	 Dump the information block associated with a given unit (TID)
3116 */
3117 
3118static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3119{
3120	char buf[64];
3121	int unit = d->lct_data.tid;
3122
3123	printk(KERN_INFO "TID %3.3d ", unit);
3124
3125	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3126	{
3127		buf[16]=0;
3128		printk(" Vendor: %-12.12s", buf);
3129	}
3130	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3131	{
3132		buf[16]=0;
3133		printk(" Device: %-12.12s", buf);
3134	}
3135	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3136	{
3137		buf[8]=0;
3138		printk(" Rev: %-12.12s\n", buf);
3139	}
3140#ifdef DEBUG
3141	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3142	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3143	 printk(KERN_INFO "\tFlags: ");
3144
3145	 if(d->lct_data.device_flags&(1<<0))
3146		  printk("C");	     // ConfigDialog requested
3147	 if(d->lct_data.device_flags&(1<<1))
3148		  printk("U");	     // Multi-user capable
3149	 if(!(d->lct_data.device_flags&(1<<4)))
3150		  printk("P");	     // Peer service enabled!
3151	 if(!(d->lct_data.device_flags&(1<<5)))
3152		  printk("M");	     // Mgmt service enabled!
3153	 printk("\n");
3154#endif
3155}
3156
3157#ifdef DEBUG
3158/*
3159 *	Do i2o class name lookup
3160 */
3161static const char *adpt_i2o_get_class_name(int class)
3162{
3163	int idx = 16;
3164	static char *i2o_class_name[] = {
3165		"Executive",
3166		"Device Driver Module",
3167		"Block Device",
3168		"Tape Device",
3169		"LAN Interface",
3170		"WAN Interface",
3171		"Fibre Channel Port",
3172		"Fibre Channel Device",
3173		"SCSI Device",
3174		"ATE Port",
3175		"ATE Device",
3176		"Floppy Controller",
3177		"Floppy Device",
3178		"Secondary Bus Port",
3179		"Peer Transport Agent",
3180		"Peer Transport",
3181		"Unknown"
3182	};
3183	
3184	switch(class&0xFFF) {
3185	case I2O_CLASS_EXECUTIVE:
3186		idx = 0; break;
3187	case I2O_CLASS_DDM:
3188		idx = 1; break;
3189	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3190		idx = 2; break;
3191	case I2O_CLASS_SEQUENTIAL_STORAGE:
3192		idx = 3; break;
3193	case I2O_CLASS_LAN:
3194		idx = 4; break;
3195	case I2O_CLASS_WAN:
3196		idx = 5; break;
3197	case I2O_CLASS_FIBRE_CHANNEL_PORT:
3198		idx = 6; break;
3199	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3200		idx = 7; break;
3201	case I2O_CLASS_SCSI_PERIPHERAL:
3202		idx = 8; break;
3203	case I2O_CLASS_ATE_PORT:
3204		idx = 9; break;
3205	case I2O_CLASS_ATE_PERIPHERAL:
3206		idx = 10; break;
3207	case I2O_CLASS_FLOPPY_CONTROLLER:
3208		idx = 11; break;
3209	case I2O_CLASS_FLOPPY_DEVICE:
3210		idx = 12; break;
3211	case I2O_CLASS_BUS_ADAPTER_PORT:
3212		idx = 13; break;
3213	case I2O_CLASS_PEER_TRANSPORT_AGENT:
3214		idx = 14; break;
3215	case I2O_CLASS_PEER_TRANSPORT:
3216		idx = 15; break;
3217	}
3218	return i2o_class_name[idx];
3219}
3220#endif
3221
3222
3223static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3224{
3225	u32 msg[6];
3226	int ret, size = sizeof(i2o_hrt);
3227
3228	do {
3229		if (pHba->hrt == NULL) {
3230			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3231					size, &pHba->hrt_pa, GFP_KERNEL);
3232			if (pHba->hrt == NULL) {
3233				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3234				return -ENOMEM;
3235			}
3236		}
3237
3238		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3239		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3240		msg[2]= 0;
3241		msg[3]= 0;
3242		msg[4]= (0xD0000000 | size);    /* Simple transaction */
3243		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */
3244
3245		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3246			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3247			return ret;
3248		}
3249
3250		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3251			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3252			dma_free_coherent(&pHba->pDev->dev, size,
3253				pHba->hrt, pHba->hrt_pa);
3254			size = newsize;
3255			pHba->hrt = NULL;
3256		}
3257	} while(pHba->hrt == NULL);
3258	return 0;
3259}
3260
3261/*
3262 *	 Query one scalar group value or a whole scalar group.
3263 */		    	
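/*
 * Typical use, as in adpt_i2o_report_hba_unit(): fetch 16 bytes of
 * field 3 (vendor id) from scalar group 0xF100 of a unit, or pass
 * field = -1 to read back the whole group:
 *
 *	char buf[64];
 *	if (adpt_i2o_query_scalar(pHba, tid, 0xF100, 3, buf, 16) >= 0)
 *		buf[16] = 0;	// NUL-terminate for printing
 */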
3264static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid, 
3265			int group, int field, void *buf, int buflen)
3266{
3267	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3268	u8 *opblk_va;
3269	dma_addr_t opblk_pa;
3270	u8 *resblk_va;
3271	dma_addr_t resblk_pa;
3272
3273	int size;
3274
3275	/* 8 bytes for header */
3276	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3277			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3278	if (resblk_va == NULL) {
3279		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3280		return -ENOMEM;
3281	}
3282
3283	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3284			sizeof(opblk), &opblk_pa, GFP_KERNEL);
3285	if (opblk_va == NULL) {
3286		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3287			resblk_va, resblk_pa);
3288		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3289			pHba->name);
3290		return -ENOMEM;
3291	}
3292	if (field == -1)  		/* whole group */
3293			opblk[4] = -1;
3294
3295	memcpy(opblk_va, opblk, sizeof(opblk));
3296	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid, 
3297		opblk_va, opblk_pa, sizeof(opblk),
3298		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3299	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3300	if (size == -ETIME) {
3301		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3302							resblk_va, resblk_pa);
3303		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3304		return -ETIME;
3305	} else if (size == -EINTR) {
3306		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3307							resblk_va, resblk_pa);
3308		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3309		return -EINTR;
3310	}
3311			
3312	memcpy(buf, resblk_va+8, buflen);  /* cut off header */
3313
3314	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3315						resblk_va, resblk_pa);
3316	if (size < 0)
3317		return size;	
3318
3319	return buflen;
3320}
3321
3322
3323/*	Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3324 *
3325 *	This function can be used for all UtilParamsGet/Set operations.
3326 *	The OperationBlock is given in opblk-buffer, 
3327 *	and results are returned in resblk-buffer.
3328 *	Note that the minimum sized resblk is 8 bytes and contains
3329 *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3330 */
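/*
 * Concretely, the 8-byte header occupies res[0..1]; the fields this
 * driver actually unpacks live in res[1]: ErrorInfoSize in bits
 * 31..24, BlockStatus in bits 23..16 and BlockSize (in words) in bits
 * 15..0.  A non-zero BlockStatus is returned as -BlockStatus; on
 * success the byte count used in resblk is 4 + (BlockSize << 2).
 */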
3331static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, 
3332		  void *opblk_va,  dma_addr_t opblk_pa, int oplen,
3333		void *resblk_va, dma_addr_t resblk_pa, int reslen)
3334{
3335	u32 msg[9]; 
3336	u32 *res = (u32 *)resblk_va;
3337	int wait_status;
3338
3339	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3340	msg[1] = cmd << 24 | HOST_TID << 12 | tid; 
3341	msg[2] = 0;
3342	msg[3] = 0;
3343	msg[4] = 0;
3344	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
3345	msg[6] = (u32)opblk_pa;
3346	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
3347	msg[8] = (u32)resblk_pa;
3348
3349	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3350		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3351   		return wait_status; 	/* -DetailedStatus */
3352	}
3353
3354	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
3355		printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
3356			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3357			pHba->name,
3358			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3359							 : "PARAMS_GET",   
3360			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3361		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3362	}
3363
3364	return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3365}
3366
3367
3368static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3369{
3370	u32 msg[4];
3371	int ret;
3372
3373	adpt_i2o_status_get(pHba);
3374
3375	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3376
3377	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3378   	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3379		return 0;
3380	}
3381
3382	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3383	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3384	msg[2] = 0;
3385	msg[3] = 0;
3386
3387	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3388		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3389				pHba->unit, -ret);
3390	} else {
3391		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3392	}
3393
3394	adpt_i2o_status_get(pHba);
3395	return ret;
3396}
3397
3398
3399/* 
3400 * Enable IOP. Allows the IOP to resume external operations.
3401 */
3402static int adpt_i2o_enable_hba(adpt_hba* pHba)
3403{
3404	u32 msg[4];
3405	int ret;
3406	
3407	adpt_i2o_status_get(pHba);
3408	if(!pHba->status_block){
3409		return -ENOMEM;
3410	}
3411	/* Enable only allowed on READY state */
3412	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3413		return 0;
3414
3415	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3416		return -EINVAL;
3417
3418	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3419	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3420	msg[2]= 0;
3421	msg[3]= 0;
3422
3423	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3424		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n", 
3425			pHba->name, ret);
3426	} else {
3427		PDEBUG("%s: Enabled.\n", pHba->name);
3428	}
3429
3430	adpt_i2o_status_get(pHba);
3431	return ret;
3432}
3433
3434
3435static int adpt_i2o_systab_send(adpt_hba* pHba)
3436{
3437	u32 msg[12];
3438	int ret;
3439
3440	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3441	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3442	msg[2] = 0;
3443	msg[3] = 0;
3444	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3445	msg[5] = 0;				   /* Segment 0 */
3446
3447	/* 
3448	 * Provide three SGL-elements:
3449	 * System table (SysTab), Private memory space declaration and 
3450	 * Private i/o space declaration  
3451	 */
3452	msg[6] = 0x54000000 | sys_tbl_len;
3453	msg[7] = (u32)sys_tbl_pa;
3454	msg[8] = 0x54000000 | 0;
3455	msg[9] = 0;
3456	msg[10] = 0xD4000000 | 0;
3457	msg[11] = 0;
3458
3459	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3460		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n", 
3461			pHba->name, ret);
3462	}
3463#ifdef DEBUG
3464	else {
3465		PINFO("%s: SysTab set.\n", pHba->name);
3466	}
3467#endif
3468
3469	return ret;	
3470}
3471
3472
3473/*============================================================================
3474 *
3475 *============================================================================
3476 */
3477
3478
3479#ifdef UARTDELAY 
3480
3481static void adpt_delay(int millisec)
3482{
3483	int i;
3484	for (i = 0; i < millisec; i++) {
3485		udelay(1000);	/* delay for one millisecond */
3486	}
3487}
3488
3489#endif
3490
3491static struct scsi_host_template driver_template = {
3492	.module			= THIS_MODULE,
3493	.name			= "dpt_i2o",
3494	.proc_name		= "dpt_i2o",
3495	.show_info		= adpt_show_info,
3496	.info			= adpt_info,
3497	.queuecommand		= adpt_queue,
3498	.eh_abort_handler	= adpt_abort,
3499	.eh_device_reset_handler = adpt_device_reset,
3500	.eh_bus_reset_handler	= adpt_bus_reset,
3501	.eh_host_reset_handler	= adpt_reset,
3502	.bios_param		= adpt_bios_param,
3503	.slave_configure	= adpt_slave_configure,
3504	.can_queue		= MAX_TO_IOP_MESSAGES,
3505	.this_id		= 7,
3506};
3507
3508static int __init adpt_init(void)
3509{
3510	int		error;
3511	adpt_hba	*pHba, *next;
3512
3513	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3514
3515	error = adpt_detect(&driver_template);
3516	if (error < 0)
3517		return error;
3518	if (hba_chain == NULL)
3519		return -ENODEV;
3520
3521	for (pHba = hba_chain; pHba; pHba = pHba->next) {
3522		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3523		if (error)
3524			goto fail;
3525		scsi_scan_host(pHba->host);
3526	}
3527	return 0;
3528fail:
3529	for (pHba = hba_chain; pHba; pHba = next) {
3530		next = pHba->next;
3531		scsi_remove_host(pHba->host);
3532	}
3533	return error;
3534}
3535
3536static void __exit adpt_exit(void)
3537{
3538	adpt_hba	*pHba, *next;
3539
3540	for (pHba = hba_chain; pHba; pHba = next) {
3541		next = pHba->next;
3542		adpt_release(pHba);
3543	}
3544}
3545
3546module_init(adpt_init);
3547module_exit(adpt_exit);
3548
3549MODULE_LICENSE("GPL");